hip_filename | hip_content | cuda_filename | cuda_content
---|---|---|---|
c1f18c0459608e99e83447e8a490c816e2e86bb3.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <cudnn.h>
#include <assert.h>
#include <math.h>
#include "kudnn.h"
#include <time.h>
#include "test.h"
#include <sys/time.h>
static const double kMicro = 1.0e-6;
double getTime()
{
struct timeval TV;
struct timezone TZ;
const int RC = gettimeofday(&TV, &TZ);
if(RC == -1) {
//cerr << "ERROR: Bad call to gettimeofday" << endl;
printf("ERROR: Bad call to gettimeofday\n");
return(-1);
}
return( ((double)TV.tv_sec) + kMicro * ((double)TV.tv_usec) );
} // end getTime()
int main(){
int VERBOSE=1;
srand(time(NULL));
const int N=1, C=1, H=6, W=6; // src
const int K=C, Hd=3, Wd=3; // window
// const int Hs=Hd, Ws=Wd; // stride
const int Hs=2, Ws=2; // stride
const int Hp=0, Wp=0; // padding
assert(H>=Hd); assert(W>=Wd);
const int Hy=1+ceil((H+2*Hp-Hd)/(double)Hs), Wy=1+ceil((W+2*Wp-Wd)/(double)Ws); // dst
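// Worked example of the formula above with these values: Hy = 1 + ceil((6 + 0 - 3)/2.0) = 1 + 2 = 3,
// and likewise Wy = 3, so the 6x6 input pooled by a 3x3 window at stride 2 gives a 3x3 output.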
printf("N:%d C:%d H:%d W:%d\n",N,C,H,W);
printf("Hd:%d Wd:%d Hs:%d Ws:%d Hp:%d Wp:%d\n",Hd,Wd,Hs,Ws,Hp,Wp);
printf("N:%d K:%d Hy:%d Wy:%d\n",N,C,Hy,Wy);
printf("\n");
double xData[N*C*H*W]; fillRandom(xData,N*C*H*W);
double dyData[N*K*Hy*Wy]; fillRandom(dyData,N*K*Hy*Wy);
if(VERBOSE){
printf("x:\n");
print2Dd(xData, H, W);
printf("dy:\n");
print2Dd(dyData, Hy, Wy);
printf("\n");
}
double t0, time_elapsed;
double *x_h = &xData[0], *dy_h = &dyData[0]; // given
double y_h[N*K*Hy*Wy], dx_h[N*C*H*W]; // computed by cudnn
double y1_h[N*K*Hy*Wy], dx1_h[N*C*H*W]; // computed by kudnn
double *x_d, *y_d, *dx_d, *dy_d; // gpu pointers
gpuErrchk( hipMalloc(&x_d, sizeof(double)*N*C*H*W) );
gpuErrchk( hipMalloc(&dx_d, sizeof(double)*N*C*H*W) );
gpuErrchk( hipMalloc(&y_d, sizeof(double)*N*K*Hy*Wy) );
gpuErrchk( hipMalloc(&dy_d, sizeof(double)*N*K*Hy*Wy) );
// send x, dy to GPU
gpuErrchk( hipMemcpy(x_d, x_h, sizeof(double)*N*C*H*W, hipMemcpyHostToDevice) );
gpuErrchk( hipMemcpy(dy_d, dy_h, sizeof(double)*N*K*Hy*Wy, hipMemcpyHostToDevice) );
// end send x, dy to GPU
//gpuErrchk( hipMemset(y_d, 0, sizeof(double)*N*K*Hy*Wy) );
//print2Dd(y_h, Hy, Wy);
/**
CUDNN KUNET COMPARISON TESTS
**/
cudnnHandle_t handle = NULL;
cudnnTensorDescriptor_t xDesc = NULL;
cudnnTensorDescriptor_t dxDesc = NULL;
cudnnTensorDescriptor_t yDesc = NULL;
cudnnTensorDescriptor_t dyDesc = NULL;
cudnnPoolingDescriptor_t maxPool00Desc = NULL;
// creation
cudnnErrchk( cudnnCreate( &handle) );
cudnnErrchk( cudnnCreateTensorDescriptor( &xDesc) );
cudnnErrchk( cudnnCreateTensorDescriptor( &dxDesc) );
cudnnErrchk( cudnnCreateTensorDescriptor( &yDesc) );
cudnnErrchk( cudnnCreateTensorDescriptor( &dyDesc) );
cudnnErrchk( cudnnCreatePoolingDescriptor( &maxPool00Desc) );
// end creation
// set
cudnnErrchk( cudnnSetTensor4dDescriptor(xDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_DOUBLE, N, C, H, W) );
cudnnErrchk( cudnnSetTensor4dDescriptor(dxDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_DOUBLE, N, C, H, W) );
cudnnErrchk( cudnnSetTensor4dDescriptor(yDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_DOUBLE, N, K, Hy, Wy) );
cudnnErrchk( cudnnSetTensor4dDescriptor(dyDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_DOUBLE, N, K, Hy, Wy) );
cudnnErrchk( cudnnSetPooling2dDescriptor(maxPool00Desc, CUDNN_POOLING_MAX, Hd,Wd,0,0,Hs,Ws) );
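// Note: in the legacy cuDNN signature used here the arguments after the mode are
// (windowH, windowW, padH, padW, strideH, strideW); the two zeros are the paddings,
// matching Hp = Wp = 0 above.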
// end set input and conf
// set pool mode
cudnnPoolingDescriptor_t tpoolDesc = NULL;
tpoolDesc = maxPool00Desc;
printf("mode: maxPool00\n");
// end set pool mode
double alpha=1, beta=1;
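// cuDNN blends results as y = alpha*pool(x) + beta*y, so beta = 1 accumulates into whatever
// y_d already holds (beta = 0 would request a plain overwrite); the kudnn calls below are
// assumed to follow the same alpha/beta convention so the two outputs stay comparable.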
// forward test
printf("y:\n");
t0 = getTime();
cudnnErrchk( cudnnPoolingForward(handle, tpoolDesc, &alpha, xDesc, x_d, &beta, yDesc, y_d) );
gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() );
time_elapsed = getTime() - t0; printf("cudnn: %.4f\n",time_elapsed);
gpuErrchk( hipMemcpy(y_h, y_d, sizeof(double)*N*K*Hy*Wy, hipMemcpyDeviceToHost) );
t0 = getTime();
cudnnErrchk( kunetPoolingForward(handle, tpoolDesc, &alpha, xDesc, x_d, &beta, yDesc, y_d) );
time_elapsed = getTime() - t0; printf("kudnn: %.4f\n",time_elapsed);
gpuErrchk( hipMemcpy(y1_h, y_d, sizeof(double)*N*K*Hy*Wy, hipMemcpyDeviceToHost) );
if(VERBOSE){print2Dd(y_h, Hy, Wy); printf("\n"); print2Dd(y1_h, Hy, Wy);}
assert(eqseq(y_h,y1_h,N*K*Hy*Wy) < 1.0E-4);
printf("y: ok.\n\n");
// end forward test
// backward test
printf("dx:\n");
t0 = getTime();
cudnnErrchk( cudnnPoolingBackward(handle, tpoolDesc, &alpha, yDesc, y_d, dyDesc, dy_d, xDesc, x_d, &beta, dxDesc, dx_d) );
gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() );
time_elapsed = getTime() - t0; printf("cudnn: %.4f\n",time_elapsed);
gpuErrchk( hipMemcpy(dx_h, dx_d, sizeof(double)*N*C*H*W, hipMemcpyDeviceToHost) );
t0 = getTime();
cudnnErrchk( kunetPoolingBackward(handle, tpoolDesc, &alpha, yDesc, y_d, dyDesc, dy_d, xDesc, x_d, &beta, dxDesc, dx_d) );
time_elapsed = getTime() - t0; printf("kudnn: %.4f\n",time_elapsed);
gpuErrchk( hipMemcpy(dx1_h, dx_d, sizeof(double)*N*C*H*W, hipMemcpyDeviceToHost) );
if(VERBOSE){print2Dd(dx_h, H, W); printf("\n");print2Dd(dx1_h, H, W);}
assert(eqseq(dx_h,dx1_h,N*C*H*W) < 1.0E-4);
printf("dx:ok\n");
// end backward test
printf("ok.\n");
// destroy
if (xDesc != NULL) cudnnDestroyTensorDescriptor(xDesc);
if (dxDesc != NULL) cudnnDestroyTensorDescriptor(dxDesc);
if (yDesc != NULL) cudnnDestroyTensorDescriptor(yDesc);
if (dyDesc != NULL) cudnnDestroyTensorDescriptor(dyDesc);
if (maxPool00Desc != NULL) cudnnDestroyPoolingDescriptor(maxPool00Desc);
if (handle != NULL) cudnnDestroy(handle);
// free
hipFree(x_d); hipFree(y_d);
hipFree(dx_d); hipFree(dy_d);
// END TESTS
return 0;
}
| c1f18c0459608e99e83447e8a490c816e2e86bb3.cu | #include <stdio.h>
#include <cudnn.h>
#include <assert.h>
#include <math.h>
#include "kudnn.h"
#include <time.h>
#include "test.h"
#include <sys/time.h>
static const double kMicro = 1.0e-6;
double getTime()
{
struct timeval TV;
struct timezone TZ;
const int RC = gettimeofday(&TV, &TZ);
if(RC == -1) {
//cerr << "ERROR: Bad call to gettimeofday" << endl;
printf("ERROR: Bad call to gettimeofday\n");
return(-1);
}
return( ((double)TV.tv_sec) + kMicro * ((double)TV.tv_usec) );
} // end getTime()
int main(){
int VERBOSE=1;
srand(time(NULL));
const int N=1, C=1, H=6, W=6; // src
const int K=C, Hd=3, Wd=3; // window
// const int Hs=Hd, Ws=Wd; // stride
const int Hs=2, Ws=2; // stride
const int Hp=0, Wp=0; // padding
assert(H>=Hd); assert(W>=Wd);
const int Hy=1+ceil((H+2*Hp-Hd)/(double)Hs), Wy=1+ceil((W+2*Wp-Wd)/(double)Ws); // dst
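// Worked example of the formula above with these values: Hy = 1 + ceil((6 + 0 - 3)/2.0) = 1 + 2 = 3,
// and likewise Wy = 3, so the 6x6 input pooled by a 3x3 window at stride 2 gives a 3x3 output.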
printf("N:%d C:%d H:%d W:%d\n",N,C,H,W);
printf("Hd:%d Wd:%d Hs:%d Ws:%d Hp:%d Wp:%d\n",Hd,Wd,Hs,Ws,Hp,Wp);
printf("N:%d K:%d Hy:%d Wy:%d\n",N,C,Hy,Wy);
printf("\n");
double xData[N*C*H*W]; fillRandom(xData,N*C*H*W);
double dyData[N*K*Hy*Wy]; fillRandom(dyData,N*K*Hy*Wy);
if(VERBOSE){
printf("x:\n");
print2Dd(xData, H, W);
printf("dy:\n");
print2Dd(dyData, Hy, Wy);
printf("\n");
}
double t0, time_elapsed;
double *x_h = &xData[0], *dy_h = &dyData[0]; // given
double y_h[N*K*Hy*Wy], dx_h[N*C*H*W]; // computed by cudnn
double y1_h[N*K*Hy*Wy], dx1_h[N*C*H*W]; // computed by kudnn
double *x_d, *y_d, *dx_d, *dy_d; // gpu pointers
gpuErrchk( cudaMalloc(&x_d, sizeof(double)*N*C*H*W) );
gpuErrchk( cudaMalloc(&dx_d, sizeof(double)*N*C*H*W) );
gpuErrchk( cudaMalloc(&y_d, sizeof(double)*N*K*Hy*Wy) );
gpuErrchk( cudaMalloc(&dy_d, sizeof(double)*N*K*Hy*Wy) );
// send x, dy to GPU
gpuErrchk( cudaMemcpy(x_d, x_h, sizeof(double)*N*C*H*W, cudaMemcpyHostToDevice) );
gpuErrchk( cudaMemcpy(dy_d, dy_h, sizeof(double)*N*K*Hy*Wy, cudaMemcpyHostToDevice) );
// end send x, dy to GPU
//gpuErrchk( cudaMemset(y_d, 0, sizeof(double)*N*K*Hy*Wy) );
//print2Dd(y_h, Hy, Wy);
/**
CUDNN KUNET COMPARISON TESTS
**/
cudnnHandle_t handle = NULL;
cudnnTensorDescriptor_t xDesc = NULL;
cudnnTensorDescriptor_t dxDesc = NULL;
cudnnTensorDescriptor_t yDesc = NULL;
cudnnTensorDescriptor_t dyDesc = NULL;
cudnnPoolingDescriptor_t maxPool00Desc = NULL;
// creation
cudnnErrchk( cudnnCreate( &handle) );
cudnnErrchk( cudnnCreateTensorDescriptor( &xDesc) );
cudnnErrchk( cudnnCreateTensorDescriptor( &dxDesc) );
cudnnErrchk( cudnnCreateTensorDescriptor( &yDesc) );
cudnnErrchk( cudnnCreateTensorDescriptor( &dyDesc) );
cudnnErrchk( cudnnCreatePoolingDescriptor( &maxPool00Desc) );
// end creation
// set
cudnnErrchk( cudnnSetTensor4dDescriptor(xDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_DOUBLE, N, C, H, W) );
cudnnErrchk( cudnnSetTensor4dDescriptor(dxDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_DOUBLE, N, C, H, W) );
cudnnErrchk( cudnnSetTensor4dDescriptor(yDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_DOUBLE, N, K, Hy, Wy) );
cudnnErrchk( cudnnSetTensor4dDescriptor(dyDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_DOUBLE, N, K, Hy, Wy) );
cudnnErrchk( cudnnSetPooling2dDescriptor(maxPool00Desc, CUDNN_POOLING_MAX, Hd,Wd,0,0,Hs,Ws) );
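// Note: in the legacy cuDNN signature used here the arguments after the mode are
// (windowH, windowW, padH, padW, strideH, strideW); the two zeros are the paddings,
// matching Hp = Wp = 0 above.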
// end set input and conf
// set pool mode
cudnnPoolingDescriptor_t tpoolDesc = NULL;
tpoolDesc = maxPool00Desc;
printf("mode: maxPool00\n");
// end set pool mode
double alpha=1, beta=1;
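// cuDNN blends results as y = alpha*pool(x) + beta*y, so beta = 1 accumulates into whatever
// y_d already holds (beta = 0 would request a plain overwrite); the kudnn calls below are
// assumed to follow the same alpha/beta convention so the two outputs stay comparable.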
// forward test
printf("y:\n");
t0 = getTime();
cudnnErrchk( cudnnPoolingForward(handle, tpoolDesc, &alpha, xDesc, x_d, &beta, yDesc, y_d) );
gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() );
time_elapsed = getTime() - t0; printf("cudnn: %.4f\n",time_elapsed);
gpuErrchk( cudaMemcpy(y_h, y_d, sizeof(double)*N*K*Hy*Wy, cudaMemcpyDeviceToHost) );
t0 = getTime();
cudnnErrchk( kunetPoolingForward(handle, tpoolDesc, &alpha, xDesc, x_d, &beta, yDesc, y_d) );
time_elapsed = getTime() - t0; printf("kudnn: %.4f\n",time_elapsed);
gpuErrchk( cudaMemcpy(y1_h, y_d, sizeof(double)*N*K*Hy*Wy, cudaMemcpyDeviceToHost) );
if(VERBOSE){print2Dd(y_h, Hy, Wy); printf("\n"); print2Dd(y1_h, Hy, Wy);}
assert(eqseq(y_h,y1_h,N*K*Hy*Wy) < 1.0E-4);
printf("y: ok.\n\n");
// end forward test
// backward test
printf("dx:\n");
t0 = getTime();
cudnnErrchk( cudnnPoolingBackward(handle, tpoolDesc, &alpha, yDesc, y_d, dyDesc, dy_d, xDesc, x_d, &beta, dxDesc, dx_d) );
gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() );
time_elapsed = getTime() - t0; printf("cudnn: %.4f\n",time_elapsed);
gpuErrchk( cudaMemcpy(dx_h, dx_d, sizeof(double)*N*C*H*W, cudaMemcpyDeviceToHost) );
t0 = getTime();
cudnnErrchk( kunetPoolingBackward(handle, tpoolDesc, &alpha, yDesc, y_d, dyDesc, dy_d, xDesc, x_d, &beta, dxDesc, dx_d) );
time_elapsed = getTime() - t0; printf("kudnn: %.4f\n",time_elapsed);
gpuErrchk( cudaMemcpy(dx1_h, dx_d, sizeof(double)*N*C*H*W, cudaMemcpyDeviceToHost) );
if(VERBOSE){print2Dd(dx_h, H, W); printf("\n");print2Dd(dx1_h, H, W);}
assert(eqseq(dx_h,dx1_h,N*C*H*W) < 1.0E-4);
printf("dx:ok\n");
// end backward test
printf("ok.\n");
// destroy
if (xDesc != NULL) cudnnDestroyTensorDescriptor(xDesc);
if (dxDesc != NULL) cudnnDestroyTensorDescriptor(dxDesc);
if (yDesc != NULL) cudnnDestroyTensorDescriptor(yDesc);
if (dyDesc != NULL) cudnnDestroyTensorDescriptor(dyDesc);
if (maxPool00Desc != NULL) cudnnDestroyPoolingDescriptor(maxPool00Desc);
if (handle != NULL) cudnnDestroy(handle);
// free
cudaFree(x_d); cudaFree(y_d);
cudaFree(dx_d); cudaFree(dy_d);
// END TESTS
return 0;
}
|
577d6b07a75c2d83ac342b31e60c6fc1855b4146.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<curd_lib_host.h>
// Copyright 2009, Andrew Corrigan, [email protected]
// This code is from the AIAA-2009-4001 paper
// #include <cutil.h>
#include <helper_cuda.h>
#include <helper_timer.h>
#include <iostream>
#include <fstream>
#if CUDART_VERSION < 3000
struct double3
{
double x, y, z;
};
#endif
/*
* Options
*
*/
#define GAMMA 1.4
#define iterations 2000
#ifndef block_length
#define block_length 128
#endif
#define NDIM 3
#define NNB 4
#define RK 3 // 3rd order RK
#define ff_mach 1.2
#define deg_angle_of_attack 0.0
/*
* not options
*/
#if block_length > 128
#warning "the kernels may fail too launch on some systems if the block length is too large"
#endif
#define VAR_DENSITY 0
#define VAR_MOMENTUM 1
#define VAR_DENSITY_ENERGY (VAR_MOMENTUM+NDIM)
#define NVAR (VAR_DENSITY_ENERGY+1)
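/*
 * Layout note: the solution is stored struct-of-arrays. All NVAR = 5 conserved quantities
 * (density, x/y/z momentum, density*energy) of element i live at variables[i + var*nelr],
 * so threads with consecutive i read consecutive addresses for each variable (coalesced access).
 */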
/*
* Generic functions
*/
template <typename T>
T* alloc(int N)
{
T* t;
checkCudaErrors(hipMalloc((void**)&t, sizeof(T)*N));
return t;
}
template <typename T>
void dealloc(T* array)
{
checkCudaErrors(hipFree((void*)array));
}
template <typename T>
void copy(T* dst, T* src, int N)
{
checkCudaErrors(hipMemcpy((void*)dst, (void*)src, N*sizeof(T), hipMemcpyDeviceToDevice));
}
template <typename T>
void upload(T* dst, T* src, int N)
{
checkCudaErrors(hipMemcpy((void*)dst, (void*)src, N*sizeof(T), hipMemcpyHostToDevice));
}
template <typename T>
void download(T* dst, T* src, int N)
{
checkCudaErrors(hipMemcpy((void*)dst, (void*)src, N*sizeof(T), hipMemcpyDeviceToHost));
}
void dump(double* variables, int nel, int nelr)
{
double* h_variables = new double[nelr*NVAR];
download(h_variables, variables, nelr*NVAR);
{
std::ofstream file("density");
file << nel << " " << nelr << std::endl;
for(int i = 0; i < nel; i++) file << h_variables[i + VAR_DENSITY*nelr] << std::endl;
}
{
std::ofstream file("momentum");
file << nel << " " << nelr << std::endl;
for(int i = 0; i < nel; i++)
{
for(int j = 0; j != NDIM; j++)
file << h_variables[i + (VAR_MOMENTUM+j)*nelr] << " ";
file << std::endl;
}
}
{
std::ofstream file("density_energy");
file << nel << " " << nelr << std::endl;
for(int i = 0; i < nel; i++) file << h_variables[i + VAR_DENSITY_ENERGY*nelr] << std::endl;
}
delete[] h_variables;
}
/*
* Element-based Cell-centered FVM solver functions
*/
__constant__ double ff_variable[NVAR];
__constant__ double3 ff_flux_contribution_momentum_x[1];
__constant__ double3 ff_flux_contribution_momentum_y[1];
__constant__ double3 ff_flux_contribution_momentum_z[1];
__constant__ double3 ff_flux_contribution_density_energy[1];
__global__ void cuda_initialize_variables(int nelr, double* variables)
{
const int i = (blockDim.x*blockIdx.x + threadIdx.x);
for(int j = 0; j < NVAR; j++)
variables[i + j*nelr] = ff_variable[j];
}
void initialize_variables(int nelr, double* variables)
{
dim3 Dg(nelr / block_length), Db(block_length);
allocateReadWriteSets(Dg, Db);
hipLaunchKernelGGL(( cuda_initialize_variables), dim3(Dg), dim3(Db), 0, 0, nelr, variables);
freeReadWriteSets(Dg, Db);
hipError_t error = hipGetLastError();
if (error != hipSuccess)
{
fprintf(stderr,"GPUassert: %s Initializing variables\n", hipGetErrorString(error));
exit(-1);
}
}
__device__ __host__ inline void compute_flux_contribution(double& density, double3& momentum, double& density_energy, double& pressure, double3& velocity, double3& fc_momentum_x, double3& fc_momentum_y, double3& fc_momentum_z, double3& fc_density_energy)
{
fc_momentum_x.x = velocity.x*momentum.x + pressure;
fc_momentum_x.y = velocity.x*momentum.y;
fc_momentum_x.z = velocity.x*momentum.z;
fc_momentum_y.x = fc_momentum_x.y;
fc_momentum_y.y = velocity.y*momentum.y + pressure;
fc_momentum_y.z = velocity.y*momentum.z;
fc_momentum_z.x = fc_momentum_x.z;
fc_momentum_z.y = fc_momentum_y.z;
fc_momentum_z.z = velocity.z*momentum.z + pressure;
double de_p = density_energy+pressure;
fc_density_energy.x = velocity.x*de_p;
fc_density_energy.y = velocity.y*de_p;
fc_density_energy.z = velocity.z*de_p;
}
__device__ inline void compute_velocity(double& density, double3& momentum, double3& velocity)
{
velocity.x = momentum.x / density;
velocity.y = momentum.y / density;
velocity.z = momentum.z / density;
}
__device__ inline double compute_speed_sqd(double3& velocity)
{
return velocity.x*velocity.x + velocity.y*velocity.y + velocity.z*velocity.z;
}
__device__ inline double compute_pressure(double& density, double& density_energy, double& speed_sqd)
{
return (double(GAMMA)-double(1.0))*(density_energy - double(0.5)*density*speed_sqd);
}
__device__ inline double compute_speed_of_sound(double& density, double& pressure)
{
return sqrt(double(GAMMA)*pressure/density);
}
__global__ void cuda_compute_step_factor(int nelr, double* variables, double* areas, double* step_factors)
{
const int i = (blockDim.x*blockIdx.x + threadIdx.x);
double density = variables[i + VAR_DENSITY*nelr];
double3 momentum;
momentum.x = variables[i + (VAR_MOMENTUM+0)*nelr];
momentum.y = variables[i + (VAR_MOMENTUM+1)*nelr];
momentum.z = variables[i + (VAR_MOMENTUM+2)*nelr];
double density_energy = variables[i + VAR_DENSITY_ENERGY*nelr];
double3 velocity; compute_velocity(density, momentum, velocity);
double speed_sqd = compute_speed_sqd(velocity);
double pressure = compute_pressure(density, density_energy, speed_sqd);
double speed_of_sound = compute_speed_of_sound(density, pressure);
// dt = double(0.5) * sqrt(areas[i]) / (||v|| + c).... but when we do time stepping, this later would need to be divided by the area, so we just do it all at once
step_factors[i] = double(0.5) / (sqrt(areas[i]) * (sqrt(speed_sqd) + speed_of_sound));
}
void compute_step_factor(int nelr, double* variables, double* areas, double* step_factors)
{
dim3 Dg(nelr / block_length), Db(block_length);
allocateReadWriteSets(Dg, Db);
hipLaunchKernelGGL(( cuda_compute_step_factor), dim3(Dg), dim3(Db), 0, 0, nelr, variables, areas, step_factors);
freeReadWriteSets(Dg, Db);
hipError_t error = hipGetLastError();
if (error != hipSuccess)
{
fprintf(stderr,"GPUassert: %s compute_step_factor failed\n", hipGetErrorString(error));
exit(-1);
}
}
/*
*
*
*/
__global__ void cuda_compute_flux(int nelr, int* elements_surrounding_elements, double* normals, double* variables, double* fluxes)
{
const double smoothing_coefficient = double(0.2f);
const int i = (blockDim.x*blockIdx.x + threadIdx.x);
int j, nb;
double3 normal; double normal_len;
double factor;
double density_i = variables[i + VAR_DENSITY*nelr];
double3 momentum_i;
momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr];
momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr];
momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr];
double density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr];
double3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i);
double speed_sqd_i = compute_speed_sqd(velocity_i);
double speed_i = sqrt(speed_sqd_i);
double pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
double speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);
double3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z;
double3 flux_contribution_i_density_energy;
compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy);
double flux_i_density = double(0.0);
double3 flux_i_momentum;
flux_i_momentum.x = double(0.0);
flux_i_momentum.y = double(0.0);
flux_i_momentum.z = double(0.0);
double flux_i_density_energy = double(0.0);
double3 velocity_nb;
double density_nb, density_energy_nb;
double3 momentum_nb;
double3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z;
double3 flux_contribution_nb_density_energy;
double speed_sqd_nb, speed_of_sound_nb, pressure_nb;
#pragma unroll
for(j = 0; j < NNB; j++)
{
nb = elements_surrounding_elements[i + j*nelr];
normal.x = normals[i + (j + 0*NNB)*nelr];
normal.y = normals[i + (j + 1*NNB)*nelr];
normal.z = normals[i + (j + 2*NNB)*nelr];
normal_len = sqrt(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z);
if(nb >= 0) // a legitimate neighbor
{
density_nb = variables[nb + VAR_DENSITY*nelr];
momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr];
momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr];
momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr];
density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr];
compute_velocity(density_nb, momentum_nb, velocity_nb);
speed_sqd_nb = compute_speed_sqd(velocity_nb);
pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);
speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);
compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy);
// artificial viscosity
factor = -normal_len*smoothing_coefficient*double(0.5)*(speed_i + sqrt(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);
flux_i_density += factor*(density_i-density_nb);
flux_i_density_energy += factor*(density_energy_i-density_energy_nb);
flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x);
flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y);
flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z);
// accumulate cell-centered fluxes
factor = double(0.5)*normal.x;
flux_i_density += factor*(momentum_nb.x+momentum_i.x);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x);
factor = double(0.5)*normal.y;
flux_i_density += factor*(momentum_nb.y+momentum_i.y);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y);
factor = double(0.5)*normal.z;
flux_i_density += factor*(momentum_nb.z+momentum_i.z);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z);
}
else if(nb == -1) // a wing boundary
{
flux_i_momentum.x += normal.x*pressure_i;
flux_i_momentum.y += normal.y*pressure_i;
flux_i_momentum.z += normal.z*pressure_i;
}
else if(nb == -2) // a far field boundary
{
factor = double(0.5)*normal.x;
flux_i_density += factor*(ff_variable[VAR_MOMENTUM+0]+momentum_i.x);
flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].x+flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].x + flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].x + flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].x + flux_contribution_i_momentum_z.x);
factor = double(0.5)*normal.y;
flux_i_density += factor*(ff_variable[VAR_MOMENTUM+1]+momentum_i.y);
flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].y+flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].y + flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].y + flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].y + flux_contribution_i_momentum_z.y);
factor = double(0.5)*normal.z;
flux_i_density += factor*(ff_variable[VAR_MOMENTUM+2]+momentum_i.z);
flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].z+flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].z + flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].z + flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].z + flux_contribution_i_momentum_z.z);
}
}
fluxes[i + VAR_DENSITY*nelr] = flux_i_density;
fluxes[i + (VAR_MOMENTUM+0)*nelr] = flux_i_momentum.x;
fluxes[i + (VAR_MOMENTUM+1)*nelr] = flux_i_momentum.y;
fluxes[i + (VAR_MOMENTUM+2)*nelr] = flux_i_momentum.z;
fluxes[i + VAR_DENSITY_ENERGY*nelr] = flux_i_density_energy;
}
void compute_flux(int nelr, int* elements_surrounding_elements, double* normals, double* variables, double* fluxes)
{
dim3 Dg(nelr / block_length), Db(block_length);
allocateReadWriteSets(Dg,Db);
hipLaunchKernelGGL(( cuda_compute_flux), dim3(Dg),dim3(Db), 0, 0, nelr, elements_surrounding_elements, normals, variables, fluxes);
freeReadWriteSets(Dg,Db);
hipError_t error = hipGetLastError();
if (error != hipSuccess)
{
fprintf(stderr,"GPUassert: %s compute_flux failed\n", hipGetErrorString(error));
exit(-1);
}
}
__global__ void cuda_time_step(int j, int nelr, double* old_variables, double* variables, double* step_factors, double* fluxes)
{
const int i = (blockDim.x*blockIdx.x + threadIdx.x);
double factor = step_factors[i]/double(RK+1-j);
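// step_factors holds 0.5/(sqrt(area)*(|v|+c)); with RK = 3 the divisor RK+1-j runs 4, 3, 2
// over stages j = 0..2, so each stage applies a successively larger fraction of the local
// time step, always restarting from old_variables (a low-storage Runge-Kutta-style update).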
variables[i + VAR_DENSITY*nelr] = old_variables[i + VAR_DENSITY*nelr] + factor*fluxes[i + VAR_DENSITY*nelr];
variables[i + VAR_DENSITY_ENERGY*nelr] = old_variables[i + VAR_DENSITY_ENERGY*nelr] + factor*fluxes[i + VAR_DENSITY_ENERGY*nelr];
variables[i + (VAR_MOMENTUM+0)*nelr] = old_variables[i + (VAR_MOMENTUM+0)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+0)*nelr];
variables[i + (VAR_MOMENTUM+1)*nelr] = old_variables[i + (VAR_MOMENTUM+1)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+1)*nelr];
variables[i + (VAR_MOMENTUM+2)*nelr] = old_variables[i + (VAR_MOMENTUM+2)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+2)*nelr];
}
void time_step(int j, int nelr, double* old_variables, double* variables, double* step_factors, double* fluxes)
{
dim3 Dg(nelr / block_length), Db(block_length);
allocateReadWriteSets(Dg,Db);
hipLaunchKernelGGL(( cuda_time_step), dim3(Dg),dim3(Db), 0, 0, j, nelr, old_variables, variables, step_factors, fluxes);
freeReadWriteSets(Dg,Db);
hipError_t error = hipGetLastError();
if (error != hipSuccess)
{
fprintf(stderr,"GPUassert: %s update failed\n", hipGetErrorString(error));
exit(-1);
}
}
/*
* Main function
*/
int main(int argc, char** argv)
{
if (argc < 2)
{
std::cout << "specify data file name" << std::endl;
return 0;
}
const char* data_file_name = argv[1];
hipDeviceProp_t prop;
int dev;
// CUDA_SAFE_CALL(hipSetDevice(0));
// CUDA_SAFE_CALL(hipGetDevice(&dev));
// CUDA_SAFE_CALL(hipGetDeviceProperties(&prop, dev));
checkCudaErrors(hipSetDevice(0));
checkCudaErrors(hipGetDevice(&dev));
checkCudaErrors(hipGetDeviceProperties(&prop, dev));
printf("Name: %s\n", prop.name);
// set far field conditions and load them into constant memory on the gpu
{
double h_ff_variable[NVAR];
const double angle_of_attack = double(3.1415926535897931 / 180.0) * double(deg_angle_of_attack);
h_ff_variable[VAR_DENSITY] = double(1.4);
double ff_pressure = double(1.0);
double ff_speed_of_sound = sqrt(GAMMA*ff_pressure / h_ff_variable[VAR_DENSITY]);
double ff_speed = double(ff_mach)*ff_speed_of_sound;
double3 ff_velocity;
ff_velocity.x = ff_speed*double(cos((double)angle_of_attack));
ff_velocity.y = ff_speed*double(sin((double)angle_of_attack));
ff_velocity.z = 0.0;
h_ff_variable[VAR_MOMENTUM+0] = h_ff_variable[VAR_DENSITY] * ff_velocity.x;
h_ff_variable[VAR_MOMENTUM+1] = h_ff_variable[VAR_DENSITY] * ff_velocity.y;
h_ff_variable[VAR_MOMENTUM+2] = h_ff_variable[VAR_DENSITY] * ff_velocity.z;
h_ff_variable[VAR_DENSITY_ENERGY] = h_ff_variable[VAR_DENSITY]*(double(0.5)*(ff_speed*ff_speed)) + (ff_pressure / double(GAMMA-1.0));
double3 h_ff_momentum;
h_ff_momentum.x = *(h_ff_variable+VAR_MOMENTUM+0);
h_ff_momentum.y = *(h_ff_variable+VAR_MOMENTUM+1);
h_ff_momentum.z = *(h_ff_variable+VAR_MOMENTUM+2);
double3 h_ff_flux_contribution_momentum_x;
double3 h_ff_flux_contribution_momentum_y;
double3 h_ff_flux_contribution_momentum_z;
double3 h_ff_flux_contribution_density_energy;
compute_flux_contribution(h_ff_variable[VAR_DENSITY], h_ff_momentum, h_ff_variable[VAR_DENSITY_ENERGY], ff_pressure, ff_velocity, h_ff_flux_contribution_momentum_x, h_ff_flux_contribution_momentum_y, h_ff_flux_contribution_momentum_z, h_ff_flux_contribution_density_energy);
// copy far field conditions to the gpu
checkCudaErrors( hipMemcpyToSymbol(ff_variable, h_ff_variable, NVAR*sizeof(double)) );
checkCudaErrors( hipMemcpyToSymbol(ff_flux_contribution_momentum_x, &h_ff_flux_contribution_momentum_x, sizeof(double3)) );
checkCudaErrors( hipMemcpyToSymbol(ff_flux_contribution_momentum_y, &h_ff_flux_contribution_momentum_y, sizeof(double3)) );
checkCudaErrors( hipMemcpyToSymbol(ff_flux_contribution_momentum_z, &h_ff_flux_contribution_momentum_z, sizeof(double3)) );
checkCudaErrors( hipMemcpyToSymbol(ff_flux_contribution_density_energy, &h_ff_flux_contribution_density_energy, sizeof(double3)) );
}
int nel;
int nelr;
// read in domain geometry
double* areas;
int* elements_surrounding_elements;
double* normals;
{
std::ifstream file(data_file_name);
file >> nel;
nelr = block_length*((nel / block_length )+ ::min(1, nel % block_length));
double* h_areas = new double[nelr];
int* h_elements_surrounding_elements = new int[nelr*NNB];
double* h_normals = new double[nelr*NDIM*NNB];
// read in data
for(int i = 0; i < nel; i++)
{
file >> h_areas[i];
for(int j = 0; j < NNB; j++)
{
file >> h_elements_surrounding_elements[i + j*nelr];
if(h_elements_surrounding_elements[i+j*nelr] < 0) h_elements_surrounding_elements[i+j*nelr] = -1;
h_elements_surrounding_elements[i + j*nelr]--; //it's coming in with Fortran numbering
for(int k = 0; k < NDIM; k++)
{
file >> h_normals[i + (j + k*NNB)*nelr];
h_normals[i + (j + k*NNB)*nelr] = -h_normals[i + (j + k*NNB)*nelr];
}
}
}
// fill in remaining data
int last = nel-1;
for(int i = nel; i < nelr; i++)
{
h_areas[i] = h_areas[last];
for(int j = 0; j < NNB; j++)
{
// duplicate the last element
h_elements_surrounding_elements[i + j*nelr] = h_elements_surrounding_elements[last + j*nelr];
for(int k = 0; k < NDIM; k++) h_normals[i + (j + k*NNB)*nelr] = h_normals[last + (j + k*NNB)*nelr];
}
}
areas = alloc<double>(nelr);
upload<double>(areas, h_areas, nelr);
elements_surrounding_elements = alloc<int>(nelr*NNB);
upload<int>(elements_surrounding_elements, h_elements_surrounding_elements, nelr*NNB);
normals = alloc<double>(nelr*NDIM*NNB);
upload<double>(normals, h_normals, nelr*NDIM*NNB);
delete[] h_areas;
delete[] h_elements_surrounding_elements;
delete[] h_normals;
}
// Create arrays and set initial conditions
double* variables = alloc<double>(nelr*NVAR);
initialize_variables(nelr, variables);
double* old_variables = alloc<double>(nelr*NVAR);
double* fluxes = alloc<double>(nelr*NVAR);
double* step_factors = alloc<double>(nelr);
// make sure all memory is really allocated before we start timing
initialize_variables(nelr, old_variables);
initialize_variables(nelr, fluxes);
hipMemset( (void*) step_factors, 0, sizeof(double)*nelr );
// make sure CUDA isn't still doing something before we start timing
hipDeviceSynchronize();
// these need to be computed the first time in order to compute time step
std::cout << "Starting..." << std::endl;
hipError_t error;
StopWatchInterface *timer = NULL;
sdkCreateTimer( &timer);
sdkStartTimer( &timer);
// Begin iterations
for(int i = 0; i < iterations; i++)
{
copy<double>(old_variables, variables, nelr*NVAR);
// for the first iteration we compute the time step
compute_step_factor(nelr, variables, areas, step_factors);
error = hipGetLastError();
if (error != hipSuccess)
{
fprintf(stderr,"GPUassert: %s compute_step_factor failed\n", hipGetErrorString(error));
exit(-1);
}
for(int j = 0; j < RK; j++)
{
compute_flux(nelr, elements_surrounding_elements, normals, variables, fluxes);
error = hipGetLastError();
if (error != hipSuccess)
{
fprintf(stderr,"GPUassert: %s compute_flux failed\n", hipGetErrorString(error));
exit(-1);
}
time_step(j, nelr, old_variables, variables, step_factors, fluxes);
error = hipGetLastError();
if (error != hipSuccess)
{
fprintf(stderr,"GPUassert: %s time_step failed\n", hipGetErrorString(error));
exit(-1);
}
}
}
hipDeviceSynchronize();
sdkStopTimer(&timer);
std::cout << (sdkGetAverageTimerValue(&timer)/1000.0) / iterations << " seconds per iteration" << std::endl;
std::cout << "Saving solution..." << std::endl;
dump(variables, nel, nelr);
std::cout << "Saved solution..." << std::endl;
std::cout << "Cleaning up..." << std::endl;
dealloc<double>(areas);
dealloc<int>(elements_surrounding_elements);
dealloc<double>(normals);
dealloc<double>(variables);
dealloc<double>(old_variables);
dealloc<double>(fluxes);
dealloc<double>(step_factors);
std::cout << "Done..." << std::endl;
return 0;
}
| 577d6b07a75c2d83ac342b31e60c6fc1855b4146.cu | #include<curd_lib_host.h>
// Copyright 2009, Andrew Corrigan, [email protected]
// This code is from the AIAA-2009-4001 paper
// #include <cutil.h>
#include <helper_cuda.h>
#include <helper_timer.h>
#include <iostream>
#include <fstream>
#if CUDART_VERSION < 3000
struct double3
{
double x, y, z;
};
#endif
/*
* Options
*
*/
#define GAMMA 1.4
#define iterations 2000
#ifndef block_length
#define block_length 128
#endif
#define NDIM 3
#define NNB 4
#define RK 3 // 3rd order RK
#define ff_mach 1.2
#define deg_angle_of_attack 0.0
/*
* not options
*/
#if block_length > 128
#warning "the kernels may fail to launch on some systems if the block length is too large"
#endif
#define VAR_DENSITY 0
#define VAR_MOMENTUM 1
#define VAR_DENSITY_ENERGY (VAR_MOMENTUM+NDIM)
#define NVAR (VAR_DENSITY_ENERGY+1)
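/*
 * Layout note: the solution is stored struct-of-arrays. All NVAR = 5 conserved quantities
 * (density, x/y/z momentum, density*energy) of element i live at variables[i + var*nelr],
 * so threads with consecutive i read consecutive addresses for each variable (coalesced access).
 */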
/*
* Generic functions
*/
template <typename T>
T* alloc(int N)
{
T* t;
checkCudaErrors(cudaMalloc((void**)&t, sizeof(T)*N));
return t;
}
template <typename T>
void dealloc(T* array)
{
checkCudaErrors(cudaFree((void*)array));
}
template <typename T>
void copy(T* dst, T* src, int N)
{
checkCudaErrors(cudaMemcpy((void*)dst, (void*)src, N*sizeof(T), cudaMemcpyDeviceToDevice));
}
template <typename T>
void upload(T* dst, T* src, int N)
{
checkCudaErrors(cudaMemcpy((void*)dst, (void*)src, N*sizeof(T), cudaMemcpyHostToDevice));
}
template <typename T>
void download(T* dst, T* src, int N)
{
checkCudaErrors(cudaMemcpy((void*)dst, (void*)src, N*sizeof(T), cudaMemcpyDeviceToHost));
}
void dump(double* variables, int nel, int nelr)
{
double* h_variables = new double[nelr*NVAR];
download(h_variables, variables, nelr*NVAR);
{
std::ofstream file("density");
file << nel << " " << nelr << std::endl;
for(int i = 0; i < nel; i++) file << h_variables[i + VAR_DENSITY*nelr] << std::endl;
}
{
std::ofstream file("momentum");
file << nel << " " << nelr << std::endl;
for(int i = 0; i < nel; i++)
{
for(int j = 0; j != NDIM; j++)
file << h_variables[i + (VAR_MOMENTUM+j)*nelr] << " ";
file << std::endl;
}
}
{
std::ofstream file("density_energy");
file << nel << " " << nelr << std::endl;
for(int i = 0; i < nel; i++) file << h_variables[i + VAR_DENSITY_ENERGY*nelr] << std::endl;
}
delete[] h_variables;
}
/*
* Element-based Cell-centered FVM solver functions
*/
__constant__ double ff_variable[NVAR];
__constant__ double3 ff_flux_contribution_momentum_x[1];
__constant__ double3 ff_flux_contribution_momentum_y[1];
__constant__ double3 ff_flux_contribution_momentum_z[1];
__constant__ double3 ff_flux_contribution_density_energy[1];
__global__ void cuda_initialize_variables(int nelr, double* variables)
{
const int i = (blockDim.x*blockIdx.x + threadIdx.x);
for(int j = 0; j < NVAR; j++)
variables[i + j*nelr] = ff_variable[j];
}
void initialize_variables(int nelr, double* variables)
{
dim3 Dg(nelr / block_length), Db(block_length);
allocateReadWriteSets(Dg, Db);
cuda_initialize_variables<<<Dg, Db>>>(nelr, variables);
freeReadWriteSets(Dg, Db);
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s Initializing variables\n", cudaGetErrorString(error));
exit(-1);
}
}
__device__ __host__ inline void compute_flux_contribution(double& density, double3& momentum, double& density_energy, double& pressure, double3& velocity, double3& fc_momentum_x, double3& fc_momentum_y, double3& fc_momentum_z, double3& fc_density_energy)
{
fc_momentum_x.x = velocity.x*momentum.x + pressure;
fc_momentum_x.y = velocity.x*momentum.y;
fc_momentum_x.z = velocity.x*momentum.z;
fc_momentum_y.x = fc_momentum_x.y;
fc_momentum_y.y = velocity.y*momentum.y + pressure;
fc_momentum_y.z = velocity.y*momentum.z;
fc_momentum_z.x = fc_momentum_x.z;
fc_momentum_z.y = fc_momentum_y.z;
fc_momentum_z.z = velocity.z*momentum.z + pressure;
double de_p = density_energy+pressure;
fc_density_energy.x = velocity.x*de_p;
fc_density_energy.y = velocity.y*de_p;
fc_density_energy.z = velocity.z*de_p;
}
__device__ inline void compute_velocity(double& density, double3& momentum, double3& velocity)
{
velocity.x = momentum.x / density;
velocity.y = momentum.y / density;
velocity.z = momentum.z / density;
}
__device__ inline double compute_speed_sqd(double3& velocity)
{
return velocity.x*velocity.x + velocity.y*velocity.y + velocity.z*velocity.z;
}
__device__ inline double compute_pressure(double& density, double& density_energy, double& speed_sqd)
{
return (double(GAMMA)-double(1.0))*(density_energy - double(0.5)*density*speed_sqd);
}
__device__ inline double compute_speed_of_sound(double& density, double& pressure)
{
return sqrt(double(GAMMA)*pressure/density);
}
__global__ void cuda_compute_step_factor(int nelr, double* variables, double* areas, double* step_factors)
{
const int i = (blockDim.x*blockIdx.x + threadIdx.x);
double density = variables[i + VAR_DENSITY*nelr];
double3 momentum;
momentum.x = variables[i + (VAR_MOMENTUM+0)*nelr];
momentum.y = variables[i + (VAR_MOMENTUM+1)*nelr];
momentum.z = variables[i + (VAR_MOMENTUM+2)*nelr];
double density_energy = variables[i + VAR_DENSITY_ENERGY*nelr];
double3 velocity; compute_velocity(density, momentum, velocity);
double speed_sqd = compute_speed_sqd(velocity);
double pressure = compute_pressure(density, density_energy, speed_sqd);
double speed_of_sound = compute_speed_of_sound(density, pressure);
// dt = double(0.5) * sqrt(areas[i]) / (||v|| + c).... but when we do time stepping, this later would need to be divided by the area, so we just do it all at once
step_factors[i] = double(0.5) / (sqrt(areas[i]) * (sqrt(speed_sqd) + speed_of_sound));
}
void compute_step_factor(int nelr, double* variables, double* areas, double* step_factors)
{
dim3 Dg(nelr / block_length), Db(block_length);
allocateReadWriteSets(Dg, Db);
cuda_compute_step_factor<<<Dg, Db>>>(nelr, variables, areas, step_factors);
freeReadWriteSets(Dg, Db);
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s compute_step_factor failed\n", cudaGetErrorString(error));
exit(-1);
}
}
/*
*
*
*/
__global__ void cuda_compute_flux(int nelr, int* elements_surrounding_elements, double* normals, double* variables, double* fluxes)
{
const double smoothing_coefficient = double(0.2f);
const int i = (blockDim.x*blockIdx.x + threadIdx.x);
int j, nb;
double3 normal; double normal_len;
double factor;
double density_i = variables[i + VAR_DENSITY*nelr];
double3 momentum_i;
momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr];
momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr];
momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr];
double density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr];
double3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i);
double speed_sqd_i = compute_speed_sqd(velocity_i);
double speed_i = sqrt(speed_sqd_i);
double pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
double speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);
double3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z;
double3 flux_contribution_i_density_energy;
compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy);
double flux_i_density = double(0.0);
double3 flux_i_momentum;
flux_i_momentum.x = double(0.0);
flux_i_momentum.y = double(0.0);
flux_i_momentum.z = double(0.0);
double flux_i_density_energy = double(0.0);
double3 velocity_nb;
double density_nb, density_energy_nb;
double3 momentum_nb;
double3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z;
double3 flux_contribution_nb_density_energy;
double speed_sqd_nb, speed_of_sound_nb, pressure_nb;
#pragma unroll
for(j = 0; j < NNB; j++)
{
nb = elements_surrounding_elements[i + j*nelr];
normal.x = normals[i + (j + 0*NNB)*nelr];
normal.y = normals[i + (j + 1*NNB)*nelr];
normal.z = normals[i + (j + 2*NNB)*nelr];
normal_len = sqrt(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z);
if(nb >= 0) // a legitimate neighbor
{
density_nb = variables[nb + VAR_DENSITY*nelr];
momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr];
momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr];
momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr];
density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr];
compute_velocity(density_nb, momentum_nb, velocity_nb);
speed_sqd_nb = compute_speed_sqd(velocity_nb);
pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);
speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);
compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy);
// artificial viscosity
factor = -normal_len*smoothing_coefficient*double(0.5)*(speed_i + sqrt(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);
flux_i_density += factor*(density_i-density_nb);
flux_i_density_energy += factor*(density_energy_i-density_energy_nb);
flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x);
flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y);
flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z);
// accumulate cell-centered fluxes
factor = double(0.5)*normal.x;
flux_i_density += factor*(momentum_nb.x+momentum_i.x);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x);
factor = double(0.5)*normal.y;
flux_i_density += factor*(momentum_nb.y+momentum_i.y);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y);
factor = double(0.5)*normal.z;
flux_i_density += factor*(momentum_nb.z+momentum_i.z);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z);
}
else if(nb == -1) // a wing boundary
{
flux_i_momentum.x += normal.x*pressure_i;
flux_i_momentum.y += normal.y*pressure_i;
flux_i_momentum.z += normal.z*pressure_i;
}
else if(nb == -2) // a far field boundary
{
factor = double(0.5)*normal.x;
flux_i_density += factor*(ff_variable[VAR_MOMENTUM+0]+momentum_i.x);
flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].x+flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].x + flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].x + flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].x + flux_contribution_i_momentum_z.x);
factor = double(0.5)*normal.y;
flux_i_density += factor*(ff_variable[VAR_MOMENTUM+1]+momentum_i.y);
flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].y+flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].y + flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].y + flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].y + flux_contribution_i_momentum_z.y);
factor = double(0.5)*normal.z;
flux_i_density += factor*(ff_variable[VAR_MOMENTUM+2]+momentum_i.z);
flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].z+flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].z + flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].z + flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].z + flux_contribution_i_momentum_z.z);
}
}
fluxes[i + VAR_DENSITY*nelr] = flux_i_density;
fluxes[i + (VAR_MOMENTUM+0)*nelr] = flux_i_momentum.x;
fluxes[i + (VAR_MOMENTUM+1)*nelr] = flux_i_momentum.y;
fluxes[i + (VAR_MOMENTUM+2)*nelr] = flux_i_momentum.z;
fluxes[i + VAR_DENSITY_ENERGY*nelr] = flux_i_density_energy;
}
void compute_flux(int nelr, int* elements_surrounding_elements, double* normals, double* variables, double* fluxes)
{
dim3 Dg(nelr / block_length), Db(block_length);
allocateReadWriteSets(Dg,Db);
cuda_compute_flux<<<Dg,Db>>>(nelr, elements_surrounding_elements, normals, variables, fluxes);
freeReadWriteSets(Dg,Db);
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s compute_flux failed\n", cudaGetErrorString(error));
exit(-1);
}
}
__global__ void cuda_time_step(int j, int nelr, double* old_variables, double* variables, double* step_factors, double* fluxes)
{
const int i = (blockDim.x*blockIdx.x + threadIdx.x);
double factor = step_factors[i]/double(RK+1-j);
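// step_factors holds 0.5/(sqrt(area)*(|v|+c)); with RK = 3 the divisor RK+1-j runs 4, 3, 2
// over stages j = 0..2, so each stage applies a successively larger fraction of the local
// time step, always restarting from old_variables (a low-storage Runge-Kutta-style update).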
variables[i + VAR_DENSITY*nelr] = old_variables[i + VAR_DENSITY*nelr] + factor*fluxes[i + VAR_DENSITY*nelr];
variables[i + VAR_DENSITY_ENERGY*nelr] = old_variables[i + VAR_DENSITY_ENERGY*nelr] + factor*fluxes[i + VAR_DENSITY_ENERGY*nelr];
variables[i + (VAR_MOMENTUM+0)*nelr] = old_variables[i + (VAR_MOMENTUM+0)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+0)*nelr];
variables[i + (VAR_MOMENTUM+1)*nelr] = old_variables[i + (VAR_MOMENTUM+1)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+1)*nelr];
variables[i + (VAR_MOMENTUM+2)*nelr] = old_variables[i + (VAR_MOMENTUM+2)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+2)*nelr];
}
void time_step(int j, int nelr, double* old_variables, double* variables, double* step_factors, double* fluxes)
{
dim3 Dg(nelr / block_length), Db(block_length);
allocateReadWriteSets(Dg,Db);
cuda_time_step<<<Dg,Db>>>(j, nelr, old_variables, variables, step_factors, fluxes);
freeReadWriteSets(Dg,Db);
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s update failed\n", cudaGetErrorString(error));
exit(-1);
}
}
/*
* Main function
*/
int main(int argc, char** argv)
{
if (argc < 2)
{
std::cout << "specify data file name" << std::endl;
return 0;
}
const char* data_file_name = argv[1];
cudaDeviceProp prop;
int dev;
// CUDA_SAFE_CALL(cudaSetDevice(0));
// CUDA_SAFE_CALL(cudaGetDevice(&dev));
// CUDA_SAFE_CALL(cudaGetDeviceProperties(&prop, dev));
checkCudaErrors(cudaSetDevice(0));
checkCudaErrors(cudaGetDevice(&dev));
checkCudaErrors(cudaGetDeviceProperties(&prop, dev));
printf("Name: %s\n", prop.name);
// set far field conditions and load them into constant memory on the gpu
{
double h_ff_variable[NVAR];
const double angle_of_attack = double(3.1415926535897931 / 180.0) * double(deg_angle_of_attack);
h_ff_variable[VAR_DENSITY] = double(1.4);
double ff_pressure = double(1.0);
double ff_speed_of_sound = sqrt(GAMMA*ff_pressure / h_ff_variable[VAR_DENSITY]);
double ff_speed = double(ff_mach)*ff_speed_of_sound;
double3 ff_velocity;
ff_velocity.x = ff_speed*double(cos((double)angle_of_attack));
ff_velocity.y = ff_speed*double(sin((double)angle_of_attack));
ff_velocity.z = 0.0;
h_ff_variable[VAR_MOMENTUM+0] = h_ff_variable[VAR_DENSITY] * ff_velocity.x;
h_ff_variable[VAR_MOMENTUM+1] = h_ff_variable[VAR_DENSITY] * ff_velocity.y;
h_ff_variable[VAR_MOMENTUM+2] = h_ff_variable[VAR_DENSITY] * ff_velocity.z;
h_ff_variable[VAR_DENSITY_ENERGY] = h_ff_variable[VAR_DENSITY]*(double(0.5)*(ff_speed*ff_speed)) + (ff_pressure / double(GAMMA-1.0));
double3 h_ff_momentum;
h_ff_momentum.x = *(h_ff_variable+VAR_MOMENTUM+0);
h_ff_momentum.y = *(h_ff_variable+VAR_MOMENTUM+1);
h_ff_momentum.z = *(h_ff_variable+VAR_MOMENTUM+2);
double3 h_ff_flux_contribution_momentum_x;
double3 h_ff_flux_contribution_momentum_y;
double3 h_ff_flux_contribution_momentum_z;
double3 h_ff_flux_contribution_density_energy;
compute_flux_contribution(h_ff_variable[VAR_DENSITY], h_ff_momentum, h_ff_variable[VAR_DENSITY_ENERGY], ff_pressure, ff_velocity, h_ff_flux_contribution_momentum_x, h_ff_flux_contribution_momentum_y, h_ff_flux_contribution_momentum_z, h_ff_flux_contribution_density_energy);
// copy far field conditions to the gpu
checkCudaErrors( cudaMemcpyToSymbol(ff_variable, h_ff_variable, NVAR*sizeof(double)) );
checkCudaErrors( cudaMemcpyToSymbol(ff_flux_contribution_momentum_x, &h_ff_flux_contribution_momentum_x, sizeof(double3)) );
checkCudaErrors( cudaMemcpyToSymbol(ff_flux_contribution_momentum_y, &h_ff_flux_contribution_momentum_y, sizeof(double3)) );
checkCudaErrors( cudaMemcpyToSymbol(ff_flux_contribution_momentum_z, &h_ff_flux_contribution_momentum_z, sizeof(double3)) );
checkCudaErrors( cudaMemcpyToSymbol(ff_flux_contribution_density_energy, &h_ff_flux_contribution_density_energy, sizeof(double3)) );
}
int nel;
int nelr;
// read in domain geometry
double* areas;
int* elements_surrounding_elements;
double* normals;
{
std::ifstream file(data_file_name);
file >> nel;
nelr = block_length*((nel / block_length )+ std::min(1, nel % block_length));
double* h_areas = new double[nelr];
int* h_elements_surrounding_elements = new int[nelr*NNB];
double* h_normals = new double[nelr*NDIM*NNB];
// read in data
for(int i = 0; i < nel; i++)
{
file >> h_areas[i];
for(int j = 0; j < NNB; j++)
{
file >> h_elements_surrounding_elements[i + j*nelr];
if(h_elements_surrounding_elements[i+j*nelr] < 0) h_elements_surrounding_elements[i+j*nelr] = -1;
h_elements_surrounding_elements[i + j*nelr]--; //it's coming in with Fortran numbering
for(int k = 0; k < NDIM; k++)
{
file >> h_normals[i + (j + k*NNB)*nelr];
h_normals[i + (j + k*NNB)*nelr] = -h_normals[i + (j + k*NNB)*nelr];
}
}
}
// fill in remaining data
int last = nel-1;
for(int i = nel; i < nelr; i++)
{
h_areas[i] = h_areas[last];
for(int j = 0; j < NNB; j++)
{
// duplicate the last element
h_elements_surrounding_elements[i + j*nelr] = h_elements_surrounding_elements[last + j*nelr];
for(int k = 0; k < NDIM; k++) h_normals[i + (j + k*NNB)*nelr] = h_normals[last + (j + k*NNB)*nelr];
}
}
areas = alloc<double>(nelr);
upload<double>(areas, h_areas, nelr);
elements_surrounding_elements = alloc<int>(nelr*NNB);
upload<int>(elements_surrounding_elements, h_elements_surrounding_elements, nelr*NNB);
normals = alloc<double>(nelr*NDIM*NNB);
upload<double>(normals, h_normals, nelr*NDIM*NNB);
delete[] h_areas;
delete[] h_elements_surrounding_elements;
delete[] h_normals;
}
// Create arrays and set initial conditions
double* variables = alloc<double>(nelr*NVAR);
initialize_variables(nelr, variables);
double* old_variables = alloc<double>(nelr*NVAR);
double* fluxes = alloc<double>(nelr*NVAR);
double* step_factors = alloc<double>(nelr);
// make sure all memory is really allocated before we start timing
initialize_variables(nelr, old_variables);
initialize_variables(nelr, fluxes);
cudaMemset( (void*) step_factors, 0, sizeof(double)*nelr );
// make sure CUDA isn't still doing something before we start timing
cudaDeviceSynchronize();
// these need to be computed the first time in order to compute time step
std::cout << "Starting..." << std::endl;
cudaError_t error;
StopWatchInterface *timer = NULL;
sdkCreateTimer( &timer);
sdkStartTimer( &timer);
// Begin iterations
for(int i = 0; i < iterations; i++)
{
copy<double>(old_variables, variables, nelr*NVAR);
// for the first iteration we compute the time step
compute_step_factor(nelr, variables, areas, step_factors);
error = cudaGetLastError();
if (error != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s compute_step_factor failed\n", cudaGetErrorString(error));
exit(-1);
}
for(int j = 0; j < RK; j++)
{
compute_flux(nelr, elements_surrounding_elements, normals, variables, fluxes);
error = cudaGetLastError();
if (error != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s compute_flux failed\n", cudaGetErrorString(error));
exit(-1);
}
time_step(j, nelr, old_variables, variables, step_factors, fluxes);
error = cudaGetLastError();
if (error != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s time_step failed\n", cudaGetErrorString(error));
exit(-1);
}
}
}
cudaDeviceSynchronize();
sdkStopTimer(&timer);
std::cout << (sdkGetAverageTimerValue(&timer)/1000.0) / iterations << " seconds per iteration" << std::endl;
std::cout << "Saving solution..." << std::endl;
dump(variables, nel, nelr);
std::cout << "Saved solution..." << std::endl;
std::cout << "Cleaning up..." << std::endl;
dealloc<double>(areas);
dealloc<int>(elements_surrounding_elements);
dealloc<double>(normals);
dealloc<double>(variables);
dealloc<double>(old_variables);
dealloc<double>(fluxes);
dealloc<double>(step_factors);
std::cout << "Done..." << std::endl;
return 0;
}
|
c8bdc3226870dadc5b0ab89735ca704714f370cb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/vec_traits.hpp"
#include "opencv2/core/cuda/vec_math.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/border_interpolate.hpp"
namespace cv { namespace cuda { namespace device
{
// // namespace imgproc
// {
__device__ void histogramAddAndSub8(int* H, const int * hist_colAdd,const int * hist_colSub){
int tx = threadIdx.x;
if (tx<8){
H[tx]+=hist_colAdd[tx]-hist_colSub[tx];
}
}
__device__ void histogramMultipleAdd8(int* H, const int * hist_col,int histCount){
int tx = threadIdx.x;
if (tx<8){
int temp=H[tx];
for(int i=0; i<histCount; i++)
temp+=hist_col[(i<<3)+tx];
H[tx]=temp;
}
}
__device__ void histogramClear8(int* H){
int tx = threadIdx.x;
if (tx<8){
H[tx]=0;
}
}
__device__ void histogramAdd8(int* H, const int * hist_col){
int tx = threadIdx.x;
if (tx<8){
H[tx]+=hist_col[tx];
}
}
__device__ void histogramSub8(int* H, const int * hist_col){
int tx = threadIdx.x;
if (tx<8){
H[tx]-=hist_col[tx];
}
}
__device__ void histogramAdd32(int* H, const int * hist_col){
int tx = threadIdx.x;
if (tx<32){
H[tx]+=hist_col[tx];
}
}
__device__ void histogramAddAndSub32(int* H, const int * hist_colAdd,const int * hist_colSub){
int tx = threadIdx.x;
if (tx<32){
H[tx]+=hist_colAdd[tx]-hist_colSub[tx];
}
}
__device__ void histogramClear32(int* H){
int tx = threadIdx.x;
if (tx<32){
H[tx]=0;
}
}
__device__ void lucClear8(int* luc){
int tx = threadIdx.x;
if (tx<8)
luc[tx]=0;
}
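        // Note: luc ("last updated column") records, per coarse bin, how far along the
        // row the matching 32-bin fine histogram has been advanced. Fine histograms are
        // rebuilt or slid forward lazily, only when their coarse bin is actually
        // consulted for a median lookup -- the lazy-update idea behind constant-time
        // median filtering (in the spirit of Perreault & Hebert, 2007).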
__device__ void histogramMedianPar8LookupOnly(int* H,int* Hscan, const int medPos,int* retval, int* countAtMed){
int tx=threadIdx.x;
*retval=*countAtMed=0;
if(tx<8){
Hscan[tx]=H[tx];
}
__syncthreads();
if (1 <= tx && tx < 8 )
Hscan[tx]+=Hscan[tx-1];
__syncthreads();
if (2 <= tx && tx < 8 )
Hscan[tx]+=Hscan[tx-2];
__syncthreads();
if (4 <= tx && tx < 8 )
Hscan[tx]+=Hscan[tx-4];
__syncthreads();
if(tx<7){
if(Hscan[tx+1] > medPos && Hscan[tx] < medPos){
*retval=tx+1;
*countAtMed=Hscan[tx];
}
else if(Hscan[tx]==medPos){
if(Hscan[tx+1]>medPos){
*retval=tx+1;
*countAtMed=Hscan[tx];
}
}
}
}
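        // Note: the three guarded additions above form a Hillis-Steele inclusive prefix
        // sum over the 8 bins (strides 1, 2, 4), after which Hscan[tx] = H[0]+...+H[tx].
        // A minimal sequential sketch of the same lookup, with identical semantics:
        //   int acc = 0;
        //   for (int b = 0; b < 8; b++) {
        //       if (acc + H[b] > medPos) { *retval = b; *countAtMed = acc; break; }
        //       acc += H[b];
        //   }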
__device__ void histogramMedianPar32LookupOnly(int* H,int* Hscan, const int medPos,int* retval, int* countAtMed){
int tx=threadIdx.x;
*retval=*countAtMed=0;
if(tx<32){
Hscan[tx]=H[tx];
}
__syncthreads();
if ( 1 <= tx && tx < 32 )
Hscan[tx]+=Hscan[tx-1];
__syncthreads();
if ( 2 <= tx && tx < 32 )
Hscan[tx]+=Hscan[tx-2];
__syncthreads();
if ( 4 <= tx && tx < 32 )
Hscan[tx]+=Hscan[tx-4];
__syncthreads();
if ( 8 <= tx && tx < 32 )
Hscan[tx]+=Hscan[tx-8];
__syncthreads();
if ( 16 <= tx && tx < 32 )
Hscan[tx]+=Hscan[tx-16];
__syncthreads();
if(tx<31){
if(Hscan[tx+1] > medPos && Hscan[tx] < medPos){
*retval=tx+1;
*countAtMed=Hscan[tx];
}
else if(Hscan[tx]==medPos){
if(Hscan[tx+1]>medPos){
*retval=tx+1;
*countAtMed=Hscan[tx];
}
}
}
}
__global__ void cuMedianFilterMultiBlock(PtrStepSzb src, PtrStepSzb dest, PtrStepSzi histPar, PtrStepSzi coarseHistGrid,int r, int medPos_)
{
__shared__ int HCoarse[8];
__shared__ int HCoarseScan[32];
__shared__ int HFine[8][32];
__shared__ int luc[8];
__shared__ int firstBin,countAtMed, retval;
int rows = src.rows, cols=src.cols;
int extraRowThread=rows%gridDim.x;
int doExtraRow=blockIdx.x<extraRowThread;
int startRow=0, stopRow=0;
int rowsPerBlock= rows/gridDim.x+doExtraRow;
        // The following code partitions the rows across the blocks. Some blocks will do
        // one row more than others; this code is responsible for that balancing.
if(doExtraRow){
startRow=rowsPerBlock*blockIdx.x;
stopRow=::min(rows, startRow+rowsPerBlock);
}
else{
startRow=(rowsPerBlock+1)*extraRowThread+(rowsPerBlock)*(blockIdx.x-extraRowThread);
stopRow=::min(rows, startRow+rowsPerBlock);
}
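            // Worked example: rows=10, gridDim.x=4 gives extraRowThread=2, so blocks
            // 0..3 cover the row ranges [0,3), [3,6), [6,8) and [8,10) respectively.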
int* hist= histPar.data+cols*256*blockIdx.x;
int* histCoarse=coarseHistGrid.data +cols*8*blockIdx.x;
if (blockIdx.x==(gridDim.x-1))
stopRow=rows;
__syncthreads();
int initNeeded=0, initVal, initStartRow, initStopRow;
if(blockIdx.x==0){
initNeeded=1; initVal=r+2; initStartRow=1; initStopRow=r;
}
else if (startRow<(r+2)){
initNeeded=1; initVal=r+2-startRow; initStartRow=1; initStopRow=r+startRow;
}
else{
initNeeded=0; initVal=0; initStartRow=startRow-(r+1); initStopRow=r+startRow;
}
__syncthreads();
            // In the original algorithm an initialization phase was required because part of the window was
            // outside the image. In this parallel version, the initialization is required for all thread blocks
            // for which part of the median filter window lies outside the image.
// For all threads in the block the same code will be executed.
if (initNeeded){
for (int j=threadIdx.x; j<(cols); j+=blockDim.x){
hist[j*256+src.ptr(0)[j]]=initVal;
histCoarse[j*8+(src.ptr(0)[j]>>5)]=initVal;
}
}
__syncthreads();
            // For all remaining rows in the median filter, add the values to the histogram
for (int j=threadIdx.x; j<cols; j+=blockDim.x){
for(int i=initStartRow; i<initStopRow; i++){
int pos=::min(i,rows-1);
hist[j*256+src.ptr(pos)[j]]++;
histCoarse[j*8+(src.ptr(pos)[j]>>5)]++;
}
}
__syncthreads();
// Going through all the rows that the block is responsible for.
int inc=blockDim.x*256;
int incCoarse=blockDim.x*8;
for(int i=startRow; i< stopRow; i++){
// For every new row that is started the global histogram for the entire window is restarted.
histogramClear8(HCoarse);
lucClear8(luc);
// Computing some necessary indices
int possub=::max(0,i-r-1),posadd=::min(rows-1,i+r);
int histPos=threadIdx.x*256;
int histCoarsePos=threadIdx.x*8;
                // Going through all the elements of a specific row. For each histogram, one value is taken out
                // and one value is added.
for (int j=threadIdx.x; j<cols; j+=blockDim.x){
hist[histPos+ src.ptr(possub)[j] ]--;
hist[histPos+ src.ptr(posadd)[j] ]++;
histCoarse[histCoarsePos+ (src.ptr(possub)[j]>>5) ]--;
histCoarse[histCoarsePos+ (src.ptr(posadd)[j]>>5) ]++;
histPos+=inc;
histCoarsePos+=incCoarse;
}
__syncthreads();
histogramMultipleAdd8(HCoarse,histCoarse, 2*r+1);
// __syncthreads();
int cols_m_1=cols-1;
for(int j=r;j<cols-r;j++){
int possub=::max(j-r,0);
int posadd=::min(j+1+r,cols_m_1);
int medPos=medPos_;
__syncthreads();
histogramMedianPar8LookupOnly(HCoarse,HCoarseScan,medPos, &firstBin,&countAtMed);
__syncthreads();
if ( luc[firstBin] <= (j-r))
{
histogramClear32(HFine[firstBin]);
for ( luc[firstBin] = j-r; luc[firstBin] < ::min(j+r+1,cols); luc[firstBin]++ ){
histogramAdd32(HFine[firstBin], hist+(luc[firstBin]*256+(firstBin<<5) ) );
}
}
else{
for ( ; luc[firstBin] < (j+r+1);luc[firstBin]++ ) {
histogramAddAndSub32(HFine[firstBin],
hist+(::min(luc[firstBin],cols_m_1)*256+(firstBin<<5) ),
hist+(::max(luc[firstBin]-2*r-1,0)*256+(firstBin<<5) ) );
__syncthreads();
}
}
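                // Note: firstBin<<5 == firstBin*32. Coarse bin b covers gray levels
                // [32b, 32b+31], so the fine pass scans only that 32-bin slice of each
                // 256-bin column histogram; the median written below is
                // (firstBin<<5) + retval.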
__syncthreads();
int leftOver=medPos-countAtMed;
if(leftOver>=0){
histogramMedianPar32LookupOnly(HFine[firstBin],HCoarseScan,leftOver,&retval,&countAtMed);
}
else retval=0;
__syncthreads();
if (threadIdx.x==0){
dest.ptr(i)[j]=(firstBin<<5) + retval;
}
histogramAddAndSub8(HCoarse, histCoarse+(int)(posadd<<3),histCoarse+(int)(possub<<3));
__syncthreads();
}
__syncthreads();
}
}
void medianFiltering_gpu(const PtrStepSzb src, PtrStepSzb dst, PtrStepSzi devHist, PtrStepSzi devCoarseHist,int kernel, int partitions,hipStream_t stream){
int medPos=2*kernel*kernel+2*kernel;
dim3 gridDim; gridDim.x=partitions;
dim3 blockDim; blockDim.x=32;
hipLaunchKernelGGL(( cuMedianFilterMultiBlock), dim3(gridDim),dim3(blockDim),0, stream, src, dst, devHist,devCoarseHist, kernel, medPos);
if (!stream)
cudaSafeCall( hipDeviceSynchronize() );
}
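        // A minimal host-side sizing sketch (hypothetical names; how the PtrStepSzi
        // wrappers get built is outside this file). The kernel indexes cols*256 ints of
        // fine histogram and cols*8 ints of coarse histogram per partition, and `kernel`
        // acts as the window radius r, so medPos = 2r^2+2r = ((2r+1)^2 - 1)/2, the
        // middle element of a (2r+1)x(2r+1) window:
        //   int *histData = 0, *coarseData = 0;
        //   hipMalloc(&histData,   partitions * cols * 256 * sizeof(int));
        //   hipMalloc(&coarseData, partitions * cols * 8   * sizeof(int));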
}}}
#endif
| c8bdc3226870dadc5b0ab89735ca704714f370cb.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/vec_traits.hpp"
#include "opencv2/core/cuda/vec_math.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/border_interpolate.hpp"
namespace cv { namespace cuda { namespace device
{
// // namespace imgproc
// {
__device__ void histogramAddAndSub8(int* H, const int * hist_colAdd,const int * hist_colSub){
int tx = threadIdx.x;
if (tx<8){
H[tx]+=hist_colAdd[tx]-hist_colSub[tx];
}
}
__device__ void histogramMultipleAdd8(int* H, const int * hist_col,int histCount){
int tx = threadIdx.x;
if (tx<8){
int temp=H[tx];
for(int i=0; i<histCount; i++)
temp+=hist_col[(i<<3)+tx];
H[tx]=temp;
}
}
__device__ void histogramClear8(int* H){
int tx = threadIdx.x;
if (tx<8){
H[tx]=0;
}
}
__device__ void histogramAdd8(int* H, const int * hist_col){
int tx = threadIdx.x;
if (tx<8){
H[tx]+=hist_col[tx];
}
}
__device__ void histogramSub8(int* H, const int * hist_col){
int tx = threadIdx.x;
if (tx<8){
H[tx]-=hist_col[tx];
}
}
__device__ void histogramAdd32(int* H, const int * hist_col){
int tx = threadIdx.x;
if (tx<32){
H[tx]+=hist_col[tx];
}
}
__device__ void histogramAddAndSub32(int* H, const int * hist_colAdd,const int * hist_colSub){
int tx = threadIdx.x;
if (tx<32){
H[tx]+=hist_colAdd[tx]-hist_colSub[tx];
}
}
__device__ void histogramClear32(int* H){
int tx = threadIdx.x;
if (tx<32){
H[tx]=0;
}
}
__device__ void lucClear8(int* luc){
int tx = threadIdx.x;
if (tx<8)
luc[tx]=0;
}
__device__ void histogramMedianPar8LookupOnly(int* H,int* Hscan, const int medPos,int* retval, int* countAtMed){
int tx=threadIdx.x;
*retval=*countAtMed=0;
if(tx<8){
Hscan[tx]=H[tx];
}
__syncthreads();
if (1 <= tx && tx < 8 )
Hscan[tx]+=Hscan[tx-1];
__syncthreads();
if (2 <= tx && tx < 8 )
Hscan[tx]+=Hscan[tx-2];
__syncthreads();
if (4 <= tx && tx < 8 )
Hscan[tx]+=Hscan[tx-4];
__syncthreads();
if(tx<7){
if(Hscan[tx+1] > medPos && Hscan[tx] < medPos){
*retval=tx+1;
*countAtMed=Hscan[tx];
}
else if(Hscan[tx]==medPos){
if(Hscan[tx+1]>medPos){
*retval=tx+1;
*countAtMed=Hscan[tx];
}
}
}
}
__device__ void histogramMedianPar32LookupOnly(int* H,int* Hscan, const int medPos,int* retval, int* countAtMed){
int tx=threadIdx.x;
*retval=*countAtMed=0;
if(tx<32){
Hscan[tx]=H[tx];
}
__syncthreads();
if ( 1 <= tx && tx < 32 )
Hscan[tx]+=Hscan[tx-1];
__syncthreads();
if ( 2 <= tx && tx < 32 )
Hscan[tx]+=Hscan[tx-2];
__syncthreads();
if ( 4 <= tx && tx < 32 )
Hscan[tx]+=Hscan[tx-4];
__syncthreads();
if ( 8 <= tx && tx < 32 )
Hscan[tx]+=Hscan[tx-8];
__syncthreads();
if ( 16 <= tx && tx < 32 )
Hscan[tx]+=Hscan[tx-16];
__syncthreads();
if(tx<31){
if(Hscan[tx+1] > medPos && Hscan[tx] < medPos){
*retval=tx+1;
*countAtMed=Hscan[tx];
}
else if(Hscan[tx]==medPos){
if(Hscan[tx+1]>medPos){
*retval=tx+1;
*countAtMed=Hscan[tx];
}
}
}
}
__global__ void cuMedianFilterMultiBlock(PtrStepSzb src, PtrStepSzb dest, PtrStepSzi histPar, PtrStepSzi coarseHistGrid,int r, int medPos_)
{
__shared__ int HCoarse[8];
__shared__ int HCoarseScan[32];
__shared__ int HFine[8][32];
__shared__ int luc[8];
__shared__ int firstBin,countAtMed, retval;
int rows = src.rows, cols=src.cols;
int extraRowThread=rows%gridDim.x;
int doExtraRow=blockIdx.x<extraRowThread;
int startRow=0, stopRow=0;
int rowsPerBlock= rows/gridDim.x+doExtraRow;
        // The following code partitions the rows across the blocks. Some blocks will do
        // one row more than others; this code is responsible for that balancing.
if(doExtraRow){
startRow=rowsPerBlock*blockIdx.x;
stopRow=::min(rows, startRow+rowsPerBlock);
}
else{
startRow=(rowsPerBlock+1)*extraRowThread+(rowsPerBlock)*(blockIdx.x-extraRowThread);
stopRow=::min(rows, startRow+rowsPerBlock);
}
int* hist= histPar.data+cols*256*blockIdx.x;
int* histCoarse=coarseHistGrid.data +cols*8*blockIdx.x;
if (blockIdx.x==(gridDim.x-1))
stopRow=rows;
__syncthreads();
int initNeeded=0, initVal, initStartRow, initStopRow;
if(blockIdx.x==0){
initNeeded=1; initVal=r+2; initStartRow=1; initStopRow=r;
}
else if (startRow<(r+2)){
initNeeded=1; initVal=r+2-startRow; initStartRow=1; initStopRow=r+startRow;
}
else{
initNeeded=0; initVal=0; initStartRow=startRow-(r+1); initStopRow=r+startRow;
}
__syncthreads();
            // In the original algorithm an initialization phase was required because part of the window was
            // outside the image. In this parallel version, the initialization is required for all thread blocks
            // for which part of the median filter window lies outside the image.
// For all threads in the block the same code will be executed.
if (initNeeded){
for (int j=threadIdx.x; j<(cols); j+=blockDim.x){
hist[j*256+src.ptr(0)[j]]=initVal;
histCoarse[j*8+(src.ptr(0)[j]>>5)]=initVal;
}
}
__syncthreads();
            // For all remaining rows in the median filter, add the values to the histogram
for (int j=threadIdx.x; j<cols; j+=blockDim.x){
for(int i=initStartRow; i<initStopRow; i++){
int pos=::min(i,rows-1);
hist[j*256+src.ptr(pos)[j]]++;
histCoarse[j*8+(src.ptr(pos)[j]>>5)]++;
}
}
__syncthreads();
// Going through all the rows that the block is responsible for.
int inc=blockDim.x*256;
int incCoarse=blockDim.x*8;
for(int i=startRow; i< stopRow; i++){
// For every new row that is started the global histogram for the entire window is restarted.
histogramClear8(HCoarse);
lucClear8(luc);
// Computing some necessary indices
int possub=::max(0,i-r-1),posadd=::min(rows-1,i+r);
int histPos=threadIdx.x*256;
int histCoarsePos=threadIdx.x*8;
                // Going through all the elements of a specific row. For each histogram, one value is taken out
                // and one value is added.
for (int j=threadIdx.x; j<cols; j+=blockDim.x){
hist[histPos+ src.ptr(possub)[j] ]--;
hist[histPos+ src.ptr(posadd)[j] ]++;
histCoarse[histCoarsePos+ (src.ptr(possub)[j]>>5) ]--;
histCoarse[histCoarsePos+ (src.ptr(posadd)[j]>>5) ]++;
histPos+=inc;
histCoarsePos+=incCoarse;
}
__syncthreads();
histogramMultipleAdd8(HCoarse,histCoarse, 2*r+1);
// __syncthreads();
int cols_m_1=cols-1;
for(int j=r;j<cols-r;j++){
int possub=::max(j-r,0);
int posadd=::min(j+1+r,cols_m_1);
int medPos=medPos_;
__syncthreads();
histogramMedianPar8LookupOnly(HCoarse,HCoarseScan,medPos, &firstBin,&countAtMed);
__syncthreads();
if ( luc[firstBin] <= (j-r))
{
histogramClear32(HFine[firstBin]);
for ( luc[firstBin] = j-r; luc[firstBin] < ::min(j+r+1,cols); luc[firstBin]++ ){
histogramAdd32(HFine[firstBin], hist+(luc[firstBin]*256+(firstBin<<5) ) );
}
}
else{
for ( ; luc[firstBin] < (j+r+1);luc[firstBin]++ ) {
histogramAddAndSub32(HFine[firstBin],
hist+(::min(luc[firstBin],cols_m_1)*256+(firstBin<<5) ),
hist+(::max(luc[firstBin]-2*r-1,0)*256+(firstBin<<5) ) );
__syncthreads();
}
}
__syncthreads();
int leftOver=medPos-countAtMed;
if(leftOver>=0){
histogramMedianPar32LookupOnly(HFine[firstBin],HCoarseScan,leftOver,&retval,&countAtMed);
}
else retval=0;
__syncthreads();
if (threadIdx.x==0){
dest.ptr(i)[j]=(firstBin<<5) + retval;
}
histogramAddAndSub8(HCoarse, histCoarse+(int)(posadd<<3),histCoarse+(int)(possub<<3));
__syncthreads();
}
__syncthreads();
}
}
void medianFiltering_gpu(const PtrStepSzb src, PtrStepSzb dst, PtrStepSzi devHist, PtrStepSzi devCoarseHist,int kernel, int partitions,cudaStream_t stream){
int medPos=2*kernel*kernel+2*kernel;
dim3 gridDim; gridDim.x=partitions;
dim3 blockDim; blockDim.x=32;
cuMedianFilterMultiBlock<<<gridDim,blockDim,0, stream>>>(src, dst, devHist,devCoarseHist, kernel, medPos);
if (!stream)
cudaSafeCall( cudaDeviceSynchronize() );
}
}}}
#endif
|
c1e0e1d1dca531368ca5ff165d1930ed40f5c914.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
int main() {
int nDevices;
hipGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Version %d.%d\n", prop.major, prop.minor);
printf(" Compute Mode: %d\n", prop.computeMode);
printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
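    // The formula above: memoryClockRate is reported in kHz, the factor 2.0 accounts
    // for DDR memory (two transfers per clock), memoryBusWidth/8 converts bits to
    // bytes, and /1.0e6 converts kHz*bytes into GB/s.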
printf(" Multi Processor Count: %d\n\n", prop.multiProcessorCount);
printf(" TCC Driver: %d\n\n", prop.tccDriver);
    printf("  Total Global Mem: %zu\n\n", prop.totalGlobalMem);
    printf("  Shared Mem Per Block: %zu\n\n", prop.sharedMemPerBlock);
printf(" Registers Per Block: %d\n\n", prop.regsPerBlock);
printf(" Warpsize: %d\n\n", prop.warpSize);
    printf("  MemPitch: %zu\n\n", prop.memPitch);
printf(" MaxThreadsPerBlock: %d\n\n", prop.maxThreadsPerBlock);
printf(" Can Map Host Memory: %d\n\n", prop.canMapHostMemory);
}
}
| c1e0e1d1dca531368ca5ff165d1930ed40f5c914.cu | #include <stdio.h>
int main() {
int nDevices;
cudaGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Version %d.%d\n", prop.major, prop.minor);
printf(" Compute Mode: %d\n", prop.computeMode);
printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
printf(" Multi Processor Count: %d\n\n", prop.multiProcessorCount);
printf(" TCC Driver: %d\n\n", prop.tccDriver);
    printf("  Total Global Mem: %zu\n\n", prop.totalGlobalMem);
    printf("  Shared Mem Per Block: %zu\n\n", prop.sharedMemPerBlock);
printf(" Registers Per Block: %d\n\n", prop.regsPerBlock);
printf(" Warpsize: %d\n\n", prop.warpSize);
    printf("  MemPitch: %zu\n\n", prop.memPitch);
printf(" MaxThreadsPerBlock: %d\n\n", prop.maxThreadsPerBlock);
printf(" Can Map Host Memory: %d\n\n", prop.canMapHostMemory);
}
}
|
e84459c5406f9ecceb9d1ec489bc9e87ed610194.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__ void add(int *a, int *b, int *c)
{
*c = *a + *b;
}
int main()
{
int a, b, c;
int *d_a, *d_b, *d_c;
int size = sizeof( int );
/* allocate space for device copies of a, b, c */
hipMalloc( (void **) &d_a, size );
hipMalloc( (void **) &d_b, size );
hipMalloc( (void **) &d_c, size );
/* setup initial values */
a = 2;
b = 7;
c = -99;
/* copy inputs to device */
hipMemcpy( d_a, &a, size, hipMemcpyHostToDevice );
hipMemcpy( d_b, &b, size, hipMemcpyHostToDevice );
/* launch the kernel on the GPU */
hipLaunchKernelGGL(( add), dim3(1), dim3(1) , 0, 0, d_a, d_b, d_c );
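    /* hipLaunchKernelGGL(kernel, gridDim, blockDim, sharedMemBytes, stream, args...)
       is HIP's portable spelling of the CUDA triple-chevron launch
       kernel<<<gridDim, blockDim, sharedMemBytes, stream>>>(args...);
       here: one block of one thread, no dynamic shared memory, default stream */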
/* copy result back to host */
hipMemcpy( &c, d_c, size, hipMemcpyDeviceToHost );
printf("value of c after kernel is %d\n",c);
/* clean up */
hipFree( d_a );
hipFree( d_b );
hipFree( d_c );
return 0;
} /* end main */
| e84459c5406f9ecceb9d1ec489bc9e87ed610194.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__ void add(int *a, int *b, int *c)
{
*c = *a + *b;
}
int main()
{
int a, b, c;
int *d_a, *d_b, *d_c;
int size = sizeof( int );
/* allocate space for device copies of a, b, c */
cudaMalloc( (void **) &d_a, size );
cudaMalloc( (void **) &d_b, size );
cudaMalloc( (void **) &d_c, size );
/* setup initial values */
a = 2;
b = 7;
c = -99;
/* copy inputs to device */
cudaMemcpy( d_a, &a, size, cudaMemcpyHostToDevice );
cudaMemcpy( d_b, &b, size, cudaMemcpyHostToDevice );
/* launch the kernel on the GPU */
add<<< 1, 1 >>>( d_a, d_b, d_c );
/* copy result back to host */
cudaMemcpy( &c, d_c, size, cudaMemcpyDeviceToHost );
printf("value of c after kernel is %d\n",c);
/* clean up */
cudaFree( d_a );
cudaFree( d_b );
cudaFree( d_c );
return 0;
} /* end main */
|
9bf02a848ac428de712e2f9e14f5d31a790faa1f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2009, Andrew Corrigan, [email protected]
// This code is from the AIAA-2009-4001 paper
//#include <cutil.h>
#include <helper_cuda.h>
#include <helper_timer.h>
#include <iostream>
#include <fstream>
/*
* Options
*
*/
#define GAMMA 1.4f
#define iterations 2000
// #ifndef block_length
// #define block_length 192
// #endif
#define NDIM 3
//~ #define NNB 4
#define NNB 2
#define RK 3 // 3rd order RK
#define ff_mach 1.2f
#define deg_angle_of_attack 0.0f
/*
* not options
*/
#ifdef RD_WG_SIZE_0_0
#define BLOCK_SIZE_0 RD_WG_SIZE_0_0
#elif defined(RD_WG_SIZE_0)
#define BLOCK_SIZE_0 RD_WG_SIZE_0
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_0 RD_WG_SIZE
#else
#define BLOCK_SIZE_0 192
#endif
#ifdef RD_WG_SIZE_1_0
#define BLOCK_SIZE_1 RD_WG_SIZE_1_0
#elif defined(RD_WG_SIZE_1)
#define BLOCK_SIZE_1 RD_WG_SIZE_1
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_1 RD_WG_SIZE
#else
#define BLOCK_SIZE_1 192
#endif
#ifdef RD_WG_SIZE_2_0
#define BLOCK_SIZE_2 RD_WG_SIZE_2_0
#elif defined(RD_WG_SIZE_2)
#define BLOCK_SIZE_2 RD_WG_SIZE_2
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_2 RD_WG_SIZE
#else
#define BLOCK_SIZE_2 192
#endif
#ifdef RD_WG_SIZE_3_0
#define BLOCK_SIZE_3 RD_WG_SIZE_3_0
#elif defined(RD_WG_SIZE_3)
#define BLOCK_SIZE_3 RD_WG_SIZE_3
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_3 RD_WG_SIZE
#else
#define BLOCK_SIZE_3 192
#endif
#ifdef RD_WG_SIZE_4_0
#define BLOCK_SIZE_4 RD_WG_SIZE_4_0
#elif defined(RD_WG_SIZE_4)
#define BLOCK_SIZE_4 RD_WG_SIZE_4
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_4 RD_WG_SIZE
#else
#define BLOCK_SIZE_4 192
#endif
// #if block_length > 128
// #warning "the kernels may fail to launch on some systems if the block length is too large"
// #endif
#define VAR_DENSITY 0
#define VAR_MOMENTUM 1
#define VAR_DENSITY_ENERGY (VAR_MOMENTUM+NDIM)
#define NVAR (VAR_DENSITY_ENERGY+1)
/*
* Generic functions
*/
template <typename T>
T* alloc(int N)
{
T* t;
checkCudaErrors(hipMalloc((void**)&t, sizeof(T)*N));
return t;
}
template <typename T>
void dealloc(T* array)
{
checkCudaErrors(hipFree((void*)array));
}
template <typename T>
void copy(T* dst, T* src, int N)
{
checkCudaErrors(hipMemcpy((void*)dst, (void*)src, N*sizeof(T), hipMemcpyDeviceToDevice));
}
template <typename T>
void upload(T* dst, T* src, int N)
{
checkCudaErrors(hipMemcpy((void*)dst, (void*)src, N*sizeof(T), hipMemcpyHostToDevice));
}
template <typename T>
void download(T* dst, T* src, int N)
{
checkCudaErrors(hipMemcpy((void*)dst, (void*)src, N*sizeof(T), hipMemcpyDeviceToHost));
}
void dump(float* variables, int nel, int nelr)
{
float* h_variables = new float[nelr*NVAR];
download(h_variables, variables, nelr*NVAR);
{
std::ofstream file("density");
file << nel << " " << nelr << std::endl;
for(int i = 0; i < nel; i++) file << h_variables[i + VAR_DENSITY*nelr] << std::endl;
}
{
std::ofstream file("momentum");
file << nel << " " << nelr << std::endl;
for(int i = 0; i < nel; i++)
{
for(int j = 0; j != NDIM; j++)
file << h_variables[i + (VAR_MOMENTUM+j)*nelr] << " ";
file << std::endl;
}
}
{
std::ofstream file("density_energy");
file << nel << " " << nelr << std::endl;
for(int i = 0; i < nel; i++) file << h_variables[i + VAR_DENSITY_ENERGY*nelr] << std::endl;
}
delete[] h_variables;
}
/*
* Element-based Cell-centered FVM solver functions
*/
__constant__ float ff_variable[NVAR];
__constant__ float3 ff_flux_contribution_momentum_x[1];
__constant__ float3 ff_flux_contribution_momentum_y[1];
__constant__ float3 ff_flux_contribution_momentum_z[1];
__constant__ float3 ff_flux_contribution_density_energy[1];
__global__ void cuda_initialize_variables(int nelr, float* variables)
{
const int i = (blockDim.x*blockIdx.x + threadIdx.x);
for(int j = 0; j < NVAR; j++)
variables[i + j*nelr] = ff_variable[j];
}
void initialize_variables(int nelr, float* variables)
{
dim3 Dg(nelr / BLOCK_SIZE_1), Db(BLOCK_SIZE_1);
hipLaunchKernelGGL(( cuda_initialize_variables), dim3(Dg), dim3(Db), 0, 0, nelr, variables);
getLastCudaError("initialize_variables failed");
}
__device__ __host__ inline void compute_flux_contribution(float& density, float3& momentum, float& density_energy, float& pressure, float3& velocity, float3& fc_momentum_x, float3& fc_momentum_y, float3& fc_momentum_z, float3& fc_density_energy)
{
fc_momentum_x.x = velocity.x*momentum.x + pressure;
fc_momentum_x.y = velocity.x*momentum.y;
fc_momentum_x.z = velocity.x*momentum.z;
fc_momentum_y.x = fc_momentum_x.y;
fc_momentum_y.y = velocity.y*momentum.y + pressure;
fc_momentum_y.z = velocity.y*momentum.z;
fc_momentum_z.x = fc_momentum_x.z;
fc_momentum_z.y = fc_momentum_y.z;
fc_momentum_z.z = velocity.z*momentum.z + pressure;
float de_p = density_energy+pressure;
fc_density_energy.x = velocity.x*de_p;
fc_density_energy.y = velocity.y*de_p;
fc_density_energy.z = velocity.z*de_p;
}
__device__ inline void compute_velocity(float& density, float3& momentum, float3& velocity)
{
velocity.x = momentum.x / density;
velocity.y = momentum.y / density;
velocity.z = momentum.z / density;
}
__device__ inline float compute_speed_sqd(float3& velocity)
{
return velocity.x*velocity.x + velocity.y*velocity.y + velocity.z*velocity.z;
}
__device__ inline float compute_pressure(float& density, float& density_energy, float& speed_sqd)
{
return (float(GAMMA)-float(1.0f))*(density_energy - float(0.5f)*density*speed_sqd);
}
__device__ inline float compute_speed_of_sound(float& density, float& pressure)
{
return sqrtf(float(GAMMA)*pressure/density);
}
__global__ void cuda_compute_step_factor(int nelr, float* variables, float* areas, float* step_factors)
{
const int i = (blockDim.x*blockIdx.x + threadIdx.x);
float density = variables[i + VAR_DENSITY*nelr];
float3 momentum;
momentum.x = variables[i + (VAR_MOMENTUM+0)*nelr];
momentum.y = variables[i + (VAR_MOMENTUM+1)*nelr];
momentum.z = variables[i + (VAR_MOMENTUM+2)*nelr];
float density_energy = variables[i + VAR_DENSITY_ENERGY*nelr];
float3 velocity; compute_velocity(density, momentum, velocity);
float speed_sqd = compute_speed_sqd(velocity);
float pressure = compute_pressure(density, density_energy, speed_sqd);
float speed_of_sound = compute_speed_of_sound(density, pressure);
	// dt = float(0.5f) * sqrtf(areas[i]) / (||v|| + c), but the time stepping later divides by the area, so we fold it in here: dt/area = float(0.5f) / (sqrtf(areas[i]) * (||v|| + c))
step_factors[i] = float(0.5f) / (sqrtf(areas[i]) * (sqrtf(speed_sqd) + speed_of_sound));
}
void compute_step_factor(int nelr, float* variables, float* areas, float* step_factors)
{
dim3 Dg(nelr / BLOCK_SIZE_2), Db(BLOCK_SIZE_2);
hipLaunchKernelGGL(( cuda_compute_step_factor), dim3(Dg), dim3(Db), 0, 0, nelr, variables, areas, step_factors);
getLastCudaError("compute_step_factor failed");
}
/*
*
*
*/
__global__ void cuda_compute_flux(int nelr, int* elements_surrounding_elements, float* normals, float* variables, float* fluxes)
{
const float smoothing_coefficient = float(0.2f);
const int i = (blockDim.x*blockIdx.x + threadIdx.x);
int j, nb;
float3 normal; float normal_len;
float factor;
float density_i = variables[i + VAR_DENSITY*nelr];
float3 momentum_i;
momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr];
momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr];
momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr];
float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr];
float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i);
float speed_sqd_i = compute_speed_sqd(velocity_i);
float speed_i = sqrtf(speed_sqd_i);
float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);
float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z;
float3 flux_contribution_i_density_energy;
compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy);
float flux_i_density = float(0.0f);
float3 flux_i_momentum;
flux_i_momentum.x = float(0.0f);
flux_i_momentum.y = float(0.0f);
flux_i_momentum.z = float(0.0f);
float flux_i_density_energy = float(0.0f);
float3 velocity_nb;
float density_nb, density_energy_nb;
float3 momentum_nb;
float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z;
float3 flux_contribution_nb_density_energy;
float speed_sqd_nb, speed_of_sound_nb, pressure_nb;
#pragma unroll
for(j = 0; j < NNB; j++)
//~ for(j = 1; j < 4; j++)
{
nb = elements_surrounding_elements[i + j*nelr];
normal.x = normals[i + (j + 0*NNB)*nelr];
normal.y = normals[i + (j + 1*NNB)*nelr];
normal.z = normals[i + (j + 2*NNB)*nelr];
normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z);
if(nb >= 0) // a legitimate neighbor
{
density_nb = variables[nb + VAR_DENSITY*nelr];
momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr];
momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr];
momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr];
density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr];
compute_velocity(density_nb, momentum_nb, velocity_nb);
speed_sqd_nb = compute_speed_sqd(velocity_nb);
pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);
speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);
compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy);
// artificial viscosity
factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);
flux_i_density += factor*(density_i-density_nb);
flux_i_density_energy += factor*(density_energy_i-density_energy_nb);
flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x);
flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y);
flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z);
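				// Note: this dissipation scales with the face area (normal_len) and the
				// face-averaged spectral radius |v| + c of the Euler flux Jacobian --
				// a Rusanov / Lax-Friedrichs-style smoothing term.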
// accumulate cell-centered fluxes
factor = float(0.5f)*normal.x;
flux_i_density += factor*(momentum_nb.x+momentum_i.x);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x);
factor = float(0.5f)*normal.y;
flux_i_density += factor*(momentum_nb.y+momentum_i.y);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y);
factor = float(0.5f)*normal.z;
flux_i_density += factor*(momentum_nb.z+momentum_i.z);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z);
}
else if(nb == -1) // a wing boundary
{
flux_i_momentum.x += normal.x*pressure_i;
flux_i_momentum.y += normal.y*pressure_i;
flux_i_momentum.z += normal.z*pressure_i;
}
else if(nb == -2) // a far field boundary
{
factor = float(0.5f)*normal.x;
flux_i_density += factor*(ff_variable[VAR_MOMENTUM+0]+momentum_i.x);
flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].x+flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].x + flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].x + flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].x + flux_contribution_i_momentum_z.x);
factor = float(0.5f)*normal.y;
flux_i_density += factor*(ff_variable[VAR_MOMENTUM+1]+momentum_i.y);
flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].y+flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].y + flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].y + flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].y + flux_contribution_i_momentum_z.y);
factor = float(0.5f)*normal.z;
flux_i_density += factor*(ff_variable[VAR_MOMENTUM+2]+momentum_i.z);
flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].z+flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].z + flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].z + flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].z + flux_contribution_i_momentum_z.z);
}
}
fluxes[i + VAR_DENSITY*nelr] = flux_i_density;
fluxes[i + (VAR_MOMENTUM+0)*nelr] = flux_i_momentum.x;
fluxes[i + (VAR_MOMENTUM+1)*nelr] = flux_i_momentum.y;
fluxes[i + (VAR_MOMENTUM+2)*nelr] = flux_i_momentum.z;
fluxes[i + VAR_DENSITY_ENERGY*nelr] = flux_i_density_energy;
}
void compute_flux(int nelr, int* elements_surrounding_elements, float* normals, float* variables, float* fluxes)
{
dim3 Dg(nelr / BLOCK_SIZE_3), Db(BLOCK_SIZE_3);
hipLaunchKernelGGL(( cuda_compute_flux), dim3(Dg),dim3(Db), 0, 0, nelr, elements_surrounding_elements, normals, variables, fluxes);
getLastCudaError("compute_flux failed");
}
__global__ void cuda_time_step(int j, int nelr, float* old_variables, float* variables, float* step_factors, float* fluxes)
{
const int i = (blockDim.x*blockIdx.x + threadIdx.x);
float factor = step_factors[i]/float(RK+1-j);
variables[i + VAR_DENSITY*nelr] = old_variables[i + VAR_DENSITY*nelr] + factor*fluxes[i + VAR_DENSITY*nelr];
variables[i + VAR_DENSITY_ENERGY*nelr] = old_variables[i + VAR_DENSITY_ENERGY*nelr] + factor*fluxes[i + VAR_DENSITY_ENERGY*nelr];
variables[i + (VAR_MOMENTUM+0)*nelr] = old_variables[i + (VAR_MOMENTUM+0)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+0)*nelr];
variables[i + (VAR_MOMENTUM+1)*nelr] = old_variables[i + (VAR_MOMENTUM+1)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+1)*nelr];
variables[i + (VAR_MOMENTUM+2)*nelr] = old_variables[i + (VAR_MOMENTUM+2)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+2)*nelr];
//~ variables[i + (VAR_MOMENTUM+0)*nelr] = fluxes[i + (VAR_MOMENTUM+0)*nelr];// old_variables[i + (VAR_MOMENTUM+0)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+0)*nelr];
//~ variables[i + (VAR_MOMENTUM+1)*nelr] = fluxes[i + (VAR_MOMENTUM+1)*nelr];//old_variables[i + (VAR_MOMENTUM+1)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+1)*nelr];
//~ variables[i + (VAR_MOMENTUM+2)*nelr] = fluxes[i + (VAR_MOMENTUM+2)*nelr];//old_variables[i + (VAR_MOMENTUM+2)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+2)*nelr];
}
void time_step(int j, int nelr, float* old_variables, float* variables, float* step_factors, float* fluxes)
{
dim3 Dg(nelr / BLOCK_SIZE_4), Db(BLOCK_SIZE_4);
hipLaunchKernelGGL(( cuda_time_step), dim3(Dg),dim3(Db), 0, 0, j, nelr, old_variables, variables, step_factors, fluxes);
getLastCudaError("update failed");
}
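// Note on the staging above: with RK == 3 the per-stage factor
// step_factors[i]/float(RK+1-j) takes the values dt/4, dt/3, dt/2 for j = 0,1,2
// (step_factors already carries the 1/area), and every stage restarts from the
// same old_variables -- a Jameson-style multistage relaxation rather than a
// classical Runge-Kutta combination of stages.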
/*
* Main function
*/
int main(int argc, char** argv)
{
printf("WG size of kernel:initialize = %d, WG size of kernel:compute_step_factor = %d, WG size of kernel:compute_flux = %d, WG size of kernel:time_step = %d\n", BLOCK_SIZE_1, BLOCK_SIZE_2, BLOCK_SIZE_3, BLOCK_SIZE_4);
if (argc < 2)
{
std::cout << "specify data file name" << std::endl;
return 0;
}
const char* data_file_name = argv[1];
hipDeviceProp_t prop;
int dev;
checkCudaErrors(hipSetDevice(0));
checkCudaErrors(hipGetDevice(&dev));
checkCudaErrors(hipGetDeviceProperties(&prop, dev));
printf("Name: %s\n", prop.name);
// set far field conditions and load them into constant memory on the gpu
{
float h_ff_variable[NVAR];
const float angle_of_attack = float(3.1415926535897931 / 180.0f) * float(deg_angle_of_attack);
h_ff_variable[VAR_DENSITY] = float(1.4);
float ff_pressure = float(1.0f);
float ff_speed_of_sound = sqrt(GAMMA*ff_pressure / h_ff_variable[VAR_DENSITY]);
float ff_speed = float(ff_mach)*ff_speed_of_sound;
float3 ff_velocity;
ff_velocity.x = ff_speed*float(cos((float)angle_of_attack));
ff_velocity.y = ff_speed*float(sin((float)angle_of_attack));
ff_velocity.z = 0.0f;
h_ff_variable[VAR_MOMENTUM+0] = h_ff_variable[VAR_DENSITY] * ff_velocity.x;
h_ff_variable[VAR_MOMENTUM+1] = h_ff_variable[VAR_DENSITY] * ff_velocity.y;
h_ff_variable[VAR_MOMENTUM+2] = h_ff_variable[VAR_DENSITY] * ff_velocity.z;
h_ff_variable[VAR_DENSITY_ENERGY] = h_ff_variable[VAR_DENSITY]*(float(0.5f)*(ff_speed*ff_speed)) + (ff_pressure / float(GAMMA-1.0f));
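		// Total energy per unit volume of an ideal gas: E = rho*|v|^2/2 + p/(gamma-1).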
float3 h_ff_momentum;
h_ff_momentum.x = *(h_ff_variable+VAR_MOMENTUM+0);
h_ff_momentum.y = *(h_ff_variable+VAR_MOMENTUM+1);
h_ff_momentum.z = *(h_ff_variable+VAR_MOMENTUM+2);
float3 h_ff_flux_contribution_momentum_x;
float3 h_ff_flux_contribution_momentum_y;
float3 h_ff_flux_contribution_momentum_z;
float3 h_ff_flux_contribution_density_energy;
compute_flux_contribution(h_ff_variable[VAR_DENSITY], h_ff_momentum, h_ff_variable[VAR_DENSITY_ENERGY], ff_pressure, ff_velocity, h_ff_flux_contribution_momentum_x, h_ff_flux_contribution_momentum_y, h_ff_flux_contribution_momentum_z, h_ff_flux_contribution_density_energy);
// copy far field conditions to the gpu
checkCudaErrors( hipMemcpyToSymbol(ff_variable, h_ff_variable, NVAR*sizeof(float)) );
checkCudaErrors( hipMemcpyToSymbol(ff_flux_contribution_momentum_x, &h_ff_flux_contribution_momentum_x, sizeof(float3)) );
checkCudaErrors( hipMemcpyToSymbol(ff_flux_contribution_momentum_y, &h_ff_flux_contribution_momentum_y, sizeof(float3)) );
checkCudaErrors( hipMemcpyToSymbol(ff_flux_contribution_momentum_z, &h_ff_flux_contribution_momentum_z, sizeof(float3)) );
checkCudaErrors( hipMemcpyToSymbol(ff_flux_contribution_density_energy, &h_ff_flux_contribution_density_energy, sizeof(float3)) );
}
int nel;
int nelr;
// read in domain geometry
float* areas;
int* elements_surrounding_elements;
float* normals;
{
std::ifstream file(data_file_name);
file >> nel;
nelr = BLOCK_SIZE_0*((nel / BLOCK_SIZE_0 )+ ::min(1, nel % BLOCK_SIZE_0));
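		// Rounds nel up to the next multiple of BLOCK_SIZE_0 so every kernel
		// launches with full blocks; e.g. nel=1000, BLOCK_SIZE_0=192 gives
		// nelr = 192*(5+1) = 1152. Padding elements are filled in below by
		// duplicating the last real element.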
float* h_areas = new float[nelr];
int* h_elements_surrounding_elements = new int[nelr*NNB];
float* h_normals = new float[nelr*NDIM*NNB];
// read in data
for(int i = 0; i < nel; i++)
{
file >> h_areas[i];
for(int j = 0; j < NNB; j++)
{
file >> h_elements_surrounding_elements[i + j*nelr];
if(h_elements_surrounding_elements[i+j*nelr] < 0) h_elements_surrounding_elements[i+j*nelr] = -1;
h_elements_surrounding_elements[i + j*nelr]--; //it's coming in with Fortran numbering
for(int k = 0; k < NDIM; k++)
{
file >> h_normals[i + (j + k*NNB)*nelr];
h_normals[i + (j + k*NNB)*nelr] = -h_normals[i + (j + k*NNB)*nelr];
}
}
}
// fill in remaining data
int last = nel-1;
for(int i = nel; i < nelr; i++)
{
h_areas[i] = h_areas[last];
for(int j = 0; j < NNB; j++)
{
// duplicate the last element
h_elements_surrounding_elements[i + j*nelr] = h_elements_surrounding_elements[last + j*nelr];
			for(int k = 0; k < NDIM; k++) h_normals[i + (j + k*NNB)*nelr] = h_normals[last + (j + k*NNB)*nelr];
}
}
areas = alloc<float>(nelr);
upload<float>(areas, h_areas, nelr);
elements_surrounding_elements = alloc<int>(nelr*NNB);
upload<int>(elements_surrounding_elements, h_elements_surrounding_elements, nelr*NNB);
normals = alloc<float>(nelr*NDIM*NNB);
upload<float>(normals, h_normals, nelr*NDIM*NNB);
delete[] h_areas;
delete[] h_elements_surrounding_elements;
delete[] h_normals;
}
// Create arrays and set initial conditions
float* variables = alloc<float>(nelr*NVAR);
initialize_variables(nelr, variables);
float* old_variables = alloc<float>(nelr*NVAR);
float* fluxes = alloc<float>(nelr*NVAR);
float* step_factors = alloc<float>(nelr);
	// make sure all memory is really allocated before we start timing
initialize_variables(nelr, old_variables);
initialize_variables(nelr, fluxes);
hipMemset( (void*) step_factors, 0, sizeof(float)*nelr );
// make sure CUDA isn't still doing something before we start timing
hipDeviceSynchronize();
// these need to be computed the first time in order to compute time step
std::cout << "Starting..." << std::endl;
StopWatchInterface *timer = 0;
// unsigned int timer = 0;
// CUT_SAFE_CALL( cutCreateTimer( &timer));
// CUT_SAFE_CALL( cutStartTimer( timer));
sdkCreateTimer(&timer);
sdkStartTimer(&timer);
// Begin iterations
for(int i = 0; i < iterations; i++)
{
copy<float>(old_variables, variables, nelr*NVAR);
		// compute the time step (recomputed at every outer iteration)
compute_step_factor(nelr, variables, areas, step_factors);
getLastCudaError("compute_step_factor failed");
//~ int j = 0;
for(int j = 0; j < RK; j++)
{
compute_flux(nelr, elements_surrounding_elements, normals, variables, fluxes);
getLastCudaError("compute_flux failed");
time_step(j, nelr, old_variables, variables, step_factors, fluxes);
getLastCudaError("time_step failed");
}
}
hipDeviceSynchronize();
// CUT_SAFE_CALL( cutStopTimer(timer) );
sdkStopTimer(&timer);
std::cout << (sdkGetAverageTimerValue(&timer)/1000.0) / iterations << " seconds per iteration" << std::endl;
std::cout << "Saving solution..." << std::endl;
dump(variables, nel, nelr);
std::cout << "Saved solution..." << std::endl;
std::cout << "Cleaning up..." << std::endl;
dealloc<float>(areas);
dealloc<int>(elements_surrounding_elements);
dealloc<float>(normals);
dealloc<float>(variables);
dealloc<float>(old_variables);
dealloc<float>(fluxes);
dealloc<float>(step_factors);
std::cout << "Done..." << std::endl;
return 0;
}
| 9bf02a848ac428de712e2f9e14f5d31a790faa1f.cu | // Copyright 2009, Andrew Corrigan, [email protected]
// This code is from the AIAA-2009-4001 paper
//#include <cutil.h>
#include <helper_cuda.h>
#include <helper_timer.h>
#include <iostream>
#include <fstream>
/*
* Options
*
*/
#define GAMMA 1.4f
#define iterations 2000
// #ifndef block_length
// #define block_length 192
// #endif
#define NDIM 3
//~ #define NNB 4
#define NNB 2
#define RK 3 // 3rd order RK
#define ff_mach 1.2f
#define deg_angle_of_attack 0.0f
/*
* not options
*/
#ifdef RD_WG_SIZE_0_0
#define BLOCK_SIZE_0 RD_WG_SIZE_0_0
#elif defined(RD_WG_SIZE_0)
#define BLOCK_SIZE_0 RD_WG_SIZE_0
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_0 RD_WG_SIZE
#else
#define BLOCK_SIZE_0 192
#endif
#ifdef RD_WG_SIZE_1_0
#define BLOCK_SIZE_1 RD_WG_SIZE_1_0
#elif defined(RD_WG_SIZE_1)
#define BLOCK_SIZE_1 RD_WG_SIZE_1
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_1 RD_WG_SIZE
#else
#define BLOCK_SIZE_1 192
#endif
#ifdef RD_WG_SIZE_2_0
#define BLOCK_SIZE_2 RD_WG_SIZE_2_0
#elif defined(RD_WG_SIZE_2)
#define BLOCK_SIZE_2 RD_WG_SIZE_2
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_2 RD_WG_SIZE
#else
#define BLOCK_SIZE_2 192
#endif
#ifdef RD_WG_SIZE_3_0
#define BLOCK_SIZE_3 RD_WG_SIZE_3_0
#elif defined(RD_WG_SIZE_3)
#define BLOCK_SIZE_3 RD_WG_SIZE_3
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_3 RD_WG_SIZE
#else
#define BLOCK_SIZE_3 192
#endif
#ifdef RD_WG_SIZE_4_0
#define BLOCK_SIZE_4 RD_WG_SIZE_4_0
#elif defined(RD_WG_SIZE_4)
#define BLOCK_SIZE_4 RD_WG_SIZE_4
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_4 RD_WG_SIZE
#else
#define BLOCK_SIZE_4 192
#endif
// #if block_length > 128
// #warning "the kernels may fail to launch on some systems if the block length is too large"
// #endif
#define VAR_DENSITY 0
#define VAR_MOMENTUM 1
#define VAR_DENSITY_ENERGY (VAR_MOMENTUM+NDIM)
#define NVAR (VAR_DENSITY_ENERGY+1)
/*
* Generic functions
*/
template <typename T>
T* alloc(int N)
{
T* t;
checkCudaErrors(cudaMalloc((void**)&t, sizeof(T)*N));
return t;
}
template <typename T>
void dealloc(T* array)
{
checkCudaErrors(cudaFree((void*)array));
}
template <typename T>
void copy(T* dst, T* src, int N)
{
checkCudaErrors(cudaMemcpy((void*)dst, (void*)src, N*sizeof(T), cudaMemcpyDeviceToDevice));
}
template <typename T>
void upload(T* dst, T* src, int N)
{
checkCudaErrors(cudaMemcpy((void*)dst, (void*)src, N*sizeof(T), cudaMemcpyHostToDevice));
}
template <typename T>
void download(T* dst, T* src, int N)
{
checkCudaErrors(cudaMemcpy((void*)dst, (void*)src, N*sizeof(T), cudaMemcpyDeviceToHost));
}
void dump(float* variables, int nel, int nelr)
{
float* h_variables = new float[nelr*NVAR];
download(h_variables, variables, nelr*NVAR);
{
std::ofstream file("density");
file << nel << " " << nelr << std::endl;
for(int i = 0; i < nel; i++) file << h_variables[i + VAR_DENSITY*nelr] << std::endl;
}
{
std::ofstream file("momentum");
file << nel << " " << nelr << std::endl;
for(int i = 0; i < nel; i++)
{
for(int j = 0; j != NDIM; j++)
file << h_variables[i + (VAR_MOMENTUM+j)*nelr] << " ";
file << std::endl;
}
}
{
std::ofstream file("density_energy");
file << nel << " " << nelr << std::endl;
for(int i = 0; i < nel; i++) file << h_variables[i + VAR_DENSITY_ENERGY*nelr] << std::endl;
}
delete[] h_variables;
}
/*
* Element-based Cell-centered FVM solver functions
*/
__constant__ float ff_variable[NVAR];
__constant__ float3 ff_flux_contribution_momentum_x[1];
__constant__ float3 ff_flux_contribution_momentum_y[1];
__constant__ float3 ff_flux_contribution_momentum_z[1];
__constant__ float3 ff_flux_contribution_density_energy[1];
__global__ void cuda_initialize_variables(int nelr, float* variables)
{
const int i = (blockDim.x*blockIdx.x + threadIdx.x);
for(int j = 0; j < NVAR; j++)
variables[i + j*nelr] = ff_variable[j];
}
void initialize_variables(int nelr, float* variables)
{
dim3 Dg(nelr / BLOCK_SIZE_1), Db(BLOCK_SIZE_1);
cuda_initialize_variables<<<Dg, Db>>>(nelr, variables);
getLastCudaError("initialize_variables failed");
}
__device__ __host__ inline void compute_flux_contribution(float& density, float3& momentum, float& density_energy, float& pressure, float3& velocity, float3& fc_momentum_x, float3& fc_momentum_y, float3& fc_momentum_z, float3& fc_density_energy)
{
fc_momentum_x.x = velocity.x*momentum.x + pressure;
fc_momentum_x.y = velocity.x*momentum.y;
fc_momentum_x.z = velocity.x*momentum.z;
fc_momentum_y.x = fc_momentum_x.y;
fc_momentum_y.y = velocity.y*momentum.y + pressure;
fc_momentum_y.z = velocity.y*momentum.z;
fc_momentum_z.x = fc_momentum_x.z;
fc_momentum_z.y = fc_momentum_y.z;
fc_momentum_z.z = velocity.z*momentum.z + pressure;
float de_p = density_energy+pressure;
fc_density_energy.x = velocity.x*de_p;
fc_density_energy.y = velocity.y*de_p;
fc_density_energy.z = velocity.z*de_p;
}
__device__ inline void compute_velocity(float& density, float3& momentum, float3& velocity)
{
velocity.x = momentum.x / density;
velocity.y = momentum.y / density;
velocity.z = momentum.z / density;
}
__device__ inline float compute_speed_sqd(float3& velocity)
{
return velocity.x*velocity.x + velocity.y*velocity.y + velocity.z*velocity.z;
}
__device__ inline float compute_pressure(float& density, float& density_energy, float& speed_sqd)
{
return (float(GAMMA)-float(1.0f))*(density_energy - float(0.5f)*density*speed_sqd);
}
__device__ inline float compute_speed_of_sound(float& density, float& pressure)
{
return sqrtf(float(GAMMA)*pressure/density);
}
__global__ void cuda_compute_step_factor(int nelr, float* variables, float* areas, float* step_factors)
{
const int i = (blockDim.x*blockIdx.x + threadIdx.x);
float density = variables[i + VAR_DENSITY*nelr];
float3 momentum;
momentum.x = variables[i + (VAR_MOMENTUM+0)*nelr];
momentum.y = variables[i + (VAR_MOMENTUM+1)*nelr];
momentum.z = variables[i + (VAR_MOMENTUM+2)*nelr];
float density_energy = variables[i + VAR_DENSITY_ENERGY*nelr];
float3 velocity; compute_velocity(density, momentum, velocity);
float speed_sqd = compute_speed_sqd(velocity);
float pressure = compute_pressure(density, density_energy, speed_sqd);
float speed_of_sound = compute_speed_of_sound(density, pressure);
	// dt = float(0.5f) * sqrtf(areas[i]) / (||v|| + c), but the time stepping later divides by the area, so we fold it in here: dt/area = float(0.5f) / (sqrtf(areas[i]) * (||v|| + c))
step_factors[i] = float(0.5f) / (sqrtf(areas[i]) * (sqrtf(speed_sqd) + speed_of_sound));
}
void compute_step_factor(int nelr, float* variables, float* areas, float* step_factors)
{
dim3 Dg(nelr / BLOCK_SIZE_2), Db(BLOCK_SIZE_2);
cuda_compute_step_factor<<<Dg, Db>>>(nelr, variables, areas, step_factors);
getLastCudaError("compute_step_factor failed");
}
/*
*
*
*/
__global__ void cuda_compute_flux(int nelr, int* elements_surrounding_elements, float* normals, float* variables, float* fluxes)
{
const float smoothing_coefficient = float(0.2f);
const int i = (blockDim.x*blockIdx.x + threadIdx.x);
int j, nb;
float3 normal; float normal_len;
float factor;
float density_i = variables[i + VAR_DENSITY*nelr];
float3 momentum_i;
momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr];
momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr];
momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr];
float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr];
float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i);
float speed_sqd_i = compute_speed_sqd(velocity_i);
float speed_i = sqrtf(speed_sqd_i);
float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);
float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z;
float3 flux_contribution_i_density_energy;
compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy);
float flux_i_density = float(0.0f);
float3 flux_i_momentum;
flux_i_momentum.x = float(0.0f);
flux_i_momentum.y = float(0.0f);
flux_i_momentum.z = float(0.0f);
float flux_i_density_energy = float(0.0f);
float3 velocity_nb;
float density_nb, density_energy_nb;
float3 momentum_nb;
float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z;
float3 flux_contribution_nb_density_energy;
float speed_sqd_nb, speed_of_sound_nb, pressure_nb;
#pragma unroll
for(j = 0; j < NNB; j++)
//~ for(j = 1; j < 4; j++)
{
nb = elements_surrounding_elements[i + j*nelr];
normal.x = normals[i + (j + 0*NNB)*nelr];
normal.y = normals[i + (j + 1*NNB)*nelr];
normal.z = normals[i + (j + 2*NNB)*nelr];
normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z);
if(nb >= 0) // a legitimate neighbor
{
density_nb = variables[nb + VAR_DENSITY*nelr];
momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr];
momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr];
momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr];
density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr];
compute_velocity(density_nb, momentum_nb, velocity_nb);
speed_sqd_nb = compute_speed_sqd(velocity_nb);
pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);
speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);
compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy);
// artificial viscosity
factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);
flux_i_density += factor*(density_i-density_nb);
flux_i_density_energy += factor*(density_energy_i-density_energy_nb);
flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x);
flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y);
flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z);
// accumulate cell-centered fluxes
factor = float(0.5f)*normal.x;
flux_i_density += factor*(momentum_nb.x+momentum_i.x);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x);
factor = float(0.5f)*normal.y;
flux_i_density += factor*(momentum_nb.y+momentum_i.y);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y);
factor = float(0.5f)*normal.z;
flux_i_density += factor*(momentum_nb.z+momentum_i.z);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z);
}
else if(nb == -1) // a wing boundary
{
flux_i_momentum.x += normal.x*pressure_i;
flux_i_momentum.y += normal.y*pressure_i;
flux_i_momentum.z += normal.z*pressure_i;
}
else if(nb == -2) // a far field boundary
{
factor = float(0.5f)*normal.x;
flux_i_density += factor*(ff_variable[VAR_MOMENTUM+0]+momentum_i.x);
flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].x+flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].x + flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].x + flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].x + flux_contribution_i_momentum_z.x);
factor = float(0.5f)*normal.y;
flux_i_density += factor*(ff_variable[VAR_MOMENTUM+1]+momentum_i.y);
flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].y+flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].y + flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].y + flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].y + flux_contribution_i_momentum_z.y);
factor = float(0.5f)*normal.z;
flux_i_density += factor*(ff_variable[VAR_MOMENTUM+2]+momentum_i.z);
flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].z+flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].z + flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].z + flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].z + flux_contribution_i_momentum_z.z);
}
}
fluxes[i + VAR_DENSITY*nelr] = flux_i_density;
fluxes[i + (VAR_MOMENTUM+0)*nelr] = flux_i_momentum.x;
fluxes[i + (VAR_MOMENTUM+1)*nelr] = flux_i_momentum.y;
fluxes[i + (VAR_MOMENTUM+2)*nelr] = flux_i_momentum.z;
fluxes[i + VAR_DENSITY_ENERGY*nelr] = flux_i_density_energy;
}
void compute_flux(int nelr, int* elements_surrounding_elements, float* normals, float* variables, float* fluxes)
{
dim3 Dg(nelr / BLOCK_SIZE_3), Db(BLOCK_SIZE_3);
cuda_compute_flux<<<Dg,Db>>>(nelr, elements_surrounding_elements, normals, variables, fluxes);
getLastCudaError("compute_flux failed");
}
__global__ void cuda_time_step(int j, int nelr, float* old_variables, float* variables, float* step_factors, float* fluxes)
{
const int i = (blockDim.x*blockIdx.x + threadIdx.x);
float factor = step_factors[i]/float(RK+1-j);
variables[i + VAR_DENSITY*nelr] = old_variables[i + VAR_DENSITY*nelr] + factor*fluxes[i + VAR_DENSITY*nelr];
variables[i + VAR_DENSITY_ENERGY*nelr] = old_variables[i + VAR_DENSITY_ENERGY*nelr] + factor*fluxes[i + VAR_DENSITY_ENERGY*nelr];
variables[i + (VAR_MOMENTUM+0)*nelr] = old_variables[i + (VAR_MOMENTUM+0)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+0)*nelr];
variables[i + (VAR_MOMENTUM+1)*nelr] = old_variables[i + (VAR_MOMENTUM+1)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+1)*nelr];
variables[i + (VAR_MOMENTUM+2)*nelr] = old_variables[i + (VAR_MOMENTUM+2)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+2)*nelr];
//~ variables[i + (VAR_MOMENTUM+0)*nelr] = fluxes[i + (VAR_MOMENTUM+0)*nelr];// old_variables[i + (VAR_MOMENTUM+0)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+0)*nelr];
//~ variables[i + (VAR_MOMENTUM+1)*nelr] = fluxes[i + (VAR_MOMENTUM+1)*nelr];//old_variables[i + (VAR_MOMENTUM+1)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+1)*nelr];
//~ variables[i + (VAR_MOMENTUM+2)*nelr] = fluxes[i + (VAR_MOMENTUM+2)*nelr];//old_variables[i + (VAR_MOMENTUM+2)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+2)*nelr];
}
void time_step(int j, int nelr, float* old_variables, float* variables, float* step_factors, float* fluxes)
{
dim3 Dg(nelr / BLOCK_SIZE_4), Db(BLOCK_SIZE_4);
cuda_time_step<<<Dg,Db>>>(j, nelr, old_variables, variables, step_factors, fluxes);
getLastCudaError("update failed");
}
/*
* Main function
*/
int main(int argc, char** argv)
{
printf("WG size of kernel:initialize = %d, WG size of kernel:compute_step_factor = %d, WG size of kernel:compute_flux = %d, WG size of kernel:time_step = %d\n", BLOCK_SIZE_1, BLOCK_SIZE_2, BLOCK_SIZE_3, BLOCK_SIZE_4);
if (argc < 2)
{
std::cout << "specify data file name" << std::endl;
return 0;
}
const char* data_file_name = argv[1];
cudaDeviceProp prop;
int dev;
checkCudaErrors(cudaSetDevice(0));
checkCudaErrors(cudaGetDevice(&dev));
checkCudaErrors(cudaGetDeviceProperties(&prop, dev));
printf("Name: %s\n", prop.name);
// set far field conditions and load them into constant memory on the gpu
{
float h_ff_variable[NVAR];
const float angle_of_attack = float(3.1415926535897931 / 180.0f) * float(deg_angle_of_attack);
h_ff_variable[VAR_DENSITY] = float(1.4);
float ff_pressure = float(1.0f);
float ff_speed_of_sound = sqrt(GAMMA*ff_pressure / h_ff_variable[VAR_DENSITY]);
float ff_speed = float(ff_mach)*ff_speed_of_sound;
float3 ff_velocity;
ff_velocity.x = ff_speed*float(cos((float)angle_of_attack));
ff_velocity.y = ff_speed*float(sin((float)angle_of_attack));
ff_velocity.z = 0.0f;
h_ff_variable[VAR_MOMENTUM+0] = h_ff_variable[VAR_DENSITY] * ff_velocity.x;
h_ff_variable[VAR_MOMENTUM+1] = h_ff_variable[VAR_DENSITY] * ff_velocity.y;
h_ff_variable[VAR_MOMENTUM+2] = h_ff_variable[VAR_DENSITY] * ff_velocity.z;
h_ff_variable[VAR_DENSITY_ENERGY] = h_ff_variable[VAR_DENSITY]*(float(0.5f)*(ff_speed*ff_speed)) + (ff_pressure / float(GAMMA-1.0f));
float3 h_ff_momentum;
h_ff_momentum.x = *(h_ff_variable+VAR_MOMENTUM+0);
h_ff_momentum.y = *(h_ff_variable+VAR_MOMENTUM+1);
h_ff_momentum.z = *(h_ff_variable+VAR_MOMENTUM+2);
float3 h_ff_flux_contribution_momentum_x;
float3 h_ff_flux_contribution_momentum_y;
float3 h_ff_flux_contribution_momentum_z;
float3 h_ff_flux_contribution_density_energy;
compute_flux_contribution(h_ff_variable[VAR_DENSITY], h_ff_momentum, h_ff_variable[VAR_DENSITY_ENERGY], ff_pressure, ff_velocity, h_ff_flux_contribution_momentum_x, h_ff_flux_contribution_momentum_y, h_ff_flux_contribution_momentum_z, h_ff_flux_contribution_density_energy);
// copy far field conditions to the gpu
checkCudaErrors( cudaMemcpyToSymbol(ff_variable, h_ff_variable, NVAR*sizeof(float)) );
checkCudaErrors( cudaMemcpyToSymbol(ff_flux_contribution_momentum_x, &h_ff_flux_contribution_momentum_x, sizeof(float3)) );
checkCudaErrors( cudaMemcpyToSymbol(ff_flux_contribution_momentum_y, &h_ff_flux_contribution_momentum_y, sizeof(float3)) );
checkCudaErrors( cudaMemcpyToSymbol(ff_flux_contribution_momentum_z, &h_ff_flux_contribution_momentum_z, sizeof(float3)) );
checkCudaErrors( cudaMemcpyToSymbol(ff_flux_contribution_density_energy, &h_ff_flux_contribution_density_energy, sizeof(float3)) );
}
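// For reference, the __constant__ symbols targeted by cudaMemcpyToSymbol above
// are declared at file scope earlier in this file; a plausible reconstruction
// (shown here only for illustration):
// __constant__ float ff_variable[NVAR];
// __constant__ float3 ff_flux_contribution_momentum_x;
// __constant__ float3 ff_flux_contribution_momentum_y;
// __constant__ float3 ff_flux_contribution_momentum_z;
// __constant__ float3 ff_flux_contribution_density_energy;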
int nel;
int nelr;
// read in domain geometry
float* areas;
int* elements_surrounding_elements;
float* normals;
{
std::ifstream file(data_file_name);
file >> nel;
nelr = BLOCK_SIZE_0*((nel / BLOCK_SIZE_0 )+ std::min(1, nel % BLOCK_SIZE_0));
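// nelr rounds nel up to a whole number of BLOCK_SIZE_0 blocks so every launch
// covers full blocks; e.g. with nel = 97123 and BLOCK_SIZE_0 = 192 (illustrative
// values), nelr = 192*(505+1) = 97152.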
float* h_areas = new float[nelr];
int* h_elements_surrounding_elements = new int[nelr*NNB];
float* h_normals = new float[nelr*NDIM*NNB];
// read in data
for(int i = 0; i < nel; i++)
{
file >> h_areas[i];
for(int j = 0; j < NNB; j++)
{
file >> h_elements_surrounding_elements[i + j*nelr];
if(h_elements_surrounding_elements[i+j*nelr] < 0) h_elements_surrounding_elements[i+j*nelr] = -1;
h_elements_surrounding_elements[i + j*nelr]--; //it's coming in with Fortran numbering
for(int k = 0; k < NDIM; k++)
{
file >> h_normals[i + (j + k*NNB)*nelr];
h_normals[i + (j + k*NNB)*nelr] = -h_normals[i + (j + k*NNB)*nelr];
}
}
}
// fill in remaining data
int last = nel-1;
for(int i = nel; i < nelr; i++)
{
h_areas[i] = h_areas[last];
for(int j = 0; j < NNB; j++)
{
// duplicate the last element
h_elements_surrounding_elements[i + j*nelr] = h_elements_surrounding_elements[last + j*nelr];
for(int k = 0; k < NDIM; k++) h_normals[i + (j + k*NNB)*nelr] = h_normals[last + (j + k*NNB)*nelr]; // copy into padded element i (the original line self-assigned h_normals[last])
}
}
areas = alloc<float>(nelr);
upload<float>(areas, h_areas, nelr);
elements_surrounding_elements = alloc<int>(nelr*NNB);
upload<int>(elements_surrounding_elements, h_elements_surrounding_elements, nelr*NNB);
normals = alloc<float>(nelr*NDIM*NNB);
upload<float>(normals, h_normals, nelr*NDIM*NNB);
delete[] h_areas;
delete[] h_elements_surrounding_elements;
delete[] h_normals;
}
// Create arrays and set initial conditions
float* variables = alloc<float>(nelr*NVAR);
initialize_variables(nelr, variables);
float* old_variables = alloc<float>(nelr*NVAR);
float* fluxes = alloc<float>(nelr*NVAR);
float* step_factors = alloc<float>(nelr);
// make sure all memory is really allocated before we start timing
initialize_variables(nelr, old_variables);
initialize_variables(nelr, fluxes);
cudaMemset( (void*) step_factors, 0, sizeof(float)*nelr );
// make sure CUDA isn't still doing something before we start timing
cudaThreadSynchronize();
// these need to be computed the first time in order to compute time step
std::cout << "Starting..." << std::endl;
StopWatchInterface *timer = 0;
// unsigned int timer = 0;
// CUT_SAFE_CALL( cutCreateTimer( &timer));
// CUT_SAFE_CALL( cutStartTimer( timer));
sdkCreateTimer(&timer);
sdkStartTimer(&timer);
// Begin iterations
for(int i = 0; i < iterations; i++)
{
copy<float>(old_variables, variables, nelr*NVAR);
// for the first iteration we compute the time step
compute_step_factor(nelr, variables, areas, step_factors);
getLastCudaError("compute_step_factor failed");
//~ int j = 0;
for(int j = 0; j < RK; j++)
{
compute_flux(nelr, elements_surrounding_elements, normals, variables, fluxes);
getLastCudaError("compute_flux failed");
time_step(j, nelr, old_variables, variables, step_factors, fluxes);
getLastCudaError("time_step failed");
}
}
cudaThreadSynchronize();
// CUT_SAFE_CALL( cutStopTimer(timer) );
sdkStopTimer(&timer);
std::cout << (sdkGetAverageTimerValue(&timer)/1000.0) / iterations << " seconds per iteration" << std::endl;
std::cout << "Saving solution..." << std::endl;
dump(variables, nel, nelr);
std::cout << "Saved solution..." << std::endl;
std::cout << "Cleaning up..." << std::endl;
dealloc<float>(areas);
dealloc<int>(elements_surrounding_elements);
dealloc<float>(normals);
dealloc<float>(variables);
dealloc<float>(old_variables);
dealloc<float>(fluxes);
dealloc<float>(step_factors);
std::cout << "Done..." << std::endl;
return 0;
}
|
5900be9a147c6212a0c5acf94db232e18d215166.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Transpose.cu
// MNN
//
// Created by MNN on b'2021/12/09'.
// Copyright © 2018, Alibaba Group Holding Limited
//
#include "Transpose_hip.cuh"
#include "core/Macro.h"
#include "MNNCUDADefine.hpp"
#include "MNNCUDAFunction.cuh"
namespace MNN {
namespace CUDA {
template<typename T0, typename T1>
__global__ void UNPACKCOMMON_4(const T0 *input, T1 *output,
const int total, int inside, int axis, int outside,
int insideStride, int axisStride, int axisAlign,
DivModFast is, DivModFast cs
) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < total; i += blockDim.x * gridDim.x) {
int tmp, x, y, z;
cs.divmod(i, tmp, y);
is.divmod(tmp, z, x);
if (y < axis) {
int srcOffset = (z * inside + x) * axisAlign + y; // NHWC8, inside <-> HW, outside <-> N
int dstOffset = x * insideStride + y * axisStride + z * inside * axis;
output[dstOffset] = input[srcOffset];
}
}
}
template<typename T0, typename T1>
__global__ void UNPACKCOMMON(const T0 *input, T1 *output,
int inside, int axis, int outside,
int insideStride, int axisStride
) {
int axisAlign = UP_DIV(axis, PACK_NUMBER) * PACK_NUMBER;
int total = axisAlign * inside * outside;
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < total; i += blockDim.x * gridDim.x) {
int tmpI = i / axisAlign;
int y = i % axisAlign;
int x = tmpI % inside;
int z = tmpI / inside;
int srcOffset = (z * inside + x) * axisAlign + y; // NHWC8, inside <-> HW, outside <-> N
int dstOffset = x * insideStride + y * axisStride + z * inside * axis;
if (y < axis) {
output[dstOffset] = input[srcOffset];
}
}
}
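// The UNPACK* kernels above gather from the channel-padded NHWC8 layout into a
// strided dense tensor; the PACK* kernels below do the inverse, zero-filling the
// padded channel lanes (y >= axis) so consumers can read full PACK_NUMBER-wide
// vectors safely.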
template<typename T0, typename T1>
__global__ void PACKCOMMON_4(const T0 *input, T1 *output,
int inside, int axis, int outside,
int insideStride, int axisStride,
DivModFast is, DivModFast cs
) {
int axisAlign = UP_DIV(axis, PACK_NUMBER / 4) * PACK_NUMBER / 4;
int total = axisAlign * inside * outside;
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < total; i += blockDim.x * gridDim.x) {
int tmp, x, y, z;
cs.divmod(i, tmp, y);
is.divmod(tmp, z, x);
int dstOffset = (z * inside + x) * axisAlign + y;
int srcOffset = x * insideStride + y * axisStride + z * inside * axis;
if (y < axis) {
output[dstOffset] = input[srcOffset];
} else {
output[dstOffset] = {0, 0, 0, 0};
}
}
}
template<typename T0, typename T1>
__global__ void PACKCOMMON(const T0 *input, T1 *output,
int inside, int axis, int outside,
int insideStride, int axisStride
) {
int axisAlign = UP_DIV(axis, PACK_NUMBER) * PACK_NUMBER;
int total = axisAlign * inside * outside;
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < total; i += blockDim.x * gridDim.x) {
int tmpI = i / axisAlign;
int y = i % axisAlign;
int x = tmpI % inside;
int z = tmpI / inside;
int dstOffset = (z * inside + x) * axisAlign + y;
int srcOffset = x * insideStride + y * axisStride + z * inside * axis;
if (y < axis) {
output[dstOffset] = input[srcOffset];
} else {
output[dstOffset] = 0.0;
}
}
}
void PackBuffer(void* output, const void* input, const PackInfo* info, int bytes, CUDARuntime* runtime) {
auto& prop = runtime->prop();
int cores = prop.multiProcessorCount;
int threadNumbers = prop.maxThreadsPerBlock;
if (info->axis % 4 == 0 && info->axisStride == 1 && \
bytes == 4 && info->insideStride == info->axis) {
int axis_pack = UP_DIV(info->axis, PACK_NUMBER) * PACK_NUMBER / 4;
DivModFast is(info->inside);
DivModFast cs(axis_pack);
hipLaunchKernelGGL(( PACKCOMMON_4), dim3(cores), dim3(threadNumbers), 0, 0, (const int4*)input, (int4*)output,
info->inside, info->axis / 4, info->outside,
info->insideStride / 4, info->axisStride, is, cs);
return;
}
switch (bytes) {
case 4:
hipLaunchKernelGGL(( PACKCOMMON), dim3(cores), dim3(threadNumbers), 0, 0, (const float*)input, (float*)output,
info->inside, info->axis, info->outside,
info->insideStride, info->axisStride);
break;
case 2:
hipLaunchKernelGGL(( PACKCOMMON), dim3(cores), dim3(threadNumbers), 0, 0, (const half*)input, (half*)output,
info->inside, info->axis, info->outside,
info->insideStride, info->axisStride);
break;
case 1:
hipLaunchKernelGGL(( PACKCOMMON), dim3(cores), dim3(threadNumbers), 0, 0, (const int8_t*)input, (int8_t*)output,
info->inside, info->axis, info->outside,
info->insideStride, info->axisStride);
break;
default:
break;
}
}
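// Usage sketch (hypothetical values, for illustration only): packing a float
// tensor of shape N=2, C=3, HW=16 from dense NCHW into the channel-padded
// layout. In PACKCOMMON, x walks `inside`, y walks `axis` and z walks `outside`,
// so an NCHW source corresponds to insideStride = 1 and axisStride = HW:
//
// PackInfo info;
// info.inside = 16; // HW
// info.axis = 3; // C, padded up to PACK_NUMBER inside the kernel
// info.outside = 2; // N
// info.insideStride = 1;
// info.axisStride = 16;
// PackBuffer(dst, src, &info, sizeof(float), runtime);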
void UnpackBuffer(void* output, const void* input, const PackInfo* info, int bytes, CUDARuntime* runtime) {
auto& prop = runtime->prop();
int cores = prop.multiProcessorCount;
int threadNumbers = prop.maxThreadsPerBlock;
if (info->axis % 4 == 0 && info->axisStride == 1 && bytes == 4 && info->insideStride == info->axis) {
int axis_pack = UP_DIV(info->axis, PACK_NUMBER) * PACK_NUMBER / 4;
DivModFast is(info->inside);
DivModFast cs(axis_pack);
const int maxCount = info->inside * axis_pack * info->outside;
int block_num = runtime->blocks_num(maxCount);
int block_size = runtime->threads_num();
int axisAlign = UP_DIV(info->axis / 4, PACK_NUMBER / 4) * PACK_NUMBER / 4;
hipLaunchKernelGGL(( UNPACKCOMMON_4), dim3(block_num), dim3(block_size), 0, 0, (const int4*)input, (int4*)output,
maxCount, info->inside, info->axis / 4, info->outside,
info->insideStride / 4, info->axisStride, axisAlign, is, cs);
return;
}
switch (bytes) {
case 4:
hipLaunchKernelGGL(( UNPACKCOMMON), dim3(cores), dim3(threadNumbers), 0, 0, (const float*)input, (float*)output,
info->inside, info->axis, info->outside,
info->insideStride, info->axisStride);
break;
case 2:
hipLaunchKernelGGL(( UNPACKCOMMON), dim3(cores), dim3(threadNumbers), 0, 0, (const half*)input, (half*)output,
info->inside, info->axis, info->outside,
info->insideStride, info->axisStride);
break;
case 1:
hipLaunchKernelGGL(( UNPACKCOMMON), dim3(cores), dim3(threadNumbers), 0, 0, (const int8_t*)input, (int8_t*)output,
info->inside, info->axis, info->outside,
info->insideStride, info->axisStride);
break;
default:
break;
}
}
void PackFP32ToFP16(void* output, const void* input, const PackInfo* info, CUDARuntime* runtime) {
auto& prop = runtime->prop();
int cores = prop.multiProcessorCount;
int threadNumbers = prop.maxThreadsPerBlock;
hipLaunchKernelGGL(( PACKCOMMON), dim3(cores), dim3(threadNumbers), 0, 0, (const float*)input, (half*)output,
info->inside, info->axis, info->outside,
info->insideStride, info->axisStride);
}
void PackFP16ToFP32(void* output, const void* input, const PackInfo* info, CUDARuntime* runtime) {
auto& prop = runtime->prop();
int cores = prop.multiProcessorCount;
int threadNumbers = prop.maxThreadsPerBlock;
hipLaunchKernelGGL(( PACKCOMMON), dim3(cores), dim3(threadNumbers), 0, 0, (const half*)input, (float*)output,
info->inside, info->axis, info->outside,
info->insideStride, info->axisStride);
}
void UnpackFP16ToFP32(void* output, const void* input, const PackInfo* info, CUDARuntime* runtime) {
auto& prop = runtime->prop();
int cores = prop.multiProcessorCount;
int threadNumbers = prop.maxThreadsPerBlock;
hipLaunchKernelGGL(( UNPACKCOMMON), dim3(cores), dim3(threadNumbers), 0, 0, (const half*)input, (float*)output,
info->inside, info->axis, info->outside,
info->insideStride, info->axisStride);
}
void UnpackFP32ToFP16(void* output, const void* input, const PackInfo* info, CUDARuntime* runtime) {
auto& prop = runtime->prop();
int cores = prop.multiProcessorCount;
int threadNumbers = prop.maxThreadsPerBlock;
hipLaunchKernelGGL(( UNPACKCOMMON), dim3(cores), dim3(threadNumbers), 0, 0, (const float*)input, (half*)output,
info->inside, info->axis, info->outside,
info->insideStride, info->axisStride);
}
template<typename T>
__global__ void TRANSPOSE(const T *input, T *output, const TransposeParam* param) {
size_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < param->total) {
int x = i % param->dims[0];
int tmp = i / param->dims[0];
int y = tmp % param->dims[1];
int z = tmp / param->dims[1];
int srcOffset = param->srcStride * z + y + x * param->dims[2];
int dstOffset = param->dstStride * z + x + y * param->dims[3];
output[dstOffset] = input[srcOffset];
}
}
#define LOCAL_DIM 8
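// Tiled transpose: each block stages an 8x8 tile through shared memory and
// writes it back transposed. The "+ 1" column of padding on localM keeps the
// transposed reads from all hitting the same shared-memory bank.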
template <typename T>
__global__ void TRANSPOSE_LOCAL(const T* input, T *output, const TransposeParam* param) {
__shared__ T localM[LOCAL_DIM][LOCAL_DIM + 1];
int num = blockIdx.z;
for (int n = num; n < param->size; n += gridDim.z) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < param->dims[0] && y < param->dims[1]) {
int offset = n * param->srcStride + x * param->dims[2] + y;
localM[threadIdx.y][threadIdx.x] = input[offset];
}
__syncthreads();
x = blockIdx.y * blockDim.y + threadIdx.x;
y = blockIdx.x * blockDim.x + threadIdx.y;
if (x < param->dims[1] && y < param->dims[0]) {
int offset = n * param->dstStride + x * param->dims[3] + y;
output[offset] = localM[threadIdx.x][threadIdx.y];
}
}
}
void Transpose(uint8_t* output, const uint8_t* input, const TransposeParam* cpuParam, const TransposeParam* gpuRegion, int bytes, CUDARuntime* runtime) {
int count = cpuParam->total;
int block_num = runtime->blocks_num(count);
int threads_num = runtime->threads_num();
auto out = output + bytes * cpuParam->dstOffset;
auto inp = input + bytes * cpuParam->srcOffset;
if (runtime->prop().maxThreadsPerBlock >= LOCAL_DIM * LOCAL_DIM && (cpuParam->dims[0] >= LOCAL_DIM || cpuParam->dims[1] >= LOCAL_DIM)) {
dim3 localSize(LOCAL_DIM, LOCAL_DIM, 1);
//printf("%d, %d - %d, %d - %d\n", cpuParam->size, cpuParam->dims[0], cpuParam->dims[1], cpuParam->dims[2], cpuParam->dims[3]);
int globalZ = ALIMIN(runtime->prop().multiProcessorCount, cpuParam->size);
dim3 globalSize(UP_DIV(cpuParam->dims[0], LOCAL_DIM), UP_DIV(cpuParam->dims[1], LOCAL_DIM), globalZ);
switch (bytes) {
case 4:
hipLaunchKernelGGL(( TRANSPOSE_LOCAL), dim3(globalSize), dim3(localSize), 0, 0, (const float *)inp, (float *)out, gpuRegion);
break;
case 2:
hipLaunchKernelGGL(( TRANSPOSE_LOCAL), dim3(globalSize), dim3(localSize), 0, 0, (const half *)inp, (half *)out, gpuRegion);
break;
case 1:
hipLaunchKernelGGL(( TRANSPOSE_LOCAL), dim3(globalSize), dim3(localSize), 0, 0, (const int8_t *)inp, (int8_t *)out, gpuRegion);
break;
default:
break;
}
return;
}
switch (bytes) {
case 4:
hipLaunchKernelGGL(( TRANSPOSE), dim3(block_num), dim3(threads_num), 0, 0, (int*)inp, (int*)out, gpuRegion);
break;
case 2:
hipLaunchKernelGGL(( TRANSPOSE), dim3(block_num), dim3(threads_num), 0, 0, (int16_t*)inp, (int16_t*)out, gpuRegion);
break;
case 1:
hipLaunchKernelGGL(( TRANSPOSE), dim3(block_num), dim3(threads_num), 0, 0, (int8_t*)inp, (int8_t*)out, gpuRegion);
break;
default:
break;
}
}
template<typename T0, typename T1>
__global__ void NCHW_2_NHWC8(const T0* input,
T1* output,
const int maxCount,
const int channel,
const int area,
const int channel_pack,
DivModFast d_ocp,
DivModFast d_area
) {
for(size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < maxCount; index += blockDim.x * gridDim.x) {
int area_idx, temp, chnlp_idx, batch_idx;
d_ocp.divmod(index, temp, chnlp_idx);
d_area.divmod(temp, batch_idx, area_idx);
if(chnlp_idx >= channel) {
output[index] = (T1)0.0f;
continue;
}
int src_offset = (batch_idx * channel + chnlp_idx) * area + area_idx;
output[index] = (T1)input[src_offset];
}
}
template<typename T0, typename T1>
__global__ void NCHW_2_NHWC(const T0* input,
T1* output,
const int maxCount,
const int channel,
const int area,
const int channel_pack,
DivModFast d_oc,
DivModFast d_area
) {
for(size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < maxCount; index += blockDim.x * gridDim.x) {
int area_idx, temp, chnl_idx, batch_idx;
d_oc.divmod(index, temp, chnl_idx);
d_area.divmod(temp, batch_idx, area_idx);
int src_offset = (batch_idx * channel + chnl_idx) * area + area_idx;
output[index] = (T1)input[src_offset];
}
}
template<typename T0, typename T1>
__global__ void NHWC_2_NHWC8(const T0* input,
T1* output,
const int maxCount,
const int channel,
const int area,
const int channel_pack,
DivModFast d_ocp,
DivModFast d_area
) {
for(size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < maxCount; index += blockDim.x * gridDim.x) {
int area_idx, temp, chnlp_idx, batch_idx;
d_ocp.divmod(index, temp, chnlp_idx);
d_area.divmod(temp, batch_idx, area_idx);
if(chnlp_idx >= channel) {
output[index] = (T1)0.0f;
continue;
}
int src_offset = (batch_idx * area + area_idx) * channel + chnlp_idx;
output[index] = (T1)input[src_offset];
}
}
template<typename T0, typename T1>
__global__ void NHWC8_2_NCHW(const T0* input,
T1* output,
const int maxCount,
const int channel,
const int area,
const int channel_pack,
DivModFast d_oc,
DivModFast d_area
) {
for(size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < maxCount; index += blockDim.x * gridDim.x) {
int area_idx, temp, channel_idx, batch_idx;
d_area.divmod(index, temp, area_idx);
d_oc.divmod(temp, batch_idx, channel_idx);
int src_offset = (batch_idx * area + area_idx) * channel_pack + channel_idx;
output[index] = (T1)input[src_offset];
}
}
template<typename T0, typename T1>
__global__ void NHWC8_2_NHWC(const T0* input,
T1* output,
const int maxCount,
const int channel,
const int area,
const int channel_pack,
DivModFast d_oc,
DivModFast d_area
) {
for(size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < maxCount; index += blockDim.x * gridDim.x) {
int area_idx, temp, channel_idx, batch_idx;
d_oc.divmod(index, temp, channel_idx);
d_area.divmod(temp, batch_idx, area_idx);
int src_offset = (batch_idx * area + area_idx) * channel_pack + channel_idx;
output[index] = (T1)input[src_offset];
}
}
template<typename T0, typename T1>
__global__ void NCHW_2_NCHW(const T0* input,
T1* output,
const int maxCount
) {
for(size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < maxCount; index += blockDim.x * gridDim.x) {
output[index] = (T1)input[index];
}
}
template<typename T0, typename T1>
__global__ void C4NHW4_2_NHWC8(const T0* input,
T1* output,
const int maxCount,
const int batch,
const int area,
const int channel,
const int channel_pack
) {
for(size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < maxCount; index += blockDim.x * gridDim.x) {
int c_idx = index % channel_pack;
int temp = index / channel_pack;
int hw_idx = temp % area;
int batch_idx = temp / area;
if(c_idx >= channel) {
output[index] = (T1)0.0f;
continue;
}
int c4_idx = c_idx >> 2;
int cL_idx = c_idx & 3;
output[index] = (T1)input[((c4_idx * batch + batch_idx) * area + hw_idx) * 4 + cL_idx];
}
}
template<typename T0, typename T1>
__global__ void NHWC8_2_C4NHW4(const T0* input,
T1* output,
const int maxCount,
const int batch,
const int channel,
const int area,
const int channel_pack
) {
for(size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < maxCount; index += blockDim.x * gridDim.x) {
int c_idx = index % channel_pack;
int temp = index / channel_pack;
int hw_idx = temp % area;
int batch_idx = temp / area;
int channel_8 = ((channel + 7) / 8) * 8;
int c4_idx = c_idx >> 2;
int cL_idx = c_idx & 3;
output[((c4_idx * batch + batch_idx) * area + hw_idx) * 4 + cL_idx] =
(T1)input[(batch_idx * area + hw_idx) * channel_8 + c_idx];
}
}
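// Layout note: in the padded NHWC8 layout used above, element (n, c, hw) of a
// tensor with `channel` channels lives at (n*area + hw)*channel_pack + c, where
// channel_pack = UP_DIV(channel, 8)*8; e.g. for channel = 3 (illustrative),
// lanes c = 3..7 of every pixel hold the zero padding written by the *_2_NHWC8
// kernels.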
template<class T0, class T1>
static void insideFormatConvert(T0* input, T1* output, MNN_DATA_FORMAT srcDataFormat, MNN_DATA_FORMAT dstDataFormat, CUDARuntime* runtime, \
const int area, const int batch, const int channel) {
DivModFast d_oc(channel);
DivModFast d_ocp(UP_DIV(channel, 8) * 8);
DivModFast d_area(area);
if(srcDataFormat == MNN_DATA_FORMAT_NCHW && dstDataFormat == MNN_DATA_FORMAT_NC4HW4) {
const int maxCount = batch * area * UP_DIV(channel, 8) * 8;
const int block_num = runtime->blocks_num(maxCount);
const int block_size = runtime->threads_num();
hipLaunchKernelGGL(( NCHW_2_NHWC8<T0, T1>), dim3(block_num), dim3(block_size), 0, 0, input, output, maxCount, channel, area, UP_DIV(channel, 8) * 8,
d_ocp, d_area);
checkKernelErrors;
return;
}
if(srcDataFormat == MNN_DATA_FORMAT_NHWC && dstDataFormat == MNN_DATA_FORMAT_NC4HW4) {
const int maxCount = batch * area * UP_DIV(channel, 8) * 8;
const int block_num = runtime->blocks_num(maxCount);
const int block_size = runtime->threads_num();
hipLaunchKernelGGL(( NHWC_2_NHWC8<T0, T1>), dim3(block_num), dim3(block_size), 0, 0, input, output, maxCount, channel, area, UP_DIV(channel, 8) * 8,
d_ocp, d_area);
checkKernelErrors;
return;
}
if((srcDataFormat == MNN_DATA_FORMAT_NCHW && dstDataFormat == MNN_DATA_FORMAT_NCHW) || \
(srcDataFormat == MNN_DATA_FORMAT_NHWC && dstDataFormat == MNN_DATA_FORMAT_NHWC)) {
const int maxCount = batch * area * channel;
const int block_num = runtime->blocks_num(maxCount);
const int block_size = runtime->threads_num();
hipLaunchKernelGGL(( NCHW_2_NCHW<T0, T1>), dim3(block_num), dim3(block_size), 0, 0, input, output, maxCount);
checkKernelErrors;
return;
}
if(srcDataFormat == MNN_DATA_FORMAT_NC4HW4 && dstDataFormat == MNN_DATA_FORMAT_NCHW) {
const int maxCount = batch * area * channel;
const int block_num = runtime->blocks_num(maxCount);
const int block_size = runtime->threads_num();
hipLaunchKernelGGL(( NHWC8_2_NCHW<T0, T1>), dim3(block_num), dim3(block_size), 0, 0, input, output, maxCount, channel, area, UP_DIV(channel, 8) * 8,
d_oc, d_area);
checkKernelErrors;
return;
}
if(srcDataFormat == MNN_DATA_FORMAT_NC4HW4 && dstDataFormat == MNN_DATA_FORMAT_NHWC) {
const int maxCount = batch * area * channel;
const int block_num = runtime->blocks_num(maxCount);
const int block_size = runtime->threads_num();
hipLaunchKernelGGL(( NHWC8_2_NHWC<T0, T1>), dim3(block_num), dim3(block_size), 0, 0, input, output, maxCount, channel, area, UP_DIV(channel, 8) * 8,
d_oc, d_area);
checkKernelErrors;
return;
}
if(srcDataFormat == MNN_DATA_FORMAT_NCHW && dstDataFormat == MNN_DATA_FORMAT_NHWC) {
const int maxCount = batch * area * channel;
const int block_num = runtime->blocks_num(maxCount);
const int block_size = runtime->threads_num();
hipLaunchKernelGGL(( NCHW_2_NHWC<T0, T1>), dim3(block_num), dim3(block_size), 0, 0, input, output, maxCount, channel, area, UP_DIV(channel, 8) * 8,
d_oc, d_area);
checkKernelErrors;
return;
}
MNN_PRINT("insideFormatConvert from %d to %d, not supported\n", (int)srcDataFormat, (int)dstDataFormat);
}
void FormatConvert(void* output, void* input, MNN_DATA_FORMAT srcDataFormat, MNN_DATA_FORMAT dstDataFormat, CUDARuntime* runtime, \
const int area, const int batch, const int channel, const Tensor* srcTensor, bool isFp16, bool srcDevice, bool dstDevice) {
//MNN_PRINT("FormatConvert size batch:%d - plane:%d - channel:%d, %d-%d, %d-%d\n", batch, area, channel, srcDataFormat, dstDataFormat, srcDevice, dstDevice);
if(batch == 0 || area == 0 || channel == 0) {
return;
}
if(srcTensor->getType().bits == 8) {
if(srcDataFormat == MNN_DATA_FORMAT_NC4HW4 && dstDataFormat == MNN_DATA_FORMAT_NC4HW4) {
if(!srcDevice && dstDevice) {
const int maxCount = batch * area * UP_DIV(channel, 8) * 8;
const int block_num = runtime->blocks_num(maxCount);
const int block_size = runtime->threads_num();
hipLaunchKernelGGL(( C4NHW4_2_NHWC8), dim3(block_num), dim3(block_size), 0, 0, (int8_t *)input, (int8_t *)output,
maxCount, batch, area, channel, UP_DIV(channel, 8) * 8);
checkKernelErrors;
return;
}
if(srcDevice && !dstDevice) {
const int maxCount = batch * area * UP_DIV(channel, 4) * 4;
const int block_num = runtime->blocks_num(maxCount);
const int block_size = runtime->threads_num();
hipLaunchKernelGGL(( NHWC8_2_C4NHW4), dim3(block_num), dim3(block_size), 0, 0, (int8_t *)input, (int8_t *)output,
maxCount, batch, channel, area, UP_DIV(channel, 4) * 4);
checkKernelErrors;
return;
}
}
insideFormatConvert<int8_t, int8_t>((int8_t *)input, (int8_t *)output, srcDataFormat, dstDataFormat, runtime, area, batch, channel);
return;
}
isFp16 = isFp16 & (halide_type_float == srcTensor->getType().code);
if(srcDataFormat == MNN_DATA_FORMAT_NC4HW4 && dstDataFormat == MNN_DATA_FORMAT_NC4HW4) {
if(!srcDevice && dstDevice) {
const int maxCount = batch * area * UP_DIV(channel, 8) * 8;
const int block_num = runtime->blocks_num(maxCount);
const int block_size = runtime->threads_num();
if(isFp16) {
hipLaunchKernelGGL(( C4NHW4_2_NHWC8), dim3(block_num), dim3(block_size), 0, 0, (float *)input, (half *)output,
maxCount, batch, area, channel, UP_DIV(channel, 8) * 8);
checkKernelErrors;
} else {
hipLaunchKernelGGL(( C4NHW4_2_NHWC8), dim3(block_num), dim3(block_size), 0, 0, (float *)input, (float *)output,
maxCount, batch, area, channel, UP_DIV(channel, 8) * 8);
checkKernelErrors;
}
return;
}
if(srcDevice && !dstDevice) {
const int maxCount = batch * area * UP_DIV(channel, 4) * 4;
const int block_num = runtime->blocks_num(maxCount);
const int block_size = runtime->threads_num();
if(isFp16) {
hipLaunchKernelGGL(( NHWC8_2_C4NHW4), dim3(block_num), dim3(block_size), 0, 0, (half *)input, (float *)output,
maxCount, batch, channel, area, UP_DIV(channel, 4) * 4);
checkKernelErrors;
} else {
hipLaunchKernelGGL(( NHWC8_2_C4NHW4), dim3(block_num), dim3(block_size), 0, 0, (float *)input, (float *)output,
maxCount, batch, channel, area, UP_DIV(channel, 4) * 4);
checkKernelErrors;
}
return;
}
if(srcDevice && dstDevice) {
const int maxCount = batch * area * UP_DIV(channel, 8) * 8;
const int block_num = runtime->blocks_num(maxCount);
const int block_size = runtime->threads_num();
if(isFp16) {
hipLaunchKernelGGL(( NCHW_2_NCHW<half, half>), dim3(block_num), dim3(block_size), 0, 0, (half *)input, (half *)output, maxCount);
checkKernelErrors;
} else {
hipLaunchKernelGGL(( NCHW_2_NCHW<float, float>), dim3(block_num), dim3(block_size), 0, 0, (float *)input, (float *)output, maxCount);
checkKernelErrors;
}
return;
}
}
if(!srcDevice) {
if(isFp16) {
insideFormatConvert<float, half>((float *)input, (half *)output, srcDataFormat, dstDataFormat, runtime, area, batch, channel);
} else {
insideFormatConvert<float, float>((float *)input, (float *)output, srcDataFormat, dstDataFormat, runtime, area, batch, channel);
}
} else if(!dstDevice) {
if(isFp16) {
insideFormatConvert<half, float>((half *)input, (float *)output, srcDataFormat, dstDataFormat, runtime, area, batch, channel);
} else {
insideFormatConvert<float, float>((float *)input, (float *)output, srcDataFormat, dstDataFormat, runtime, area, batch, channel);
}
} else {
if(isFp16) {
insideFormatConvert<half, half>((half *)input, (half *)output, srcDataFormat, dstDataFormat, runtime, area, batch, channel);
} else {
insideFormatConvert<float, float>((float *)input, (float *)output, srcDataFormat, dstDataFormat, runtime, area, batch, channel);
}
}
}
};
};
| 5900be9a147c6212a0c5acf94db232e18d215166.cu | //
// Transpose.cu
// MNN
//
// Created by MNN on b'2021/12/09'.
// Copyright © 2018, Alibaba Group Holding Limited
//
#include "Transpose.cuh"
#include "core/Macro.h"
#include "MNNCUDADefine.hpp"
#include "MNNCUDAFunction.cuh"
namespace MNN {
namespace CUDA {
template<typename T0, typename T1>
__global__ void UNPACKCOMMON_4(const T0 *input, T1 *output,
const int total, int inside, int axis, int outside,
int insideStride, int axisStride, int axisAlign,
DivModFast is, DivModFast cs
) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < total; i += blockDim.x * gridDim.x) {
int tmp, x, y, z;
cs.divmod(i, tmp, y);
is.divmod(tmp, z, x);
if (y < axis) {
int srcOffset = (z * inside + x) * axisAlign + y; // NHWC8, inside <-> HW, outside <-> N
int dstOffset = x * insideStride + y * axisStride + z * inside * axis;
output[dstOffset] = input[srcOffset];
}
}
}
template<typename T0, typename T1>
__global__ void UNPACKCOMMON(const T0 *input, T1 *output,
int inside, int axis, int outside,
int insideStride, int axisStride
) {
int axisAlign = UP_DIV(axis, PACK_NUMBER) * PACK_NUMBER;
int total = axisAlign * inside * outside;
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < total; i += blockDim.x * gridDim.x) {
int tmpI = i / axisAlign;
int y = i % axisAlign;
int x = tmpI % inside;
int z = tmpI / inside;
int srcOffset = (z * inside + x) * axisAlign + y; // NHWC8, inside <-> HW, outside <-> N
int dstOffset = x * insideStride + y * axisStride + z * inside * axis;
if (y < axis) {
output[dstOffset] = input[srcOffset];
}
}
}
template<typename T0, typename T1>
__global__ void PACKCOMMON_4(const T0 *input, T1 *output,
int inside, int axis, int outside,
int insideStride, int axisStride,
DivModFast is, DivModFast cs
) {
int axisAlign = UP_DIV(axis, PACK_NUMBER / 4) * PACK_NUMBER / 4;
int total = axisAlign * inside * outside;
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < total; i += blockDim.x * gridDim.x) {
int tmp, x, y, z;
cs.divmod(i, tmp, y);
is.divmod(tmp, z, x);
int dstOffset = (z * inside + x) * axisAlign + y;
int srcOffset = x * insideStride + y * axisStride + z * inside * axis;
if (y < axis) {
output[dstOffset] = input[srcOffset];
} else {
output[dstOffset] = {0, 0, 0, 0};
}
}
}
template<typename T0, typename T1>
__global__ void PACKCOMMON(const T0 *input, T1 *output,
int inside, int axis, int outside,
int insideStride, int axisStride
) {
int axisAlign = UP_DIV(axis, PACK_NUMBER) * PACK_NUMBER;
int total = axisAlign * inside * outside;
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < total; i += blockDim.x * gridDim.x) {
int tmpI = i / axisAlign;
int y = i % axisAlign;
int x = tmpI % inside;
int z = tmpI / inside;
int dstOffset = (z * inside + x) * axisAlign + y;
int srcOffset = x * insideStride + y * axisStride + z * inside * axis;
if (y < axis) {
output[dstOffset] = input[srcOffset];
} else {
output[dstOffset] = 0.0;
}
}
}
void PackBuffer(void* output, const void* input, const PackInfo* info, int bytes, CUDARuntime* runtime) {
auto& prop = runtime->prop();
int cores = prop.multiProcessorCount;
int threadNumbers = prop.maxThreadsPerBlock;
if (info->axis % 4 == 0 && info->axisStride == 1 && \
bytes == 4 && info->insideStride == info->axis) {
int axis_pack = UP_DIV(info->axis, PACK_NUMBER) * PACK_NUMBER / 4;
DivModFast is(info->inside);
DivModFast cs(axis_pack);
PACKCOMMON_4<<<cores, threadNumbers>>>((const int4*)input, (int4*)output,
info->inside, info->axis / 4, info->outside,
info->insideStride / 4, info->axisStride, is, cs);
return;
}
switch (bytes) {
case 4:
PACKCOMMON<<<cores, threadNumbers>>>((const float*)input, (float*)output,
info->inside, info->axis, info->outside,
info->insideStride, info->axisStride);
break;
case 2:
PACKCOMMON<<<cores, threadNumbers>>>((const half*)input, (half*)output,
info->inside, info->axis, info->outside,
info->insideStride, info->axisStride);
break;
case 1:
PACKCOMMON<<<cores, threadNumbers>>>((const int8_t*)input, (int8_t*)output,
info->inside, info->axis, info->outside,
info->insideStride, info->axisStride);
break;
default:
break;
}
}
void UnpackBuffer(void* output, const void* input, const PackInfo* info, int bytes, CUDARuntime* runtime) {
auto& prop = runtime->prop();
int cores = prop.multiProcessorCount;
int threadNumbers = prop.maxThreadsPerBlock;
if (info->axis % 4 == 0 && info->axisStride == 1 && bytes == 4 && info->insideStride == info->axis) {
int axis_pack = UP_DIV(info->axis, PACK_NUMBER) * PACK_NUMBER / 4;
DivModFast is(info->inside);
DivModFast cs(axis_pack);
const int maxCount = info->inside * axis_pack * info->outside;
int block_num = runtime->blocks_num(maxCount);
int block_size = runtime->threads_num();
int axisAlign = UP_DIV(info->axis / 4, PACK_NUMBER / 4) * PACK_NUMBER / 4;
UNPACKCOMMON_4<<<block_num, block_size>>>((const int4*)input, (int4*)output,
maxCount, info->inside, info->axis / 4, info->outside,
info->insideStride / 4, info->axisStride, axisAlign, is, cs);
return;
}
switch (bytes) {
case 4:
UNPACKCOMMON<<<cores, threadNumbers>>>((const float*)input, (float*)output,
info->inside, info->axis, info->outside,
info->insideStride, info->axisStride);
break;
case 2:
UNPACKCOMMON<<<cores, threadNumbers>>>((const half*)input, (half*)output,
info->inside, info->axis, info->outside,
info->insideStride, info->axisStride);
break;
case 1:
UNPACKCOMMON<<<cores, threadNumbers>>>((const int8_t*)input, (int8_t*)output,
info->inside, info->axis, info->outside,
info->insideStride, info->axisStride);
break;
default:
break;
}
}
void PackFP32ToFP16(void* output, const void* input, const PackInfo* info, CUDARuntime* runtime) {
auto& prop = runtime->prop();
int cores = prop.multiProcessorCount;
int threadNumbers = prop.maxThreadsPerBlock;
PACKCOMMON<<<cores, threadNumbers>>>((const float*)input, (half*)output,
info->inside, info->axis, info->outside,
info->insideStride, info->axisStride);
}
void PackFP16ToFP32(void* output, const void* input, const PackInfo* info, CUDARuntime* runtime) {
auto& prop = runtime->prop();
int cores = prop.multiProcessorCount;
int threadNumbers = prop.maxThreadsPerBlock;
PACKCOMMON<<<cores, threadNumbers>>>((const half*)input, (float*)output,
info->inside, info->axis, info->outside,
info->insideStride, info->axisStride);
}
void UnpackFP16ToFP32(void* output, const void* input, const PackInfo* info, CUDARuntime* runtime) {
auto& prop = runtime->prop();
int cores = prop.multiProcessorCount;
int threadNumbers = prop.maxThreadsPerBlock;
UNPACKCOMMON<<<cores, threadNumbers>>>((const half*)input, (float*)output,
info->inside, info->axis, info->outside,
info->insideStride, info->axisStride);
}
void UnpackFP32ToFP16(void* output, const void* input, const PackInfo* info, CUDARuntime* runtime) {
auto& prop = runtime->prop();
int cores = prop.multiProcessorCount;
int threadNumbers = prop.maxThreadsPerBlock;
UNPACKCOMMON<<<cores, threadNumbers>>>((const float*)input, (half*)output,
info->inside, info->axis, info->outside,
info->insideStride, info->axisStride);
}
template<typename T>
__global__ void TRANSPOSE(const T *input, T *output, const TransposeParam* param) {
size_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < param->total) {
int x = i % param->dims[0];
int tmp = i / param->dims[0];
int y = tmp % param->dims[1];
int z = tmp / param->dims[1];
int srcOffset = param->srcStride * z + y + x * param->dims[2];
int dstOffset = param->dstStride * z + x + y * param->dims[3];
output[dstOffset] = input[srcOffset];
}
}
#define LOCAL_DIM 8
template <typename T>
__global__ void TRANSPOSE_LOCAL(const T* input, T *output, const TransposeParam* param) {
__shared__ T localM[LOCAL_DIM][LOCAL_DIM + 1];
int num = blockIdx.z;
for (int n = num; n < param->size; n += gridDim.z) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < param->dims[0] && y < param->dims[1]) {
int offset = n * param->srcStride + x * param->dims[2] + y;
localM[threadIdx.y][threadIdx.x] = input[offset];
}
__syncthreads();
x = blockIdx.y * blockDim.y + threadIdx.x;
y = blockIdx.x * blockDim.x + threadIdx.y;
if (x < param->dims[1] && y < param->dims[0]) {
int offset = n * param->dstStride + x * param->dims[3] + y;
output[offset] = localM[threadIdx.x][threadIdx.y];
}
}
}
void Transpose(uint8_t* output, const uint8_t* input, const TransposeParam* cpuParam, const TransposeParam* gpuRegion, int bytes, CUDARuntime* runtime) {
int count = cpuParam->total;
int block_num = runtime->blocks_num(count);
int threads_num = runtime->threads_num();
auto out = output + bytes * cpuParam->dstOffset;
auto inp = input + bytes * cpuParam->srcOffset;
if (runtime->prop().maxThreadsPerBlock >= LOCAL_DIM * LOCAL_DIM && (cpuParam->dims[0] >= LOCAL_DIM || cpuParam->dims[1] >= LOCAL_DIM)) {
dim3 localSize(LOCAL_DIM, LOCAL_DIM, 1);
//printf("%d, %d - %d, %d - %d\n", cpuParam->size, cpuParam->dims[0], cpuParam->dims[1], cpuParam->dims[2], cpuParam->dims[3]);
int globalZ = ALIMIN(runtime->prop().multiProcessorCount, cpuParam->size);
dim3 globalSize(UP_DIV(cpuParam->dims[0], LOCAL_DIM), UP_DIV(cpuParam->dims[1], LOCAL_DIM), globalZ);
switch (bytes) {
case 4:
TRANSPOSE_LOCAL<<<globalSize, localSize>>>((const float *)inp, (float *)out, gpuRegion);
break;
case 2:
TRANSPOSE_LOCAL<<<globalSize, localSize>>>((const half *)inp, (half *)out, gpuRegion);
break;
case 1:
TRANSPOSE_LOCAL<<<globalSize, localSize>>>((const int8_t *)inp, (int8_t *)out, gpuRegion);
break;
default:
break;
}
return;
}
switch (bytes) {
case 4:
TRANSPOSE<<<block_num, threads_num>>>((int*)inp, (int*)out, gpuRegion);
break;
case 2:
TRANSPOSE<<<block_num, threads_num>>>((int16_t*)inp, (int16_t*)out, gpuRegion);
break;
case 1:
TRANSPOSE<<<block_num, threads_num>>>((int8_t*)inp, (int8_t*)out, gpuRegion);
break;
default:
break;
}
}
template<typename T0, typename T1>
__global__ void NCHW_2_NHWC8(const T0* input,
T1* output,
const int maxCount,
const int channel,
const int area,
const int channel_pack,
DivModFast d_ocp,
DivModFast d_area
) {
for(size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < maxCount; index += blockDim.x * gridDim.x) {
int area_idx, temp, chnlp_idx, batch_idx;
d_ocp.divmod(index, temp, chnlp_idx);
d_area.divmod(temp, batch_idx, area_idx);
if(chnlp_idx >= channel) {
output[index] = (T1)0.0f;
continue;
}
int src_offset = (batch_idx * channel + chnlp_idx) * area + area_idx;
output[index] = (T1)input[src_offset];
}
}
template<typename T0, typename T1>
__global__ void NCHW_2_NHWC(const T0* input,
T1* output,
const int maxCount,
const int channel,
const int area,
const int channel_pack,
DivModFast d_oc,
DivModFast d_area
) {
for(size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < maxCount; index += blockDim.x * gridDim.x) {
int area_idx, temp, chnl_idx, batch_idx;
d_oc.divmod(index, temp, chnl_idx);
d_area.divmod(temp, batch_idx, area_idx);
int src_offset = (batch_idx * channel + chnl_idx) * area + area_idx;
output[index] = (T1)input[src_offset];
}
}
template<typename T0, typename T1>
__global__ void NHWC_2_NHWC8(const T0* input,
T1* output,
const int maxCount,
const int channel,
const int area,
const int channel_pack,
DivModFast d_ocp,
DivModFast d_area
) {
for(size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < maxCount; index += blockDim.x * gridDim.x) {
int area_idx, temp, chnlp_idx, batch_idx;
d_ocp.divmod(index, temp, chnlp_idx);
d_area.divmod(temp, batch_idx, area_idx);
if(chnlp_idx >= channel) {
output[index] = (T1)0.0f;
continue;
}
int src_offset = (batch_idx * area + area_idx) * channel + chnlp_idx;
output[index] = (T1)input[src_offset];
}
}
template<typename T0, typename T1>
__global__ void NHWC8_2_NCHW(const T0* input,
T1* output,
const int maxCount,
const int channel,
const int area,
const int channel_pack,
DivModFast d_oc,
DivModFast d_area
) {
for(size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < maxCount; index += blockDim.x * gridDim.x) {
int area_idx, temp, channel_idx, batch_idx;
d_area.divmod(index, temp, area_idx);
d_oc.divmod(temp, batch_idx, channel_idx);
int src_offset = (batch_idx * area + area_idx) * channel_pack + channel_idx;
output[index] = (T1)input[src_offset];
}
}
template<typename T0, typename T1>
__global__ void NHWC8_2_NHWC(const T0* input,
T1* output,
const int maxCount,
const int channel,
const int area,
const int channel_pack,
DivModFast d_oc,
DivModFast d_area
) {
for(size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < maxCount; index += blockDim.x * gridDim.x) {
int area_idx, temp, channel_idx, batch_idx;
d_oc.divmod(index, temp, channel_idx);
d_area.divmod(temp, batch_idx, area_idx);
int src_offset = (batch_idx * area + area_idx) * channel_pack + channel_idx;
output[index] = (T1)input[src_offset];
}
}
template<typename T0, typename T1>
__global__ void NCHW_2_NCHW(const T0* input,
T1* output,
const int maxCount
) {
for(size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < maxCount; index += blockDim.x * gridDim.x) {
output[index] = (T1)input[index];
}
}
template<typename T0, typename T1>
__global__ void C4NHW4_2_NHWC8(const T0* input,
T1* output,
const int maxCount,
const int batch,
const int area,
const int channel,
const int channel_pack
) {
for(size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < maxCount; index += blockDim.x * gridDim.x) {
int c_idx = index % channel_pack;
int temp = index / channel_pack;
int hw_idx = temp % area;
int batch_idx = temp / area;
if(c_idx >= channel) {
output[index] = (T1)0.0f;
continue;
}
int c4_idx = c_idx >> 2;
int cL_idx = c_idx & 3;
output[index] = (T1)input[((c4_idx * batch + batch_idx) * area + hw_idx) * 4 + cL_idx];
}
}
template<typename T0, typename T1>
__global__ void NHWC8_2_C4NHW4(const T0* input,
T1* output,
const int maxCount,
const int batch,
const int channel,
const int area,
const int channel_pack
) {
for(size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < maxCount; index += blockDim.x * gridDim.x) {
int c_idx = index % channel_pack;
int temp = index / channel_pack;
int hw_idx = temp % area;
int batch_idx = temp / area;
int channel_8 = ((channel + 7) / 8) * 8;
int c4_idx = c_idx >> 2;
int cL_idx = c_idx & 3;
output[((c4_idx * batch + batch_idx) * area + hw_idx) * 4 + cL_idx] =
(T1)input[(batch_idx * area + hw_idx) * channel_8 + c_idx];
}
}
template<class T0, class T1>
static void insideFormatConvert(T0* input, T1* output, MNN_DATA_FORMAT srcDataFormat, MNN_DATA_FORMAT dstDataFormat, CUDARuntime* runtime, \
const int area, const int batch, const int channel) {
DivModFast d_oc(channel);
DivModFast d_ocp(UP_DIV(channel, 8) * 8);
DivModFast d_area(area);
if(srcDataFormat == MNN_DATA_FORMAT_NCHW && dstDataFormat == MNN_DATA_FORMAT_NC4HW4) {
const int maxCount = batch * area * UP_DIV(channel, 8) * 8;
const int block_num = runtime->blocks_num(maxCount);
const int block_size = runtime->threads_num();
NCHW_2_NHWC8<T0, T1><<<block_num, block_size>>>(input, output, maxCount, channel, area, UP_DIV(channel, 8) * 8,
d_ocp, d_area);
checkKernelErrors;
return;
}
if(srcDataFormat == MNN_DATA_FORMAT_NHWC && dstDataFormat == MNN_DATA_FORMAT_NC4HW4) {
const int maxCount = batch * area * UP_DIV(channel, 8) * 8;
const int block_num = runtime->blocks_num(maxCount);
const int block_size = runtime->threads_num();
NHWC_2_NHWC8<T0, T1><<<block_num, block_size>>>(input, output, maxCount, channel, area, UP_DIV(channel, 8) * 8,
d_ocp, d_area);
checkKernelErrors;
return;
}
if((srcDataFormat == MNN_DATA_FORMAT_NCHW && dstDataFormat == MNN_DATA_FORMAT_NCHW) || \
(srcDataFormat == MNN_DATA_FORMAT_NHWC && dstDataFormat == MNN_DATA_FORMAT_NHWC)) {
const int maxCount = batch * area * channel;
const int block_num = runtime->blocks_num(maxCount);
const int block_size = runtime->threads_num();
NCHW_2_NCHW<T0, T1><<<block_num, block_size>>>(input, output, maxCount);
checkKernelErrors;
return;
}
if(srcDataFormat == MNN_DATA_FORMAT_NC4HW4 && dstDataFormat == MNN_DATA_FORMAT_NCHW) {
const int maxCount = batch * area * channel;
const int block_num = runtime->blocks_num(maxCount);
const int block_size = runtime->threads_num();
NHWC8_2_NCHW<T0, T1><<<block_num, block_size>>>(input, output, maxCount, channel, area, UP_DIV(channel, 8) * 8,
d_oc, d_area);
checkKernelErrors;
return;
}
if(srcDataFormat == MNN_DATA_FORMAT_NC4HW4 && dstDataFormat == MNN_DATA_FORMAT_NHWC) {
const int maxCount = batch * area * channel;
const int block_num = runtime->blocks_num(maxCount);
const int block_size = runtime->threads_num();
NHWC8_2_NHWC<T0, T1><<<block_num, block_size>>>(input, output, maxCount, channel, area, UP_DIV(channel, 8) * 8,
d_oc, d_area);
checkKernelErrors;
return;
}
if(srcDataFormat == MNN_DATA_FORMAT_NCHW && dstDataFormat == MNN_DATA_FORMAT_NHWC) {
const int maxCount = batch * area * channel;
const int block_num = runtime->blocks_num(maxCount);
const int block_size = runtime->threads_num();
NCHW_2_NHWC<T0, T1><<<block_num, block_size>>>(input, output, maxCount, channel, area, UP_DIV(channel, 8) * 8,
d_oc, d_area);
checkKernelErrors;
return;
}
MNN_PRINT("insideFormatConvert from %d to %d, not supported\n", (int)srcDataFormat, (int)dstDataFormat);
}
void FormatConvert(void* output, void* input, MNN_DATA_FORMAT srcDataFormat, MNN_DATA_FORMAT dstDataFormat, CUDARuntime* runtime, \
const int area, const int batch, const int channel, const Tensor* srcTensor, bool isFp16, bool srcDevice, bool dstDevice) {
//MNN_PRINT("FormatConvert size batch:%d - plane:%d - channel:%d, %d-%d, %d-%d\n", batch, area, channel, srcDataFormat, dstDataFormat, srcDevice, dstDevice);
if(batch == 0 || area == 0 || channel == 0) {
return;
}
if(srcTensor->getType().bits == 8) {
if(srcDataFormat == MNN_DATA_FORMAT_NC4HW4 && dstDataFormat == MNN_DATA_FORMAT_NC4HW4) {
if(!srcDevice && dstDevice) {
const int maxCount = batch * area * UP_DIV(channel, 8) * 8;
const int block_num = runtime->blocks_num(maxCount);
const int block_size = runtime->threads_num();
C4NHW4_2_NHWC8<<<block_num, block_size>>>((int8_t *)input, (int8_t *)output,
maxCount, batch, area, channel, UP_DIV(channel, 8) * 8);
checkKernelErrors;
return;
}
if(srcDevice && !dstDevice) {
const int maxCount = batch * area * UP_DIV(channel, 4) * 4;
const int block_num = runtime->blocks_num(maxCount);
const int block_size = runtime->threads_num();
NHWC8_2_C4NHW4<<<block_num, block_size>>>((int8_t *)input, (int8_t *)output,
maxCount, batch, channel, area, UP_DIV(channel, 4) * 4);
checkKernelErrors;
return;
}
}
insideFormatConvert<int8_t, int8_t>((int8_t *)input, (int8_t *)output, srcDataFormat, dstDataFormat, runtime, area, batch, channel);
return;
}
isFp16 = isFp16 & (halide_type_float == srcTensor->getType().code);
if(srcDataFormat == MNN_DATA_FORMAT_NC4HW4 && dstDataFormat == MNN_DATA_FORMAT_NC4HW4) {
if(!srcDevice && dstDevice) {
const int maxCount = batch * area * UP_DIV(channel, 8) * 8;
const int block_num = runtime->blocks_num(maxCount);
const int block_size = runtime->threads_num();
if(isFp16) {
C4NHW4_2_NHWC8<<<block_num, block_size>>>((float *)input, (half *)output,
maxCount, batch, area, channel, UP_DIV(channel, 8) * 8);
checkKernelErrors;
} else {
C4NHW4_2_NHWC8<<<block_num, block_size>>>((float *)input, (float *)output,
maxCount, batch, area, channel, UP_DIV(channel, 8) * 8);
checkKernelErrors;
}
return;
}
if(srcDevice && !dstDevice) {
const int maxCount = batch * area * UP_DIV(channel, 4) * 4;
const int block_num = runtime->blocks_num(maxCount);
const int block_size = runtime->threads_num();
if(isFp16) {
NHWC8_2_C4NHW4<<<block_num, block_size>>>((half *)input, (float *)output,
maxCount, batch, channel, area, UP_DIV(channel, 4) * 4);
checkKernelErrors;
} else {
NHWC8_2_C4NHW4<<<block_num, block_size>>>((float *)input, (float *)output,
maxCount, batch, channel, area, UP_DIV(channel, 4) * 4);
checkKernelErrors;
}
return;
}
if(srcDevice && dstDevice) {
const int maxCount = batch * area * UP_DIV(channel, 8) * 8;
const int block_num = runtime->blocks_num(maxCount);
const int block_size = runtime->threads_num();
if(isFp16) {
NCHW_2_NCHW<half, half><<<block_num, block_size>>>((half *)input, (half *)output, maxCount);
checkKernelErrors;
} else {
NCHW_2_NCHW<float, float><<<block_num, block_size>>>((float *)input, (float *)output, maxCount);
checkKernelErrors;
}
return;
}
}
if(!srcDevice) {
if(isFp16) {
insideFormatConvert<float, half>((float *)input, (half *)output, srcDataFormat, dstDataFormat, runtime, area, batch, channel);
} else {
insideFormatConvert<float, float>((float *)input, (float *)output, srcDataFormat, dstDataFormat, runtime, area, batch, channel);
}
} else if(!dstDevice) {
if(isFp16) {
insideFormatConvert<half, float>((half *)input, (float *)output, srcDataFormat, dstDataFormat, runtime, area, batch, channel);
} else {
insideFormatConvert<float, float>((float *)input, (float *)output, srcDataFormat, dstDataFormat, runtime, area, batch, channel);
}
} else {
if(isFp16) {
insideFormatConvert<half, half>((half *)input, (half *)output, srcDataFormat, dstDataFormat, runtime, area, batch, channel);
} else {
insideFormatConvert<float, float>((float *)input, (float *)output, srcDataFormat, dstDataFormat, runtime, area, batch, channel);
}
}
}
};
};
|
7c15ab8faaefa25af0ad7b0acb3550243119b8cb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <hipcub/hipcub.hpp>
#include <io/utilities/block_utils.cuh>
#include "gpuinflate.h"
namespace cudf {
namespace io {
// Not supporting streams longer than this (not what snappy is intended for)
#define SNAPPY_MAX_STREAM_SIZE 0x7fffffff
#define LOG2_BATCH_SIZE 5
#define BATCH_SIZE (1 << LOG2_BATCH_SIZE)
#define LOG2_BATCH_COUNT 2
#define BATCH_COUNT (1 << LOG2_BATCH_COUNT)
#define LOG2_PREFETCH_SIZE 9
#define PREFETCH_SIZE (1 << LOG2_PREFETCH_SIZE) // 512B, in 32B chunks
#define LOG_CYCLECOUNT 0
/**
* @brief Describes a single LZ77 symbol (single entry in batch)
**/
struct unsnap_batch_s {
int32_t len; // 1..64 = Number of bytes
uint32_t offset; // copy distance if greater than zero, or negative of literal offset in byte stream
};
/**
* @brief Queue structure used to exchange data between warps
**/
struct unsnap_queue_s {
uint32_t prefetch_wrpos; ///< Prefetcher write position
uint32_t prefetch_rdpos; ///< Prefetch consumer read position
int32_t prefetch_end; ///< Prefetch enable flag (nonzero stops prefetcher)
int32_t batch_len[BATCH_COUNT]; ///< Length of each batch - <0:end, 0:not ready, >0:symbol count
unsnap_batch_s batch[BATCH_COUNT * BATCH_SIZE]; ///< LZ77 batch data
uint8_t buf[PREFETCH_SIZE]; ///< Prefetch buffer
};
/**
* @brief snappy decompression state
**/
struct unsnap_state_s {
const uint8_t *base; ///< base ptr of compressed stream
const uint8_t *end; ///< end of compressed stream
uint32_t uncompressed_size; ///< uncompressed stream size
uint32_t bytes_left; ///< bytes to uncompressed remaining
int32_t error; ///< current error status
uint32_t tstart; ///< start time for perf logging
volatile unsnap_queue_s q; ///< queue for cross-warp communication
gpu_inflate_input_s in; ///< input parameters for current block
};
/**
* @brief prefetches data for the symbol decoding stage
*
* @param s decompression state
* @param t warp lane id
**/
__device__ void snappy_prefetch_bytestream(unsnap_state_s *s, int t)
{
const uint8_t *base = s->base;
uint32_t end = (uint32_t)(s->end - base);
uint32_t align_bytes = (uint32_t)(0x20 - (0x1f & reinterpret_cast<uintptr_t>(base)));
int32_t pos = min(align_bytes, end);
int32_t blen;
// Start by prefetching up to the next 32B-aligned location
if (t < pos) { s->q.buf[t] = base[t]; }
blen = 0;
do {
SYNCWARP();
if (!t) {
uint32_t minrdpos;
s->q.prefetch_wrpos = pos;
minrdpos = pos - min(pos, PREFETCH_SIZE - 32u);
blen = (int)min(32u, end - pos);
for (;;) {
uint32_t rdpos = s->q.prefetch_rdpos;
if (rdpos >= minrdpos) break;
if (s->q.prefetch_end) {
blen = 0;
break;
}
NANOSLEEP(100);
}
}
blen = SHFL0(blen);
if (t < blen) { s->q.buf[(pos + t) & (PREFETCH_SIZE - 1)] = base[pos + t]; }
pos += blen;
} while (blen > 0);
}
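// Note: the circular-buffer indexing above, buf[(pos + t) & (PREFETCH_SIZE - 1)],
// relies on PREFETCH_SIZE being a power of two, making the mask equivalent to
// (pos + t) % PREFETCH_SIZE without an integer divide.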
/**
* @brief Lookup table for get_len3_mask()
*
* Indexed by a 10-bit pattern, contains the corresponding 4-bit mask of
* 3-byte code lengths in the lower 4 bits, along with the total number of
* bytes used for coding the four lengths in the upper 4 bits.
* The upper 4-bit value could also be obtained by 8+__popc(mask4)
*
* for (uint32_t k = 0; k < 1024; k++)
* {
* for (uint32_t i = 0, v = 0, b = k, n = 0; i < 4; i++)
* {
* v |= (b & 1) << i;
* n += (b & 1) + 2;
* b >>= (b & 1) + 2;
* }
* k_len3lut[k] = v | (n << 4);
* }
*
**/
static const uint8_t __device__ __constant__ k_len3lut[1 << 10] = {
0x80, 0x91, 0x80, 0x91, 0x92, 0x91, 0x92, 0x91, 0x80, 0xa3, 0x80, 0xa3, 0x92, 0xa3, 0x92, 0xa3,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xa3, 0x94, 0xa3, 0x92, 0xa3, 0x92, 0xa3,
0x80, 0xa5, 0x80, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x80, 0xa3, 0x80, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0x94, 0xa5, 0x94, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x94, 0xa3, 0x94, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0x98, 0x91, 0x98, 0x91, 0x92, 0x91, 0x92, 0x91, 0x98, 0xb7, 0x98, 0xb7, 0x92, 0xb7, 0x92, 0xb7,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xb7, 0x94, 0xb7, 0x92, 0xb7, 0x92, 0xb7,
0x98, 0xa5, 0x98, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x98, 0xb7, 0x98, 0xb7, 0xa6, 0xb7, 0xa6, 0xb7,
0x94, 0xa5, 0x94, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x94, 0xb7, 0x94, 0xb7, 0xa6, 0xb7, 0xa6, 0xb7,
0x80, 0xa9, 0x80, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x80, 0xa3, 0x80, 0xa3, 0xaa, 0xa3, 0xaa, 0xa3,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xa3, 0xac, 0xa3, 0xaa, 0xa3, 0xaa, 0xa3,
0x80, 0xa5, 0x80, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x80, 0xa3, 0x80, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0xac, 0xa5, 0xac, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0xac, 0xa3, 0xac, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0x98, 0xa9, 0x98, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x98, 0xb7, 0x98, 0xb7, 0xaa, 0xb7, 0xaa, 0xb7,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xb7, 0xac, 0xb7, 0xaa, 0xb7, 0xaa, 0xb7,
0x98, 0xa5, 0x98, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x98, 0xb7, 0x98, 0xb7, 0xa6, 0xb7, 0xa6, 0xb7,
0xac, 0xa5, 0xac, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0xac, 0xb7, 0xac, 0xb7, 0xa6, 0xb7, 0xa6, 0xb7,
0x80, 0x91, 0x80, 0x91, 0x92, 0x91, 0x92, 0x91, 0x80, 0xbb, 0x80, 0xbb, 0x92, 0xbb, 0x92, 0xbb,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xbb, 0x94, 0xbb, 0x92, 0xbb, 0x92, 0xbb,
0x80, 0xbd, 0x80, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x80, 0xbb, 0x80, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0x94, 0xbd, 0x94, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x94, 0xbb, 0x94, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0x98, 0x91, 0x98, 0x91, 0x92, 0x91, 0x92, 0x91, 0x98, 0xb7, 0x98, 0xb7, 0x92, 0xb7, 0x92, 0xb7,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xb7, 0x94, 0xb7, 0x92, 0xb7, 0x92, 0xb7,
0x98, 0xbd, 0x98, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x98, 0xb7, 0x98, 0xb7, 0xbe, 0xb7, 0xbe, 0xb7,
0x94, 0xbd, 0x94, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x94, 0xb7, 0x94, 0xb7, 0xbe, 0xb7, 0xbe, 0xb7,
0x80, 0xa9, 0x80, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x80, 0xbb, 0x80, 0xbb, 0xaa, 0xbb, 0xaa, 0xbb,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xbb, 0xac, 0xbb, 0xaa, 0xbb, 0xaa, 0xbb,
0x80, 0xbd, 0x80, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x80, 0xbb, 0x80, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0xac, 0xbd, 0xac, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0xac, 0xbb, 0xac, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0x98, 0xa9, 0x98, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x98, 0xb7, 0x98, 0xb7, 0xaa, 0xb7, 0xaa, 0xb7,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xb7, 0xac, 0xb7, 0xaa, 0xb7, 0xaa, 0xb7,
0x98, 0xbd, 0x98, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x98, 0xb7, 0x98, 0xb7, 0xbe, 0xb7, 0xbe, 0xb7,
0xac, 0xbd, 0xac, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0xac, 0xb7, 0xac, 0xb7, 0xbe, 0xb7, 0xbe, 0xb7,
0x80, 0x91, 0x80, 0x91, 0x92, 0x91, 0x92, 0x91, 0x80, 0xa3, 0x80, 0xa3, 0x92, 0xa3, 0x92, 0xa3,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xa3, 0x94, 0xa3, 0x92, 0xa3, 0x92, 0xa3,
0x80, 0xa5, 0x80, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x80, 0xa3, 0x80, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0x94, 0xa5, 0x94, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x94, 0xa3, 0x94, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0x98, 0x91, 0x98, 0x91, 0x92, 0x91, 0x92, 0x91, 0x98, 0xcf, 0x98, 0xcf, 0x92, 0xcf, 0x92, 0xcf,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xcf, 0x94, 0xcf, 0x92, 0xcf, 0x92, 0xcf,
0x98, 0xa5, 0x98, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x98, 0xcf, 0x98, 0xcf, 0xa6, 0xcf, 0xa6, 0xcf,
0x94, 0xa5, 0x94, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x94, 0xcf, 0x94, 0xcf, 0xa6, 0xcf, 0xa6, 0xcf,
0x80, 0xa9, 0x80, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x80, 0xa3, 0x80, 0xa3, 0xaa, 0xa3, 0xaa, 0xa3,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xa3, 0xac, 0xa3, 0xaa, 0xa3, 0xaa, 0xa3,
0x80, 0xa5, 0x80, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x80, 0xa3, 0x80, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0xac, 0xa5, 0xac, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0xac, 0xa3, 0xac, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0x98, 0xa9, 0x98, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x98, 0xcf, 0x98, 0xcf, 0xaa, 0xcf, 0xaa, 0xcf,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xcf, 0xac, 0xcf, 0xaa, 0xcf, 0xaa, 0xcf,
0x98, 0xa5, 0x98, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x98, 0xcf, 0x98, 0xcf, 0xa6, 0xcf, 0xa6, 0xcf,
0xac, 0xa5, 0xac, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0xac, 0xcf, 0xac, 0xcf, 0xa6, 0xcf, 0xa6, 0xcf,
0x80, 0x91, 0x80, 0x91, 0x92, 0x91, 0x92, 0x91, 0x80, 0xbb, 0x80, 0xbb, 0x92, 0xbb, 0x92, 0xbb,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xbb, 0x94, 0xbb, 0x92, 0xbb, 0x92, 0xbb,
0x80, 0xbd, 0x80, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x80, 0xbb, 0x80, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0x94, 0xbd, 0x94, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x94, 0xbb, 0x94, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0x98, 0x91, 0x98, 0x91, 0x92, 0x91, 0x92, 0x91, 0x98, 0xcf, 0x98, 0xcf, 0x92, 0xcf, 0x92, 0xcf,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xcf, 0x94, 0xcf, 0x92, 0xcf, 0x92, 0xcf,
0x98, 0xbd, 0x98, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x98, 0xcf, 0x98, 0xcf, 0xbe, 0xcf, 0xbe, 0xcf,
0x94, 0xbd, 0x94, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x94, 0xcf, 0x94, 0xcf, 0xbe, 0xcf, 0xbe, 0xcf,
0x80, 0xa9, 0x80, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x80, 0xbb, 0x80, 0xbb, 0xaa, 0xbb, 0xaa, 0xbb,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xbb, 0xac, 0xbb, 0xaa, 0xbb, 0xaa, 0xbb,
0x80, 0xbd, 0x80, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x80, 0xbb, 0x80, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0xac, 0xbd, 0xac, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0xac, 0xbb, 0xac, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0x98, 0xa9, 0x98, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x98, 0xcf, 0x98, 0xcf, 0xaa, 0xcf, 0xaa, 0xcf,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xcf, 0xac, 0xcf, 0xaa, 0xcf, 0xaa, 0xcf,
0x98, 0xbd, 0x98, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x98, 0xcf, 0x98, 0xcf, 0xbe, 0xcf, 0xbe, 0xcf,
0xac, 0xbd, 0xac, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0xac, 0xcf, 0xac, 0xcf, 0xbe, 0xcf, 0xbe, 0xcf};
/**
* @brief Returns a 32-bit mask where 1 means 3-byte code length and 0 means 2-byte
* code length, given an input mask of up to 96 bits.
*
* Implemented by doing 8 consecutive lookups, building the result 4-bit at a time
**/
inline __device__ uint32_t get_len3_mask(uint32_t v0, uint32_t v1, uint32_t v2)
{
uint32_t m, v, m4, n;
v = v0;
m4 = k_len3lut[v & 0x3ff];
m = m4 & 0xf;
n = m4 >> 4; // 8..12
v = v0 >> n;
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 4;
n += m4 >> 4; // 16..24
v = __funnelshift_r(v0, v1, n);
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 8;
n += m4 >> 4; // 24..36
v >>= (m4 >> 4);
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 12;
n = (n + (m4 >> 4)) & 0x1f; // (32..48) % 32 = 0..16
v1 = __funnelshift_r(v1, v2, n);
v2 >>= n;
v = v1;
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 16;
n = m4 >> 4; // 8..12
v = v1 >> n;
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 20;
n += m4 >> 4; // 16..24
v = __funnelshift_r(v1, v2, n);
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 24;
n += m4 >> 4; // 24..36
v >>= (m4 >> 4);
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 28;
return m;
}
/**
* @brief Returns a 32-bit mask where each 2-bit pair contains the symbol length
 * minus 2, given two input masks holding, respectively, bit0 and bit1 of the
 * corresponding code length minus 2 for up to 32 bytes
**/
inline __device__ uint32_t get_len5_mask(uint32_t v0, uint32_t v1)
{
uint32_t m;
m = (v1 & 1) * 2 + (v0 & 1);
v0 >>= (m + 2);
v1 >>= (m + 1);
for (uint32_t i = 1; i < 16; i++) {
uint32_t m2 = (v1 & 2) | (v0 & 1);
uint32_t n = m2 + 2;
m |= m2 << (i * 2);
v0 >>= n;
v1 >>= n;
}
return m;
}
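// READ_BYTE below indexes the prefetch ring buffer; the mask works because
// PREFETCH_SIZE is a power of two, and the decoder only reads positions the
// prefetcher warp has already written (guarded by the prefetch_wrpos checks).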
#define READ_BYTE(pos) s->q.buf[(pos) & (PREFETCH_SIZE - 1)]
/**
* @brief decode symbols and output LZ77 batches (single-warp)
*
* @param s decompression state
* @param t warp lane id
**/
__device__ void snappy_decode_symbols(unsnap_state_s *s, uint32_t t)
{
uint32_t cur = 0;
uint32_t end = static_cast<uint32_t>(s->end - s->base);
uint32_t bytes_left = s->uncompressed_size;
uint32_t dst_pos = 0;
int32_t batch = 0;
for (;;) {
int32_t batch_len;
volatile unsnap_batch_s *b;
// Wait for prefetcher
if (t == 0) {
s->q.prefetch_rdpos = cur;
#pragma unroll(1) // We don't want unrolling here
while (s->q.prefetch_wrpos < min(cur + 5 * BATCH_SIZE, end)) { NANOSLEEP(50); }
b = &s->q.batch[batch * BATCH_SIZE];
}
// Process small symbols in parallel: for data that does not get good compression,
// the stream will consist of a large number of short literals (1-byte or 2-byte)
// followed by short repeat runs. This results in many 2-byte or 3-byte symbols
// that can all be decoded in parallel once we know the symbol length.
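// (Tag recap: the low two bits of the first symbol byte select 00=literal,
// 01=copy with one extra offset byte, 10=copy with 2-byte offset, 11=copy with
// 4-byte offset; longer forms are detected and rejected via is_long_sym below.)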
{
uint32_t v0, v1, v2, len3_mask, cur_t, is_long_sym, short_sym_mask;
uint32_t b0;
cur = SHFL0(cur);
cur_t = cur + t;
b0 = READ_BYTE(cur_t);
v0 = BALLOT((b0 == 4) || (b0 & 2));
b0 = READ_BYTE(cur_t + 32);
v1 = BALLOT((b0 == 4) || (b0 & 2));
b0 = READ_BYTE(cur_t + 64);
v2 = BALLOT((b0 == 4) || (b0 & 2));
len3_mask = SHFL0((t == 0) ? get_len3_mask(v0, v1, v2) : 0);
cur_t = cur + 2 * t + __popc(len3_mask & ((1 << t) - 1));
b0 = READ_BYTE(cur_t);
is_long_sym = ((b0 & ~4) != 0) && (((b0 + 1) & 2) == 0);
short_sym_mask = BALLOT(is_long_sym);
batch_len = 0;
b = reinterpret_cast<volatile unsnap_batch_s *>(SHFL0(reinterpret_cast<uintptr_t>(b)));
if (!(short_sym_mask & 1)) {
batch_len = SHFL0((t == 0) ? (short_sym_mask) ? __ffs(short_sym_mask) - 1 : 32 : 0);
if (batch_len != 0) {
uint32_t blen = 0;
int32_t ofs = 0;
if (t < batch_len) {
blen = (b0 & 1) ? ((b0 >> 2) & 7) + 4 : ((b0 >> 2) + 1);
ofs = (b0 & 1) ? ((b0 & 0xe0) << 3) | READ_BYTE(cur_t + 1)
: (b0 & 2) ? READ_BYTE(cur_t + 1) | (READ_BYTE(cur_t + 2) << 8)
: -(int32_t)(cur_t + 1);
b[t].len = blen;
b[t].offset = ofs;
ofs += blen; // for correct out-of-range detection below
}
blen = WarpReducePos32(blen, t);
bytes_left = SHFL0(bytes_left);
dst_pos = SHFL0(dst_pos);
short_sym_mask = __ffs(BALLOT(blen > bytes_left || ofs > (int32_t)(dst_pos + blen)));
if (short_sym_mask != 0) { batch_len = min(batch_len, short_sym_mask - 1); }
if (batch_len != 0) {
blen = SHFL(blen, batch_len - 1);
cur = SHFL(cur_t, batch_len - 1) + 2 + ((len3_mask >> (batch_len - 1)) & 1);
if (t == 0) {
dst_pos += blen;
bytes_left -= blen;
}
}
}
}
// Check if the batch was stopped by a 3-byte or 4-byte literal
if (batch_len < BATCH_SIZE - 2 && SHFL(b0 & ~4, batch_len) == 8) {
// If so, run a slower version of the above that can also handle 3/4-byte literal sequences
uint32_t batch_add;
do {
uint32_t clen, mask_t;
cur_t = cur + t;
b0 = READ_BYTE(cur_t);
clen = (b0 & 3) ? (b0 & 2) ? 1 : 0 : (b0 >> 2); // symbol length minus 2
v0 = BALLOT(clen & 1);
v1 = BALLOT((clen >> 1) & 1);
len3_mask = SHFL0((t == 0) ? get_len5_mask(v0, v1) : 0);
mask_t = (1 << (2 * t)) - 1;
cur_t = cur + 2 * t + 2 * __popc((len3_mask & 0xaaaaaaaa) & mask_t) +
__popc((len3_mask & 0x55555555) & mask_t);
b0 = READ_BYTE(cur_t);
is_long_sym = ((b0 & 3) ? ((b0 & 3) == 3) : (b0 > 3 * 4)) || (cur_t >= cur + 32) ||
(batch_len + t >= BATCH_SIZE);
batch_add = __ffs(BALLOT(is_long_sym)) - 1;
if (batch_add != 0) {
uint32_t blen = 0;
int32_t ofs = 0;
if (t < batch_add) {
blen = (b0 & 1) ? ((b0 >> 2) & 7) + 4 : ((b0 >> 2) + 1);
ofs = (b0 & 1) ? ((b0 & 0xe0) << 3) | READ_BYTE(cur_t + 1)
: (b0 & 2) ? READ_BYTE(cur_t + 1) | (READ_BYTE(cur_t + 2) << 8)
: -(int32_t)(cur_t + 1);
b[batch_len + t].len = blen;
b[batch_len + t].offset = ofs;
ofs += blen; // for correct out-of-range detection below
}
blen = WarpReducePos32(blen, t);
bytes_left = SHFL0(bytes_left);
dst_pos = SHFL0(dst_pos);
short_sym_mask = __ffs(BALLOT(blen > bytes_left || ofs > (int32_t)(dst_pos + blen)));
if (short_sym_mask != 0) { batch_add = min(batch_add, short_sym_mask - 1); }
if (batch_add != 0) {
blen = SHFL(blen, batch_add - 1);
cur = SHFL(cur_t, batch_add - 1) + 2 + ((len3_mask >> ((batch_add - 1) * 2)) & 3);
if (t == 0) {
dst_pos += blen;
bytes_left -= blen;
}
batch_len += batch_add;
}
}
} while (batch_add >= 6 && batch_len < BATCH_SIZE - 2);
}
}
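// Serial fallback on lane 0: symbols the parallel path rejected (long
// literals, 4-byte offsets, or anything past the batch limit) are decoded
// one at a time here until the batch fills or the stream ends.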
if (t == 0) {
while (bytes_left > 0 && batch_len < BATCH_SIZE) {
uint32_t blen, offset;
uint8_t b0 = READ_BYTE(cur);
if (b0 & 3) {
uint8_t b1 = READ_BYTE(cur + 1);
if (!(b0 & 2)) {
// xxxxxx01.oooooooo: copy with 3-bit length, 11-bit offset
offset = ((b0 & 0xe0) << 3) | b1;
blen = ((b0 >> 2) & 7) + 4;
cur += 2;
} else {
// xxxxxx1x: copy with 6-bit length, 2-byte or 4-byte offset
offset = b1 | (READ_BYTE(cur + 2) << 8);
if (b0 & 1) // 4-byte offset
{
offset |= (READ_BYTE(cur + 3) << 16) | (READ_BYTE(cur + 4) << 24);
cur += 5;
} else {
cur += 3;
}
blen = (b0 >> 2) + 1;
}
dst_pos += blen;
if (offset - 1u >= dst_pos || bytes_left < blen) break;
bytes_left -= blen;
} else if (b0 < 4 * 4) {
// 0000xx00: short literal
blen = (b0 >> 2) + 1;
offset = -(int32_t)(cur + 1);
cur += 1 + blen;
dst_pos += blen;
if (bytes_left < blen) break;
bytes_left -= blen;
} else {
// xxxxxx00: literal
blen = b0 >> 2;
if (blen >= 60) {
uint32_t num_bytes = blen - 59;
blen = READ_BYTE(cur + 1);
if (num_bytes > 1) {
blen |= READ_BYTE(cur + 2) << 8;
if (num_bytes > 2) {
blen |= READ_BYTE(cur + 3) << 16;
if (num_bytes > 3) { blen |= READ_BYTE(cur + 4) << 24; }
}
}
cur += num_bytes;
}
cur += 1;
blen += 1;
offset = -(int32_t)cur;
cur += blen;
// Wait for prefetcher
s->q.prefetch_rdpos = cur;
#pragma unroll(1) // We don't want unrolling here
while (s->q.prefetch_wrpos < min(cur + 5 * BATCH_SIZE, end)) { NANOSLEEP(50); }
dst_pos += blen;
if (bytes_left < blen) break;
bytes_left -= blen;
}
b[batch_len].len = blen;
b[batch_len].offset = offset;
batch_len++;
}
if (batch_len != 0) {
s->q.batch_len[batch] = batch_len;
batch = (batch + 1) & (BATCH_COUNT - 1);
}
}
batch_len = SHFL0(batch_len);
if (t == 0) {
while (s->q.batch_len[batch] != 0) { NANOSLEEP(100); }
}
if (batch_len != BATCH_SIZE) { break; }
}
if (!t) {
s->q.prefetch_end = 1;
s->q.batch_len[batch] = -1;
s->bytes_left = bytes_left;
if (bytes_left != 0) { s->error = -2; }
}
}
/**
* @brief process LZ77 symbols and output uncompressed stream
*
* @param s decompression state
* @param t thread id within participating group (lane id)
*
* NOTE: No error checks at this stage (WARP0 responsible for not sending offsets and lengths that
 * would result in out-of-bounds accesses)
**/
template <typename Storage>
__device__ void snappy_process_symbols(unsnap_state_s *s, int t, Storage &temp_storage)
{
const uint8_t *literal_base = s->base;
uint8_t *out = reinterpret_cast<uint8_t *>(s->in.dstDevice);
int batch = 0;
do {
volatile unsnap_batch_s *b = &s->q.batch[batch * BATCH_SIZE];
int32_t batch_len, blen_t, dist_t;
if (t == 0) {
while ((batch_len = s->q.batch_len[batch]) == 0) { NANOSLEEP(100); }
} else {
batch_len = 0;
}
batch_len = SHFL0(batch_len);
if (batch_len <= 0) { break; }
if (t < batch_len) {
blen_t = b[t].len;
dist_t = b[t].offset;
} else {
blen_t = dist_t = 0;
}
// Try to combine as many small entries as possible, but try to avoid doing that
// if we see a small repeat distance 8 bytes or less
if (SHFL0(min((uint32_t)dist_t, (uint32_t)SHFL_XOR(dist_t, 1))) > 8) {
uint32_t n;
do {
uint32_t bofs = WarpReducePos32(blen_t, t);
uint32_t stop_mask = BALLOT((uint32_t)dist_t < bofs);
uint32_t start_mask =
hipcub::WarpReduce<uint32_t>(temp_storage).Sum((bofs < 32 && t < batch_len) ? 1 << bofs : 0);
start_mask = SHFL0(start_mask);
n = min(min((uint32_t)__popc(start_mask), (uint32_t)(__ffs(stop_mask) - 1u)),
(uint32_t)batch_len);
if (n != 0) {
uint32_t it = __popc(start_mask & ((2 << t) - 1));
uint32_t tr = t - SHFL(bofs - blen_t, it);
int32_t dist = SHFL(dist_t, it);
if (it < n) {
const uint8_t *src = (dist > 0) ? (out + t - dist) : (literal_base + tr - dist);
out[t] = *src;
}
out += SHFL(bofs, n - 1);
blen_t = SHFL(blen_t, (n + t) & 0x1f);
dist_t = SHFL(dist_t, (n + t) & 0x1f);
batch_len -= n;
}
} while (n >= 4);
}
for (int i = 0; i < batch_len; i++) {
int32_t blen = SHFL(blen_t, i);
int32_t dist = SHFL(dist_t, i);
int32_t blen2 = (i + 1 < batch_len) ? SHFL(blen_t, i + 1) : 32;
// Try to combine consecutive small entries if they are independent
if ((uint32_t)dist >= (uint32_t)blen && blen + blen2 <= 32) {
int32_t dist2 = SHFL(dist_t, i + 1);
if ((uint32_t)dist2 >= (uint32_t)(blen + blen2)) {
int32_t d;
if (t < blen) {
d = dist;
} else {
dist = dist2;
d = (dist2 <= 0) ? dist2 + blen : dist2;
}
blen += blen2;
if (t < blen) {
const uint8_t *src = (dist > 0) ? (out - d) : (literal_base - d);
out[t] = src[t];
}
out += blen;
i++;
continue;
}
}
if (dist > 0) {
// Copy
uint8_t b0, b1;
if (t < blen) {
uint32_t pos = t;
const uint8_t *src = out + ((pos >= dist) ? (pos % dist) : pos) - dist;
b0 = *src;
}
if (32 + t < blen) {
uint32_t pos = 32 + t;
const uint8_t *src = out + ((pos >= dist) ? (pos % dist) : pos) - dist;
b1 = *src;
}
if (t < blen) { out[t] = b0; }
if (32 + t < blen) { out[32 + t] = b1; }
} else {
// Literal
uint8_t b0, b1;
dist = -dist;
while (blen >= 64) {
b0 = literal_base[dist + t];
b1 = literal_base[dist + 32 + t];
out[t] = b0;
out[32 + t] = b1;
dist += 64;
out += 64;
blen -= 64;
}
if (t < blen) { b0 = literal_base[dist + t]; }
if (32 + t < blen) { b1 = literal_base[dist + 32 + t]; }
if (t < blen) { out[t] = b0; }
if (32 + t < blen) { out[32 + t] = b1; }
}
out += blen;
}
SYNCWARP();
if (t == 0) { s->q.batch_len[batch] = 0; }
batch = (batch + 1) & (BATCH_COUNT - 1);
} while (1);
}
/**
* @brief Snappy decompression kernel
* See http://github.com/google/snappy/blob/master/format_description.txt
*
* blockDim {128,1,1}
*
* @param[in] inputs Source & destination information per block
* @param[out] outputs Decompression status per block
**/
template <int block_size>
__global__ void __launch_bounds__(block_size)
unsnap_kernel(gpu_inflate_input_s *inputs, gpu_inflate_status_s *outputs)
{
__shared__ __align__(16) unsnap_state_s state_g;
__shared__ hipcub::WarpReduce<uint32_t>::TempStorage temp_storage;
int t = threadIdx.x;
unsnap_state_s *s = &state_g;
int strm_id = blockIdx.x;
if (t < sizeof(gpu_inflate_input_s) / sizeof(uint32_t)) {
reinterpret_cast<uint32_t *>(&s->in)[t] =
reinterpret_cast<const uint32_t *>(&inputs[strm_id])[t];
__threadfence_block();
}
if (t < BATCH_COUNT) { s->q.batch_len[t] = 0; }
__syncthreads();
if (!t) {
const uint8_t *cur = reinterpret_cast<const uint8_t *>(s->in.srcDevice);
const uint8_t *end = cur + s->in.srcSize;
s->error = 0;
#if LOG_CYCLECOUNT
s->tstart = clock();
#endif
if (cur < end) {
// Read uncompressed size (varint), limited to 32-bit
uint32_t uncompressed_size = *cur++;
if (uncompressed_size > 0x7f) {
uint32_t c = (cur < end) ? *cur++ : 0;
uncompressed_size = (uncompressed_size & 0x7f) | (c << 7);
if (uncompressed_size >= (0x80 << 7)) {
c = (cur < end) ? *cur++ : 0;
uncompressed_size = (uncompressed_size & ((0x7f << 7) | 0x7f)) | (c << 14);
if (uncompressed_size >= (0x80 << 14)) {
c = (cur < end) ? *cur++ : 0;
uncompressed_size =
(uncompressed_size & ((0x7f << 14) | (0x7f << 7) | 0x7f)) | (c << 21);
if (uncompressed_size >= (0x80 << 21)) {
c = (cur < end) ? *cur++ : 0;
if (c < 0x8)
uncompressed_size =
(uncompressed_size & ((0x7f << 21) | (0x7f << 14) | (0x7f << 7) | 0x7f)) |
(c << 28);
else
s->error = -1;
}
}
}
}
s->uncompressed_size = uncompressed_size;
s->bytes_left = uncompressed_size;
s->base = cur;
s->end = end;
if ((cur >= end && uncompressed_size != 0) || (uncompressed_size > s->in.dstSize)) {
s->error = -1;
}
} else {
s->error = -1;
}
s->q.prefetch_end = 0;
s->q.prefetch_wrpos = 0;
s->q.prefetch_rdpos = 0;
}
__syncthreads();
if (!s->error) {
if (t < 32) {
// WARP0: decode lengths and offsets
snappy_decode_symbols(s, t);
} else if (t < 64) {
// WARP1: prefetch byte stream for WARP0
snappy_prefetch_bytestream(s, t & 0x1f);
} else if (t < 96) {
// WARP2: LZ77
snappy_process_symbols(s, t & 0x1f, temp_storage);
}
__syncthreads();
}
if (!t) {
outputs[strm_id].bytes_written = s->uncompressed_size - s->bytes_left;
outputs[strm_id].status = s->error;
#if LOG_CYCLECOUNT
outputs[strm_id].reserved = clock() - s->tstart;
#else
outputs[strm_id].reserved = 0;
#endif
}
}
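// Illustrative host-side usage (a sketch, not part of the original API surface;
// d_inputs/d_outputs are assumed to be device-accessible arrays with one entry
// per compressed stream):
//   gpu_unsnap(d_inputs, d_outputs, num_streams, stream); // 1 block per stream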
hipError_t __host__ gpu_unsnap(gpu_inflate_input_s *inputs,
gpu_inflate_status_s *outputs,
int count,
hipStream_t stream)
{
uint32_t count32 = (count > 0) ? count : 0;
dim3 dim_block(128, 1); // 4 warps per stream, 1 stream per block
dim3 dim_grid(count32, 1); // TODO: Check max grid dimensions vs max expected count
hipLaunchKernelGGL(( unsnap_kernel<128>), dim3(dim_grid), dim3(dim_block), 0, stream, inputs, outputs);
return hipSuccess;
}
} // namespace io
} // namespace cudf
| 7c15ab8faaefa25af0ad7b0acb3550243119b8cb.cu | /*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cub/cub.cuh>
#include <io/utilities/block_utils.cuh>
#include "gpuinflate.h"
namespace cudf {
namespace io {
// Not supporting streams longer than this (not what snappy is intended for)
#define SNAPPY_MAX_STREAM_SIZE 0x7fffffff
#define LOG2_BATCH_SIZE 5
#define BATCH_SIZE (1 << LOG2_BATCH_SIZE)
#define LOG2_BATCH_COUNT 2
#define BATCH_COUNT (1 << LOG2_BATCH_COUNT)
#define LOG2_PREFETCH_SIZE 9
#define PREFETCH_SIZE (1 << LOG2_PREFETCH_SIZE) // 512B, in 32B chunks
#define LOG_CYCLECOUNT 0
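// With the defaults above: BATCH_SIZE = 32 symbols per batch and
// BATCH_COUNT = 4 batches in flight between the decode and output warps.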
/**
* @brief Describes a single LZ77 symbol (single entry in batch)
**/
struct unsnap_batch_s {
int32_t len; // 1..64 = Number of bytes
uint32_t
offset; // copy distance if greater than zero or negative of literal offset in byte stream
};
/**
* @brief Queue structure used to exchange data between warps
**/
struct unsnap_queue_s {
uint32_t prefetch_wrpos; ///< Prefetcher write position
uint32_t prefetch_rdpos; ///< Prefetch consumer read position
int32_t prefetch_end; ///< Prefetch enable flag (nonzero stops prefetcher)
int32_t batch_len[BATCH_COUNT]; ///< Length of each batch - <0:end, 0:not ready, >0:symbol count
unsnap_batch_s batch[BATCH_COUNT * BATCH_SIZE]; ///< LZ77 batch data
uint8_t buf[PREFETCH_SIZE]; ///< Prefetch buffer
};
/**
* @brief snappy decompression state
**/
struct unsnap_state_s {
const uint8_t *base; ///< base ptr of compressed stream
const uint8_t *end; ///< end of compressed stream
uint32_t uncompressed_size; ///< uncompressed stream size
  uint32_t bytes_left;        ///< uncompressed bytes still to be produced
int32_t error; ///< current error status
uint32_t tstart; ///< start time for perf logging
volatile unsnap_queue_s q; ///< queue for cross-warp communication
gpu_inflate_input_s in; ///< input parameters for current block
};
/**
* @brief prefetches data for the symbol decoding stage
*
* @param s decompression state
* @param t warp lane id
**/
__device__ void snappy_prefetch_bytestream(unsnap_state_s *s, int t)
{
const uint8_t *base = s->base;
uint32_t end = (uint32_t)(s->end - base);
uint32_t align_bytes = (uint32_t)(0x20 - (0x1f & reinterpret_cast<uintptr_t>(base)));
int32_t pos = min(align_bytes, end);
int32_t blen;
// Start by prefetching up to the next 32B-aligned location
if (t < pos) { s->q.buf[t] = base[t]; }
blen = 0;
do {
SYNCWARP();
if (!t) {
uint32_t minrdpos;
s->q.prefetch_wrpos = pos;
minrdpos = pos - min(pos, PREFETCH_SIZE - 32u);
blen = (int)min(32u, end - pos);
for (;;) {
uint32_t rdpos = s->q.prefetch_rdpos;
if (rdpos >= minrdpos) break;
if (s->q.prefetch_end) {
blen = 0;
break;
}
NANOSLEEP(100);
}
}
blen = SHFL0(blen);
if (t < blen) { s->q.buf[(pos + t) & (PREFETCH_SIZE - 1)] = base[pos + t]; }
pos += blen;
} while (blen > 0);
}
/**
* @brief Lookup table for get_len3_mask()
*
* Indexed by a 10-bit pattern, contains the corresponding 4-bit mask of
* 3-byte code lengths in the lower 4 bits, along with the total number of
* bytes used for coding the four lengths in the upper 4 bits.
* The upper 4-bit value could also be obtained by 8+__popc(mask4)
*
* for (uint32_t k = 0; k < 1024; k++)
* {
* for (uint32_t i = 0, v = 0, b = k, n = 0; i < 4; i++)
* {
* v |= (b & 1) << i;
* n += (b & 1) + 2;
* b >>= (b & 1) + 2;
* }
* k_len3lut[k] = v | (n << 4);
* }
*
**/
static const uint8_t __device__ __constant__ k_len3lut[1 << 10] = {
0x80, 0x91, 0x80, 0x91, 0x92, 0x91, 0x92, 0x91, 0x80, 0xa3, 0x80, 0xa3, 0x92, 0xa3, 0x92, 0xa3,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xa3, 0x94, 0xa3, 0x92, 0xa3, 0x92, 0xa3,
0x80, 0xa5, 0x80, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x80, 0xa3, 0x80, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0x94, 0xa5, 0x94, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x94, 0xa3, 0x94, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0x98, 0x91, 0x98, 0x91, 0x92, 0x91, 0x92, 0x91, 0x98, 0xb7, 0x98, 0xb7, 0x92, 0xb7, 0x92, 0xb7,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xb7, 0x94, 0xb7, 0x92, 0xb7, 0x92, 0xb7,
0x98, 0xa5, 0x98, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x98, 0xb7, 0x98, 0xb7, 0xa6, 0xb7, 0xa6, 0xb7,
0x94, 0xa5, 0x94, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x94, 0xb7, 0x94, 0xb7, 0xa6, 0xb7, 0xa6, 0xb7,
0x80, 0xa9, 0x80, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x80, 0xa3, 0x80, 0xa3, 0xaa, 0xa3, 0xaa, 0xa3,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xa3, 0xac, 0xa3, 0xaa, 0xa3, 0xaa, 0xa3,
0x80, 0xa5, 0x80, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x80, 0xa3, 0x80, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0xac, 0xa5, 0xac, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0xac, 0xa3, 0xac, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0x98, 0xa9, 0x98, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x98, 0xb7, 0x98, 0xb7, 0xaa, 0xb7, 0xaa, 0xb7,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xb7, 0xac, 0xb7, 0xaa, 0xb7, 0xaa, 0xb7,
0x98, 0xa5, 0x98, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x98, 0xb7, 0x98, 0xb7, 0xa6, 0xb7, 0xa6, 0xb7,
0xac, 0xa5, 0xac, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0xac, 0xb7, 0xac, 0xb7, 0xa6, 0xb7, 0xa6, 0xb7,
0x80, 0x91, 0x80, 0x91, 0x92, 0x91, 0x92, 0x91, 0x80, 0xbb, 0x80, 0xbb, 0x92, 0xbb, 0x92, 0xbb,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xbb, 0x94, 0xbb, 0x92, 0xbb, 0x92, 0xbb,
0x80, 0xbd, 0x80, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x80, 0xbb, 0x80, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0x94, 0xbd, 0x94, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x94, 0xbb, 0x94, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0x98, 0x91, 0x98, 0x91, 0x92, 0x91, 0x92, 0x91, 0x98, 0xb7, 0x98, 0xb7, 0x92, 0xb7, 0x92, 0xb7,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xb7, 0x94, 0xb7, 0x92, 0xb7, 0x92, 0xb7,
0x98, 0xbd, 0x98, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x98, 0xb7, 0x98, 0xb7, 0xbe, 0xb7, 0xbe, 0xb7,
0x94, 0xbd, 0x94, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x94, 0xb7, 0x94, 0xb7, 0xbe, 0xb7, 0xbe, 0xb7,
0x80, 0xa9, 0x80, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x80, 0xbb, 0x80, 0xbb, 0xaa, 0xbb, 0xaa, 0xbb,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xbb, 0xac, 0xbb, 0xaa, 0xbb, 0xaa, 0xbb,
0x80, 0xbd, 0x80, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x80, 0xbb, 0x80, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0xac, 0xbd, 0xac, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0xac, 0xbb, 0xac, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0x98, 0xa9, 0x98, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x98, 0xb7, 0x98, 0xb7, 0xaa, 0xb7, 0xaa, 0xb7,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xb7, 0xac, 0xb7, 0xaa, 0xb7, 0xaa, 0xb7,
0x98, 0xbd, 0x98, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x98, 0xb7, 0x98, 0xb7, 0xbe, 0xb7, 0xbe, 0xb7,
0xac, 0xbd, 0xac, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0xac, 0xb7, 0xac, 0xb7, 0xbe, 0xb7, 0xbe, 0xb7,
0x80, 0x91, 0x80, 0x91, 0x92, 0x91, 0x92, 0x91, 0x80, 0xa3, 0x80, 0xa3, 0x92, 0xa3, 0x92, 0xa3,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xa3, 0x94, 0xa3, 0x92, 0xa3, 0x92, 0xa3,
0x80, 0xa5, 0x80, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x80, 0xa3, 0x80, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0x94, 0xa5, 0x94, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x94, 0xa3, 0x94, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0x98, 0x91, 0x98, 0x91, 0x92, 0x91, 0x92, 0x91, 0x98, 0xcf, 0x98, 0xcf, 0x92, 0xcf, 0x92, 0xcf,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xcf, 0x94, 0xcf, 0x92, 0xcf, 0x92, 0xcf,
0x98, 0xa5, 0x98, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x98, 0xcf, 0x98, 0xcf, 0xa6, 0xcf, 0xa6, 0xcf,
0x94, 0xa5, 0x94, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x94, 0xcf, 0x94, 0xcf, 0xa6, 0xcf, 0xa6, 0xcf,
0x80, 0xa9, 0x80, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x80, 0xa3, 0x80, 0xa3, 0xaa, 0xa3, 0xaa, 0xa3,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xa3, 0xac, 0xa3, 0xaa, 0xa3, 0xaa, 0xa3,
0x80, 0xa5, 0x80, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x80, 0xa3, 0x80, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0xac, 0xa5, 0xac, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0xac, 0xa3, 0xac, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0x98, 0xa9, 0x98, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x98, 0xcf, 0x98, 0xcf, 0xaa, 0xcf, 0xaa, 0xcf,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xcf, 0xac, 0xcf, 0xaa, 0xcf, 0xaa, 0xcf,
0x98, 0xa5, 0x98, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x98, 0xcf, 0x98, 0xcf, 0xa6, 0xcf, 0xa6, 0xcf,
0xac, 0xa5, 0xac, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0xac, 0xcf, 0xac, 0xcf, 0xa6, 0xcf, 0xa6, 0xcf,
0x80, 0x91, 0x80, 0x91, 0x92, 0x91, 0x92, 0x91, 0x80, 0xbb, 0x80, 0xbb, 0x92, 0xbb, 0x92, 0xbb,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xbb, 0x94, 0xbb, 0x92, 0xbb, 0x92, 0xbb,
0x80, 0xbd, 0x80, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x80, 0xbb, 0x80, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0x94, 0xbd, 0x94, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x94, 0xbb, 0x94, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0x98, 0x91, 0x98, 0x91, 0x92, 0x91, 0x92, 0x91, 0x98, 0xcf, 0x98, 0xcf, 0x92, 0xcf, 0x92, 0xcf,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xcf, 0x94, 0xcf, 0x92, 0xcf, 0x92, 0xcf,
0x98, 0xbd, 0x98, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x98, 0xcf, 0x98, 0xcf, 0xbe, 0xcf, 0xbe, 0xcf,
0x94, 0xbd, 0x94, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x94, 0xcf, 0x94, 0xcf, 0xbe, 0xcf, 0xbe, 0xcf,
0x80, 0xa9, 0x80, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x80, 0xbb, 0x80, 0xbb, 0xaa, 0xbb, 0xaa, 0xbb,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xbb, 0xac, 0xbb, 0xaa, 0xbb, 0xaa, 0xbb,
0x80, 0xbd, 0x80, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x80, 0xbb, 0x80, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0xac, 0xbd, 0xac, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0xac, 0xbb, 0xac, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0x98, 0xa9, 0x98, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x98, 0xcf, 0x98, 0xcf, 0xaa, 0xcf, 0xaa, 0xcf,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xcf, 0xac, 0xcf, 0xaa, 0xcf, 0xaa, 0xcf,
0x98, 0xbd, 0x98, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x98, 0xcf, 0x98, 0xcf, 0xbe, 0xcf, 0xbe, 0xcf,
0xac, 0xbd, 0xac, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0xac, 0xcf, 0xac, 0xcf, 0xbe, 0xcf, 0xbe, 0xcf};
/**
* @brief Returns a 32-bit mask where 1 means 3-byte code length and 0 means 2-byte
* code length, given an input mask of up to 96 bits.
*
* Implemented by doing 8 consecutive lookups, building the result 4-bit at a time
**/
inline __device__ uint32_t get_len3_mask(uint32_t v0, uint32_t v1, uint32_t v2)
{
uint32_t m, v, m4, n;
v = v0;
m4 = k_len3lut[v & 0x3ff];
m = m4 & 0xf;
n = m4 >> 4; // 8..12
v = v0 >> n;
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 4;
n += m4 >> 4; // 16..24
v = __funnelshift_r(v0, v1, n);
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 8;
n += m4 >> 4; // 24..36
v >>= (m4 >> 4);
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 12;
n = (n + (m4 >> 4)) & 0x1f; // (32..48) % 32 = 0..16
v1 = __funnelshift_r(v1, v2, n);
v2 >>= n;
v = v1;
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 16;
n = m4 >> 4; // 8..12
v = v1 >> n;
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 20;
n += m4 >> 4; // 16..24
v = __funnelshift_r(v1, v2, n);
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 24;
n += m4 >> 4; // 24..36
v >>= (m4 >> 4);
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 28;
return m;
}
/**
* @brief Returns a 32-bit mask where each 2-bit pair contains the symbol length
 * minus 2, given two input masks holding, respectively, bit0 and bit1 of the
 * corresponding code length minus 2 for up to 32 bytes
**/
inline __device__ uint32_t get_len5_mask(uint32_t v0, uint32_t v1)
{
uint32_t m;
m = (v1 & 1) * 2 + (v0 & 1);
v0 >>= (m + 2);
v1 >>= (m + 1);
for (uint32_t i = 1; i < 16; i++) {
uint32_t m2 = (v1 & 2) | (v0 & 1);
uint32_t n = m2 + 2;
m |= m2 << (i * 2);
v0 >>= n;
v1 >>= n;
}
return m;
}
#define READ_BYTE(pos) s->q.buf[(pos) & (PREFETCH_SIZE - 1)]
/**
* @brief decode symbols and output LZ77 batches (single-warp)
*
* @param s decompression state
* @param t warp lane id
**/
__device__ void snappy_decode_symbols(unsnap_state_s *s, uint32_t t)
{
uint32_t cur = 0;
uint32_t end = static_cast<uint32_t>(s->end - s->base);
uint32_t bytes_left = s->uncompressed_size;
uint32_t dst_pos = 0;
int32_t batch = 0;
for (;;) {
int32_t batch_len;
volatile unsnap_batch_s *b;
// Wait for prefetcher
if (t == 0) {
s->q.prefetch_rdpos = cur;
#pragma unroll(1) // We don't want unrolling here
while (s->q.prefetch_wrpos < min(cur + 5 * BATCH_SIZE, end)) { NANOSLEEP(50); }
b = &s->q.batch[batch * BATCH_SIZE];
}
// Process small symbols in parallel: for data that does not get good compression,
// the stream will consist of a large number of short literals (1-byte or 2-byte)
// followed by short repeat runs. This results in many 2-byte or 3-byte symbols
// that can all be decoded in parallel once we know the symbol length.
{
uint32_t v0, v1, v2, len3_mask, cur_t, is_long_sym, short_sym_mask;
uint32_t b0;
cur = SHFL0(cur);
cur_t = cur + t;
b0 = READ_BYTE(cur_t);
v0 = BALLOT((b0 == 4) || (b0 & 2));
b0 = READ_BYTE(cur_t + 32);
v1 = BALLOT((b0 == 4) || (b0 & 2));
b0 = READ_BYTE(cur_t + 64);
v2 = BALLOT((b0 == 4) || (b0 & 2));
len3_mask = SHFL0((t == 0) ? get_len3_mask(v0, v1, v2) : 0);
cur_t = cur + 2 * t + __popc(len3_mask & ((1 << t) - 1));
b0 = READ_BYTE(cur_t);
is_long_sym = ((b0 & ~4) != 0) && (((b0 + 1) & 2) == 0);
short_sym_mask = BALLOT(is_long_sym);
batch_len = 0;
b = reinterpret_cast<volatile unsnap_batch_s *>(SHFL0(reinterpret_cast<uintptr_t>(b)));
if (!(short_sym_mask & 1)) {
batch_len = SHFL0((t == 0) ? (short_sym_mask) ? __ffs(short_sym_mask) - 1 : 32 : 0);
if (batch_len != 0) {
uint32_t blen = 0;
int32_t ofs = 0;
if (t < batch_len) {
blen = (b0 & 1) ? ((b0 >> 2) & 7) + 4 : ((b0 >> 2) + 1);
ofs = (b0 & 1) ? ((b0 & 0xe0) << 3) | READ_BYTE(cur_t + 1)
: (b0 & 2) ? READ_BYTE(cur_t + 1) | (READ_BYTE(cur_t + 2) << 8)
: -(int32_t)(cur_t + 1);
b[t].len = blen;
b[t].offset = ofs;
ofs += blen; // for correct out-of-range detection below
}
blen = WarpReducePos32(blen, t);
bytes_left = SHFL0(bytes_left);
dst_pos = SHFL0(dst_pos);
short_sym_mask = __ffs(BALLOT(blen > bytes_left || ofs > (int32_t)(dst_pos + blen)));
if (short_sym_mask != 0) { batch_len = min(batch_len, short_sym_mask - 1); }
if (batch_len != 0) {
blen = SHFL(blen, batch_len - 1);
cur = SHFL(cur_t, batch_len - 1) + 2 + ((len3_mask >> (batch_len - 1)) & 1);
if (t == 0) {
dst_pos += blen;
bytes_left -= blen;
}
}
}
}
// Check if the batch was stopped by a 3-byte or 4-byte literal
if (batch_len < BATCH_SIZE - 2 && SHFL(b0 & ~4, batch_len) == 8) {
// If so, run a slower version of the above that can also handle 3/4-byte literal sequences
uint32_t batch_add;
do {
uint32_t clen, mask_t;
cur_t = cur + t;
b0 = READ_BYTE(cur_t);
clen = (b0 & 3) ? (b0 & 2) ? 1 : 0 : (b0 >> 2); // symbol length minus 2
v0 = BALLOT(clen & 1);
v1 = BALLOT((clen >> 1) & 1);
len3_mask = SHFL0((t == 0) ? get_len5_mask(v0, v1) : 0);
mask_t = (1 << (2 * t)) - 1;
cur_t = cur + 2 * t + 2 * __popc((len3_mask & 0xaaaaaaaa) & mask_t) +
__popc((len3_mask & 0x55555555) & mask_t);
b0 = READ_BYTE(cur_t);
is_long_sym = ((b0 & 3) ? ((b0 & 3) == 3) : (b0 > 3 * 4)) || (cur_t >= cur + 32) ||
(batch_len + t >= BATCH_SIZE);
batch_add = __ffs(BALLOT(is_long_sym)) - 1;
if (batch_add != 0) {
uint32_t blen = 0;
int32_t ofs = 0;
if (t < batch_add) {
blen = (b0 & 1) ? ((b0 >> 2) & 7) + 4 : ((b0 >> 2) + 1);
ofs = (b0 & 1) ? ((b0 & 0xe0) << 3) | READ_BYTE(cur_t + 1)
: (b0 & 2) ? READ_BYTE(cur_t + 1) | (READ_BYTE(cur_t + 2) << 8)
: -(int32_t)(cur_t + 1);
b[batch_len + t].len = blen;
b[batch_len + t].offset = ofs;
ofs += blen; // for correct out-of-range detection below
}
blen = WarpReducePos32(blen, t);
bytes_left = SHFL0(bytes_left);
dst_pos = SHFL0(dst_pos);
short_sym_mask = __ffs(BALLOT(blen > bytes_left || ofs > (int32_t)(dst_pos + blen)));
if (short_sym_mask != 0) { batch_add = min(batch_add, short_sym_mask - 1); }
if (batch_add != 0) {
blen = SHFL(blen, batch_add - 1);
cur = SHFL(cur_t, batch_add - 1) + 2 + ((len3_mask >> ((batch_add - 1) * 2)) & 3);
if (t == 0) {
dst_pos += blen;
bytes_left -= blen;
}
batch_len += batch_add;
}
}
} while (batch_add >= 6 && batch_len < BATCH_SIZE - 2);
}
}
if (t == 0) {
while (bytes_left > 0 && batch_len < BATCH_SIZE) {
uint32_t blen, offset;
uint8_t b0 = READ_BYTE(cur);
if (b0 & 3) {
uint8_t b1 = READ_BYTE(cur + 1);
if (!(b0 & 2)) {
// xxxxxx01.oooooooo: copy with 3-bit length, 11-bit offset
offset = ((b0 & 0xe0) << 3) | b1;
blen = ((b0 >> 2) & 7) + 4;
cur += 2;
} else {
// xxxxxx1x: copy with 6-bit length, 2-byte or 4-byte offset
offset = b1 | (READ_BYTE(cur + 2) << 8);
if (b0 & 1) // 4-byte offset
{
offset |= (READ_BYTE(cur + 3) << 16) | (READ_BYTE(cur + 4) << 24);
cur += 5;
} else {
cur += 3;
}
blen = (b0 >> 2) + 1;
}
dst_pos += blen;
if (offset - 1u >= dst_pos || bytes_left < blen) break;
bytes_left -= blen;
} else if (b0 < 4 * 4) {
// 0000xx00: short literal
blen = (b0 >> 2) + 1;
offset = -(int32_t)(cur + 1);
cur += 1 + blen;
dst_pos += blen;
if (bytes_left < blen) break;
bytes_left -= blen;
} else {
// xxxxxx00: literal
blen = b0 >> 2;
if (blen >= 60) {
uint32_t num_bytes = blen - 59;
blen = READ_BYTE(cur + 1);
if (num_bytes > 1) {
blen |= READ_BYTE(cur + 2) << 8;
if (num_bytes > 2) {
blen |= READ_BYTE(cur + 3) << 16;
if (num_bytes > 3) { blen |= READ_BYTE(cur + 4) << 24; }
}
}
cur += num_bytes;
}
cur += 1;
blen += 1;
offset = -(int32_t)cur;
cur += blen;
// Wait for prefetcher
s->q.prefetch_rdpos = cur;
#pragma unroll(1) // We don't want unrolling here
while (s->q.prefetch_wrpos < min(cur + 5 * BATCH_SIZE, end)) { NANOSLEEP(50); }
dst_pos += blen;
if (bytes_left < blen) break;
bytes_left -= blen;
}
b[batch_len].len = blen;
b[batch_len].offset = offset;
batch_len++;
}
if (batch_len != 0) {
s->q.batch_len[batch] = batch_len;
batch = (batch + 1) & (BATCH_COUNT - 1);
}
}
batch_len = SHFL0(batch_len);
if (t == 0) {
while (s->q.batch_len[batch] != 0) { NANOSLEEP(100); }
}
if (batch_len != BATCH_SIZE) { break; }
}
if (!t) {
s->q.prefetch_end = 1;
s->q.batch_len[batch] = -1;
s->bytes_left = bytes_left;
if (bytes_left != 0) { s->error = -2; }
}
}
/**
* @brief process LZ77 symbols and output uncompressed stream
*
* @param s decompression state
* @param t thread id within participating group (lane id)
*
* NOTE: No error checks at this stage (WARP0 responsible for not sending offsets and lengths that
 * would result in out-of-bounds accesses)
**/
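// Batch entries encode literals and copies in one field: offset > 0 is an LZ77
// copy distance back into the output, while offset <= 0 is the negated position
// of the literal bytes in the compressed input (see snappy_decode_symbols above).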
template <typename Storage>
__device__ void snappy_process_symbols(unsnap_state_s *s, int t, Storage &temp_storage)
{
const uint8_t *literal_base = s->base;
uint8_t *out = reinterpret_cast<uint8_t *>(s->in.dstDevice);
int batch = 0;
do {
volatile unsnap_batch_s *b = &s->q.batch[batch * BATCH_SIZE];
int32_t batch_len, blen_t, dist_t;
if (t == 0) {
while ((batch_len = s->q.batch_len[batch]) == 0) { NANOSLEEP(100); }
} else {
batch_len = 0;
}
batch_len = SHFL0(batch_len);
if (batch_len <= 0) { break; }
if (t < batch_len) {
blen_t = b[t].len;
dist_t = b[t].offset;
} else {
blen_t = dist_t = 0;
}
// Try to combine as many small entries as possible, but try to avoid doing that
// if we see a small repeat distance 8 bytes or less
if (SHFL0(min((uint32_t)dist_t, (uint32_t)SHFL_XOR(dist_t, 1))) > 8) {
uint32_t n;
do {
uint32_t bofs = WarpReducePos32(blen_t, t);
uint32_t stop_mask = BALLOT((uint32_t)dist_t < bofs);
uint32_t start_mask =
cub::WarpReduce<uint32_t>(temp_storage).Sum((bofs < 32 && t < batch_len) ? 1 << bofs : 0);
start_mask = SHFL0(start_mask);
n = min(min((uint32_t)__popc(start_mask), (uint32_t)(__ffs(stop_mask) - 1u)),
(uint32_t)batch_len);
if (n != 0) {
uint32_t it = __popc(start_mask & ((2 << t) - 1));
uint32_t tr = t - SHFL(bofs - blen_t, it);
int32_t dist = SHFL(dist_t, it);
if (it < n) {
const uint8_t *src = (dist > 0) ? (out + t - dist) : (literal_base + tr - dist);
out[t] = *src;
}
out += SHFL(bofs, n - 1);
blen_t = SHFL(blen_t, (n + t) & 0x1f);
dist_t = SHFL(dist_t, (n + t) & 0x1f);
batch_len -= n;
}
} while (n >= 4);
}
for (int i = 0; i < batch_len; i++) {
int32_t blen = SHFL(blen_t, i);
int32_t dist = SHFL(dist_t, i);
int32_t blen2 = (i + 1 < batch_len) ? SHFL(blen_t, i + 1) : 32;
// Try to combine consecutive small entries if they are independent
if ((uint32_t)dist >= (uint32_t)blen && blen + blen2 <= 32) {
int32_t dist2 = SHFL(dist_t, i + 1);
if ((uint32_t)dist2 >= (uint32_t)(blen + blen2)) {
int32_t d;
if (t < blen) {
d = dist;
} else {
dist = dist2;
d = (dist2 <= 0) ? dist2 + blen : dist2;
}
blen += blen2;
if (t < blen) {
const uint8_t *src = (dist > 0) ? (out - d) : (literal_base - d);
out[t] = src[t];
}
out += blen;
i++;
continue;
}
}
if (dist > 0) {
// Copy
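// Overlapping copies (blen > dist) are handled by reading pos % dist, which
// always lands before the current output pointer, so the periodic pattern
// replicates correctly even though all lanes write in parallel.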
uint8_t b0, b1;
if (t < blen) {
uint32_t pos = t;
const uint8_t *src = out + ((pos >= dist) ? (pos % dist) : pos) - dist;
b0 = *src;
}
if (32 + t < blen) {
uint32_t pos = 32 + t;
const uint8_t *src = out + ((pos >= dist) ? (pos % dist) : pos) - dist;
b1 = *src;
}
if (t < blen) { out[t] = b0; }
if (32 + t < blen) { out[32 + t] = b1; }
} else {
// Literal
uint8_t b0, b1;
dist = -dist;
while (blen >= 64) {
b0 = literal_base[dist + t];
b1 = literal_base[dist + 32 + t];
out[t] = b0;
out[32 + t] = b1;
dist += 64;
out += 64;
blen -= 64;
}
if (t < blen) { b0 = literal_base[dist + t]; }
if (32 + t < blen) { b1 = literal_base[dist + 32 + t]; }
if (t < blen) { out[t] = b0; }
if (32 + t < blen) { out[32 + t] = b1; }
}
out += blen;
}
SYNCWARP();
if (t == 0) { s->q.batch_len[batch] = 0; }
batch = (batch + 1) & (BATCH_COUNT - 1);
} while (1);
}
/**
* @brief Snappy decompression kernel
* See http://github.com/google/snappy/blob/master/format_description.txt
*
* blockDim {128,1,1}
*
* @param[in] inputs Source & destination information per block
* @param[out] outputs Decompression status per block
**/
template <int block_size>
__global__ void __launch_bounds__(block_size)
unsnap_kernel(gpu_inflate_input_s *inputs, gpu_inflate_status_s *outputs)
{
__shared__ __align__(16) unsnap_state_s state_g;
__shared__ cub::WarpReduce<uint32_t>::TempStorage temp_storage;
int t = threadIdx.x;
unsnap_state_s *s = &state_g;
int strm_id = blockIdx.x;
if (t < sizeof(gpu_inflate_input_s) / sizeof(uint32_t)) {
reinterpret_cast<uint32_t *>(&s->in)[t] =
reinterpret_cast<const uint32_t *>(&inputs[strm_id])[t];
__threadfence_block();
}
if (t < BATCH_COUNT) { s->q.batch_len[t] = 0; }
__syncthreads();
if (!t) {
const uint8_t *cur = reinterpret_cast<const uint8_t *>(s->in.srcDevice);
const uint8_t *end = cur + s->in.srcSize;
s->error = 0;
#if LOG_CYCLECOUNT
s->tstart = clock();
#endif
if (cur < end) {
// Read uncompressed size (varint), limited to 32-bit
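// (Each varint byte carries 7 payload bits, least-significant group first;
// the high bit of a byte marks that another byte follows.)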
uint32_t uncompressed_size = *cur++;
if (uncompressed_size > 0x7f) {
uint32_t c = (cur < end) ? *cur++ : 0;
uncompressed_size = (uncompressed_size & 0x7f) | (c << 7);
if (uncompressed_size >= (0x80 << 7)) {
c = (cur < end) ? *cur++ : 0;
uncompressed_size = (uncompressed_size & ((0x7f << 7) | 0x7f)) | (c << 14);
if (uncompressed_size >= (0x80 << 14)) {
c = (cur < end) ? *cur++ : 0;
uncompressed_size =
(uncompressed_size & ((0x7f << 14) | (0x7f << 7) | 0x7f)) | (c << 21);
if (uncompressed_size >= (0x80 << 21)) {
c = (cur < end) ? *cur++ : 0;
if (c < 0x8)
uncompressed_size =
(uncompressed_size & ((0x7f << 21) | (0x7f << 14) | (0x7f << 7) | 0x7f)) |
(c << 28);
else
s->error = -1;
}
}
}
}
s->uncompressed_size = uncompressed_size;
s->bytes_left = uncompressed_size;
s->base = cur;
s->end = end;
if ((cur >= end && uncompressed_size != 0) || (uncompressed_size > s->in.dstSize)) {
s->error = -1;
}
} else {
s->error = -1;
}
s->q.prefetch_end = 0;
s->q.prefetch_wrpos = 0;
s->q.prefetch_rdpos = 0;
}
__syncthreads();
if (!s->error) {
if (t < 32) {
// WARP0: decode lengths and offsets
snappy_decode_symbols(s, t);
} else if (t < 64) {
// WARP1: prefetch byte stream for WARP0
snappy_prefetch_bytestream(s, t & 0x1f);
} else if (t < 96) {
// WARP2: LZ77
snappy_process_symbols(s, t & 0x1f, temp_storage);
}
__syncthreads();
}
if (!t) {
outputs[strm_id].bytes_written = s->uncompressed_size - s->bytes_left;
outputs[strm_id].status = s->error;
#if LOG_CYCLECOUNT
outputs[strm_id].reserved = clock() - s->tstart;
#else
outputs[strm_id].reserved = 0;
#endif
}
}
cudaError_t __host__ gpu_unsnap(gpu_inflate_input_s *inputs,
gpu_inflate_status_s *outputs,
int count,
cudaStream_t stream)
{
uint32_t count32 = (count > 0) ? count : 0;
dim3 dim_block(128, 1); // 4 warps per stream, 1 stream per block
dim3 dim_grid(count32, 1); // TODO: Check max grid dimensions vs max expected count
unsnap_kernel<128><<<dim_grid, dim_block, 0, stream>>>(inputs, outputs);
return cudaSuccess;
}
} // namespace io
} // namespace cudf
|
608dc50efbd39aca22df45cb64142edd879dc6c9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Luigy Machaca Arcana
// Computer science - Arequipa, Perú 2017
#include <stdlib.h>
#include <stdio.h>
#include <time.h> // time() is used below to seed rand()
#define WIDTH_TILE 32
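// Tiled multiply: each 32x32 block stages one tile of A and one tile of B in
// shared memory per k-iteration, so each global element is loaded once per
// tile rather than once per output element.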
__global__ void matrix_mult_shared(int** dd_mat_a, int n_rows_a, int n_cols_a ,int** dd_mat_b, int n_rows_b, int n_cols_b, int** dd_mat_c, int n_rows_c, int n_cols_c){
__shared__ int Mds[WIDTH_TILE][WIDTH_TILE];
__shared__ int Nds[WIDTH_TILE][WIDTH_TILE];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int value = 0;
int row = by*WIDTH_TILE + ty;
int col = bx*WIDTH_TILE + tx;
int width = n_cols_a; //n_cols_a == n_rows_b
int k;
for( k=0 ; k<(int)(width-1+WIDTH_TILE)/(int)WIDTH_TILE ; ++k ){
if (k*WIDTH_TILE+tx < n_cols_a && row < n_rows_a){
Mds[ty][tx] = dd_mat_a[row][k*WIDTH_TILE+tx];
}
else{
Mds[ty][tx] = 0;
}
if (k*WIDTH_TILE+ty < n_rows_b && col < n_cols_b){
Nds[ty][tx] = dd_mat_b[k*WIDTH_TILE+ty][col];
}
else{
Nds[ty][tx] = 0;
}
__syncthreads();
int m;
for(m=0 ; m<WIDTH_TILE ; ++m){
value += Mds[ty][m]*Nds[m][tx];
}
__syncthreads();
}
if(row<n_rows_c && col<n_cols_c){
dd_mat_c[row][col]=value;
}
}
__global__
void matrix_mult(int** dd_mat_a, int n_rows_a, int n_cols_a ,int** dd_mat_b, int n_rows_b, int n_cols_b, int** dd_mat_c, int n_rows_c, int n_cols_c){
int value=0;
int tx=threadIdx.x;
int ty=threadIdx.y;
int x = tx + blockIdx.x*blockDim.x;
int y = ty + blockIdx.y*blockDim.y;
if( y<n_rows_c && x<n_cols_c ){
int i;
for(i=0 ; i<n_cols_a ; i++){
value += dd_mat_a[y][i] * dd_mat_b[i][x];
}
dd_mat_c[y][x]=value;
}
}
void fill(int** mat, int n, int m){
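    // NOTE: srand() is re-seeded on every call, so two fills within the same
    // second produce the same sequence; seeding once in main would be the
    // more robust choice.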
srand(time(0));
int i,j;
for(i=0; i<n ;i++){
for(j=0; j<m ;j++)
mat[i][j] = rand()%3+1;
//mat[i][j] = 1;
}
}
void fill_value(int** mat,int n, int m, int value=0){
int i,j;
for(i=0;i<n;i++)
for(j=0;j<m;j++)
mat[i][j] = value;
}
void print(int** mat,int n, int m){
int i,j;
for(i=0; i<n ;i++){
for(j=0; j<m ;j++)
printf("%d ",mat[i][j]);
printf("\n");
}
}
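// Allocates a host matrix (mat), a host array of device row pointers (d_mat),
// and a device-side copy of that pointer array (dd_mat) so kernels can use
// mat[i][j]-style indexing on the GPU.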
void create(int**& mat, int**& d_mat, int**& dd_mat, int n_rows, int n_cols, int fillValue=-1){
int i;
mat = (int** )malloc(sizeof(int*) * n_rows );
mat[0] = (int* )malloc(sizeof(int ) * n_rows * n_cols );
for( i=1 ; i<n_rows ; i++ ){
mat[i] = mat[i-1]+n_cols;
}
if(fillValue==-1){
fill(mat,n_rows,n_cols);
}
else{
fill_value(mat,n_rows,n_cols,fillValue);
}
int size_row = sizeof(int*) * n_rows;
d_mat = (int**) malloc(size_row);
hipMalloc((void**)& d_mat[0], sizeof(int) * n_rows * n_cols );
hipMemcpy( d_mat[0], mat[0], sizeof(int) * n_rows * n_cols ,hipMemcpyHostToDevice);
for( i=1 ; i<n_rows ; i++ ){
d_mat[i] = (d_mat[0]+i*n_cols);
}
hipMalloc((void***)& dd_mat, size_row );
hipMemcpy( dd_mat, d_mat, size_row, hipMemcpyHostToDevice );
}
int main(int argc, char *argv[]){
int n_rows_a = 3;
int n_cols_a = 5;
int n_rows_b = 5;
int n_cols_b = 7;
int n_rows_c = n_rows_a;
int n_cols_c = n_cols_b;
if(n_cols_a!=n_rows_b){
printf("error n_cols_a!=n_rows_b");
return 0;
}
int** mat_a; int** d_mat_a; int** dd_mat_a;
int** mat_b; int** d_mat_b; int** dd_mat_b;
int** mat_c; int** d_mat_c; int** dd_mat_c;
create( mat_a, d_mat_a, dd_mat_a, n_rows_a, n_cols_a );
create( mat_b, d_mat_b, dd_mat_b, n_rows_b, n_cols_b );
create( mat_c, d_mat_c, dd_mat_c, n_rows_c, n_cols_c, 0 );
/////////////////////////////////////////
dim3 blockNum(WIDTH_TILE,WIDTH_TILE,1);
dim3 grid((int)(n_cols_c-1+blockNum.x)/blockNum.x,(int)(n_rows_c-1+blockNum.y)/blockNum.y,1);
printf("ty: %d, tx: %d\n",(int)(n_rows_c-1+blockNum.y)/blockNum.y, (int)(n_cols_c-1+blockNum.x)/blockNum.x);
printf("grid_row: %d, grid_col: %d\n",grid.x , grid.y );
////////////////////////////////////////////////////
hipLaunchKernelGGL(( matrix_mult_shared), dim3(grid),dim3(blockNum), 0, 0, dd_mat_a, n_rows_a, n_cols_a, dd_mat_b, n_rows_b, n_cols_b, dd_mat_c, n_rows_c, n_cols_c);
//matrix_mult_shared_mejorado<<<grid,blockNum>>>(dd_mat_a, n_rows_a, n_cols_a, dd_mat_b, n_rows_b, n_cols_b, dd_mat_c, n_rows_c, n_cols_c);
//matrix_mult<<<grid,blockNum>>>(dd_mat_a, n_rows_a, n_cols_a, dd_mat_b, n_rows_b, n_cols_b, dd_mat_c, n_rows_c, n_cols_c);
/////////////////////////////////////////////////////
hipMemcpy(mat_c[0],d_mat_c[0],sizeof(int)*n_rows_c*n_cols_c,hipMemcpyDeviceToHost);
printf("//////////////////\n");
printf("//////////////////\n");
print(mat_a,n_rows_a,n_cols_a);
printf("//////////////////\n");
print(mat_b,n_rows_b,n_cols_b);
printf("//////////////////\n");
print(mat_c,n_rows_c,n_cols_c);
    hipFree(dd_mat_a);
    hipFree(dd_mat_b);
    hipFree(dd_mat_c);
    // d_mat_* are host arrays of device row pointers: free the device buffer
    // they reference, then the host arrays themselves (hipFree on a pointer
    // obtained from malloc would be invalid).
    hipFree(d_mat_a[0]);
    hipFree(d_mat_b[0]);
    hipFree(d_mat_c[0]);
    free(d_mat_a);
    free(d_mat_b);
    free(d_mat_c);
    free(mat_a[0]);
    free(mat_a);
    free(mat_b[0]);
    free(mat_b);
    free(mat_c[0]);
    free(mat_c);
return 0;
} | 608dc50efbd39aca22df45cb64142edd879dc6c9.cu | // Luigy Machaca Arcana
// Computer science - Arequipa, Perú 2017
#include <stdlib.h>
#include <stdio.h>
#include <time.h> // time() is used below to seed rand()
#define WIDTH_TILE 32
__global__ void matrix_mult_shared(int** dd_mat_a, int n_rows_a, int n_cols_a ,int** dd_mat_b, int n_rows_b, int n_cols_b, int** dd_mat_c, int n_rows_c, int n_cols_c){
__shared__ int Mds[WIDTH_TILE][WIDTH_TILE];
__shared__ int Nds[WIDTH_TILE][WIDTH_TILE];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int value = 0;
int row = by*WIDTH_TILE + ty;
int col = bx*WIDTH_TILE + tx;
int width = n_cols_a; //n_cols_a == n_rows_b
int k;
for( k=0 ; k<(int)(width-1+WIDTH_TILE)/(int)WIDTH_TILE ; ++k ){
if (k*WIDTH_TILE+tx < n_cols_a && row < n_rows_a){
Mds[ty][tx] = dd_mat_a[row][k*WIDTH_TILE+tx];
}
else{
Mds[ty][tx] = 0;
}
if (k*WIDTH_TILE+ty < n_rows_b && col < n_cols_b){
Nds[ty][tx] = dd_mat_b[k*WIDTH_TILE+ty][col];
}
else{
Nds[ty][tx] = 0;
}
__syncthreads();
int m;
for(m=0 ; m<WIDTH_TILE ; ++m){
value += Mds[ty][m]*Nds[m][tx];
}
__syncthreads();
}
if(row<n_rows_c && col<n_cols_c){
dd_mat_c[row][col]=value;
}
}
__global__
void matrix_mult(int** dd_mat_a, int n_rows_a, int n_cols_a ,int** dd_mat_b, int n_rows_b, int n_cols_b, int** dd_mat_c, int n_rows_c, int n_cols_c){
int value=0;
int tx=threadIdx.x;
int ty=threadIdx.y;
int x = tx + blockIdx.x*blockDim.x;
int y = ty + blockIdx.y*blockDim.y;
if( y<n_rows_c && x<n_cols_c ){
int i;
for(i=0 ; i<n_cols_a ; i++){
value += dd_mat_a[y][i] * dd_mat_b[i][x];
}
dd_mat_c[y][x]=value;
}
}
void fill(int** mat, int n, int m){
srand(time(0));
int i,j;
for(i=0; i<n ;i++){
for(j=0; j<m ;j++)
mat[i][j] = rand()%3+1;
//mat[i][j] = 1;
}
}
void fill_value(int** mat,int n, int m, int value=0){
int i,j;
for(i=0;i<n;i++)
for(j=0;j<m;j++)
mat[i][j] = value;
}
void print(int** mat,int n, int m){
int i,j;
for(i=0; i<n ;i++){
for(j=0; j<m ;j++)
printf("%d ",mat[i][j]);
printf("\n");
}
}
void create(int**& mat, int**& d_mat, int**& dd_mat, int n_rows, int n_cols, int fillValue=-1){
int i;
mat = (int** )malloc(sizeof(int*) * n_rows );
mat[0] = (int* )malloc(sizeof(int ) * n_rows * n_cols );
for( i=1 ; i<n_rows ; i++ ){
mat[i] = mat[i-1]+n_cols;
}
if(fillValue==-1){
fill(mat,n_rows,n_cols);
}
else{
fill_value(mat,n_rows,n_cols,fillValue);
}
int size_row = sizeof(int*) * n_rows;
d_mat = (int**) malloc(size_row);
cudaMalloc((void**)& d_mat[0], sizeof(int) * n_rows * n_cols );
cudaMemcpy( d_mat[0], mat[0], sizeof(int) * n_rows * n_cols ,cudaMemcpyHostToDevice);
for( i=1 ; i<n_rows ; i++ ){
d_mat[i] = (d_mat[0]+i*n_cols);
}
cudaMalloc((void***)& dd_mat, size_row );
cudaMemcpy( dd_mat, d_mat, size_row, cudaMemcpyHostToDevice );
}
int main(int argc, char *argv[]){
int n_rows_a = 3;
int n_cols_a = 5;
int n_rows_b = 5;
int n_cols_b = 7;
int n_rows_c = n_rows_a;
int n_cols_c = n_cols_b;
if(n_cols_a!=n_rows_b){
printf("error n_cols_a!=n_rows_b");
return 0;
}
int** mat_a; int** d_mat_a; int** dd_mat_a;
int** mat_b; int** d_mat_b; int** dd_mat_b;
int** mat_c; int** d_mat_c; int** dd_mat_c;
create( mat_a, d_mat_a, dd_mat_a, n_rows_a, n_cols_a );
create( mat_b, d_mat_b, dd_mat_b, n_rows_b, n_cols_b );
create( mat_c, d_mat_c, dd_mat_c, n_rows_c, n_cols_c, 0 );
/////////////////////////////////////////
dim3 blockNum(WIDTH_TILE,WIDTH_TILE,1);
dim3 grid((int)(n_cols_c-1+blockNum.x)/blockNum.x,(int)(n_rows_c-1+blockNum.y)/blockNum.y,1);
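    // (n + block - 1) / block is the usual ceiling division, so partially
    // filled tiles at the right/bottom edges of C still get a thread block.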
printf("ty: %d, tx: %d\n",(int)(n_rows_c-1+blockNum.y)/blockNum.y, (int)(n_cols_c-1+blockNum.x)/blockNum.x);
printf("grid_row: %d, grid_col: %d\n",grid.x , grid.y );
////////////////////////////////////////////////////
matrix_mult_shared<<<grid,blockNum>>>(dd_mat_a, n_rows_a, n_cols_a, dd_mat_b, n_rows_b, n_cols_b, dd_mat_c, n_rows_c, n_cols_c);
//matrix_mult_shared_mejorado<<<grid,blockNum>>>(dd_mat_a, n_rows_a, n_cols_a, dd_mat_b, n_rows_b, n_cols_b, dd_mat_c, n_rows_c, n_cols_c);
//matrix_mult<<<grid,blockNum>>>(dd_mat_a, n_rows_a, n_cols_a, dd_mat_b, n_rows_b, n_cols_b, dd_mat_c, n_rows_c, n_cols_c);
/////////////////////////////////////////////////////
cudaMemcpy(mat_c[0],d_mat_c[0],sizeof(int)*n_rows_c*n_cols_c,cudaMemcpyDeviceToHost);
printf("//////////////////\n");
printf("//////////////////\n");
print(mat_a,n_rows_a,n_cols_a);
printf("//////////////////\n");
print(mat_b,n_rows_b,n_cols_b);
printf("//////////////////\n");
print(mat_c,n_rows_c,n_cols_c);
    cudaFree(dd_mat_a);
    cudaFree(dd_mat_b);
    cudaFree(dd_mat_c);
    // d_mat_* are host arrays of device row pointers: free the device buffer
    // they reference, then the host arrays themselves (cudaFree on a pointer
    // obtained from malloc would be invalid).
    cudaFree(d_mat_a[0]);
    cudaFree(d_mat_b[0]);
    cudaFree(d_mat_c[0]);
    free(d_mat_a);
    free(d_mat_b);
    free(d_mat_c);
    free(mat_a[0]);
    free(mat_a);
    free(mat_b[0]);
    free(mat_b);
    free(mat_c[0]);
    free(mat_c);
return 0;
} |
5d0e458ecf22eca08f6d32e3413db9423408d1d6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/border_interpolate.hpp"
#include "opencv2/core/cuda/vec_traits.hpp"
#include "opencv2/core/cuda/vec_math.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/filters.hpp"
namespace cv { namespace cuda { namespace device
{
namespace imgproc
{
struct AffineTransform
{
static const int rows = 2;
static __device__ __forceinline__ float2 calcCoord(const float warpMat[AffineTransform::rows * 3], int x, int y)
{
const float xcoo = warpMat[0] * x + warpMat[1] * y + warpMat[2];
const float ycoo = warpMat[3] * x + warpMat[4] * y + warpMat[5];
return make_float2(xcoo, ycoo);
}
struct Coefficients
{
Coefficients(const float* c_)
{
for(int i = 0; i < AffineTransform::rows * 3; i++)
c[i] = c_[i];
}
float c[AffineTransform::rows * 3];
};
};
struct PerspectiveTransform
{
static const int rows = 3;
static __device__ __forceinline__ float2 calcCoord(const float warpMat[PerspectiveTransform::rows * 3], int x, int y)
{
const float coeff = 1.0f / (warpMat[6] * x + warpMat[7] * y + warpMat[8]);
const float xcoo = coeff * (warpMat[0] * x + warpMat[1] * y + warpMat[2]);
const float ycoo = coeff * (warpMat[3] * x + warpMat[4] * y + warpMat[5]);
return make_float2(xcoo, ycoo);
}
struct Coefficients
{
Coefficients(const float* c_)
{
for(int i = 0; i < PerspectiveTransform::rows * 3; i++)
c[i] = c_[i];
}
float c[PerspectiveTransform::rows * 3];
};
};
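        // Both functors consume a row-major 2x3 / 3x3 matrix flattened into
        // Coefficients::c and map a destination pixel (x, y) to the source
        // coordinate it samples from (gather-style inverse warping).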
///////////////////////////////////////////////////////////////////
// Build Maps
template <class Transform> __global__ void buildWarpMaps(PtrStepSzf xmap, PtrStepf ymap, const typename Transform::Coefficients warpMat)
{
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < xmap.cols && y < xmap.rows)
{
const float2 coord = Transform::calcCoord(warpMat.c, x, y);
xmap(y, x) = coord.x;
ymap(y, x) = coord.y;
}
}
template <class Transform> void buildWarpMaps_caller(PtrStepSzf xmap, PtrStepSzf ymap, const float warpMat[Transform::rows * 3], hipStream_t stream)
{
dim3 block(32, 8);
dim3 grid(divUp(xmap.cols, block.x), divUp(xmap.rows, block.y));
hipLaunchKernelGGL(( buildWarpMaps<Transform>), dim3(grid), dim3(block), 0, stream, xmap, ymap, warpMat);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
void buildWarpAffineMaps_gpu(float coeffs[2 * 3], PtrStepSzf xmap, PtrStepSzf ymap, hipStream_t stream)
{
buildWarpMaps_caller<AffineTransform>(xmap, ymap, coeffs, stream);
}
void buildWarpPerspectiveMaps_gpu(float coeffs[3 * 3], PtrStepSzf xmap, PtrStepSzf ymap, hipStream_t stream)
{
buildWarpMaps_caller<PerspectiveTransform>(xmap, ymap, coeffs, stream);
}
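// Illustrative host-side sketch (not part of the original file; xmap/ymap are
// hypothetical, pre-allocated CV_32FC1 device buffers of the destination size):
//   float coeffs[2 * 3] = { 1.f, 0.f, 10.f,    // x' = x + 10
//                           0.f, 1.f, 20.f };  // y' = y + 20
//   buildWarpAffineMaps_gpu(coeffs, xmap, ymap, stream);
//   // xmap(y, x) / ymap(y, x) now hold the source coordinates to sample
//   // when remapping destination pixel (x, y).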
///////////////////////////////////////////////////////////////////
// Warp
template <class Transform, class Ptr2D, typename T> __global__ void warp(const Ptr2D src, PtrStepSz<T> dst, const typename Transform::Coefficients warpMat)
{
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < dst.cols && y < dst.rows)
{
const float2 coord = Transform::calcCoord(warpMat.c, x, y);
dst.ptr(y)[x] = saturate_cast<T>(src(coord.y, coord.x));
}
}
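// Note: the warp kernel above does no bounds handling of coord itself;
// clamping, border handling and interpolation are all delegated to the Ptr2D
// 'src' object (a Filter wrapped around a BorderReader) that the dispatchers
// below construct and pass in.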
template <class Transform, template <typename> class Filter, template <typename> class B, typename T> struct WarpDispatcherStream
{
static void call(PtrStepSz<T> src, PtrStepSz<T> dst, const float* borderValue, const float warpMat[Transform::rows*3], hipStream_t stream, bool)
{
typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type work_type;
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
B<work_type> brd(src.rows, src.cols, VecTraits<work_type>::make(borderValue));
BorderReader< PtrStep<T>, B<work_type> > brdSrc(src, brd);
Filter< BorderReader< PtrStep<T>, B<work_type> > > filter_src(brdSrc);
hipLaunchKernelGGL(( warp<Transform>), dim3(grid), dim3(block), 0, stream, filter_src, dst, warpMat);
cudaSafeCall( hipGetLastError() );
}
};
template <class Transform, template <typename> class Filter, template <typename> class B, typename T> struct WarpDispatcherNonStream
{
static void call(PtrStepSz<T> src, PtrStepSz<T> srcWhole, int xoff, int yoff, PtrStepSz<T> dst, const float* borderValue, const float warpMat[Transform::rows*3], bool)
{
CV_UNUSED(xoff);
CV_UNUSED(yoff);
CV_UNUSED(srcWhole);
typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type work_type;
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
B<work_type> brd(src.rows, src.cols, VecTraits<work_type>::make(borderValue));
BorderReader< PtrStep<T>, B<work_type> > brdSrc(src, brd);
Filter< BorderReader< PtrStep<T>, B<work_type> > > filter_src(brdSrc);
hipLaunchKernelGGL(( warp<Transform>), dim3(grid), dim3(block), 0, 0, filter_src, dst, warpMat);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
};
#define OPENCV_CUDA_IMPLEMENT_WARP_TEX(type) \
texture< type , hipTextureType2D > tex_warp_ ## type (0, hipFilterModePoint, hipAddressModeClamp); \
struct tex_warp_ ## type ## _reader \
{ \
typedef type elem_type; \
typedef int index_type; \
int xoff, yoff; \
tex_warp_ ## type ## _reader (int xoff_, int yoff_) : xoff(xoff_), yoff(yoff_) {} \
__device__ __forceinline__ elem_type operator ()(index_type y, index_type x) const \
{ \
return tex2D(tex_warp_ ## type , x + xoff, y + yoff); \
} \
}; \
template <class Transform, template <typename> class Filter, template <typename> class B> struct WarpDispatcherNonStream<Transform, Filter, B, type> \
{ \
static void call(PtrStepSz< type > src, PtrStepSz< type > srcWhole, int xoff, int yoff, PtrStepSz< type > dst, const float* borderValue, const float warpMat[Transform::rows*3], bool cc20) \
{ \
typedef typename TypeVec<float, VecTraits< type >::cn>::vec_type work_type; \
dim3 block(32, cc20 ? 8 : 4); \
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y)); \
bindTexture(&tex_warp_ ## type , srcWhole); \
tex_warp_ ## type ##_reader texSrc(xoff, yoff); \
B<work_type> brd(src.rows, src.cols, VecTraits<work_type>::make(borderValue)); \
BorderReader< tex_warp_ ## type ##_reader, B<work_type> > brdSrc(texSrc, brd); \
Filter< BorderReader< tex_warp_ ## type ##_reader, B<work_type> > > filter_src(brdSrc); \
hipLaunchKernelGGL(( warp<Transform>), dim3(grid), dim3(block), 0, 0, filter_src, dst, warpMat); \
cudaSafeCall( hipGetLastError() ); \
cudaSafeCall( hipDeviceSynchronize() ); \
} \
}; \
template <class Transform, template <typename> class Filter> struct WarpDispatcherNonStream<Transform, Filter, BrdReplicate, type> \
{ \
static void call(PtrStepSz< type > src, PtrStepSz< type > srcWhole, int xoff, int yoff, PtrStepSz< type > dst, const float*, const float warpMat[Transform::rows*3], bool) \
{ \
dim3 block(32, 8); \
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y)); \
bindTexture(&tex_warp_ ## type , srcWhole); \
tex_warp_ ## type ##_reader texSrc(xoff, yoff); \
if (srcWhole.cols == src.cols && srcWhole.rows == src.rows) \
{ \
Filter< tex_warp_ ## type ##_reader > filter_src(texSrc); \
hipLaunchKernelGGL(( warp<Transform>), dim3(grid), dim3(block), 0, 0, filter_src, dst, warpMat); \
} \
else \
{ \
BrdReplicate<type> brd(src.rows, src.cols); \
BorderReader< tex_warp_ ## type ##_reader, BrdReplicate<type> > brdSrc(texSrc, brd); \
Filter< BorderReader< tex_warp_ ## type ##_reader, BrdReplicate<type> > > filter_src(brdSrc); \
hipLaunchKernelGGL(( warp<Transform>), dim3(grid), dim3(block), 0, 0, filter_src, dst, warpMat); \
} \
cudaSafeCall( hipGetLastError() ); \
cudaSafeCall( hipDeviceSynchronize() ); \
} \
};
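// The macro above generates, for each type instantiated below, a global
// texture reference plus texture-path specializations of
// WarpDispatcherNonStream used on the synchronous (stream == 0) path; the
// unspecialized global-memory versions handle asynchronous launches and types
// without a texture specialization (e.g. the 3-channel types).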
OPENCV_CUDA_IMPLEMENT_WARP_TEX(uchar)
//OPENCV_CUDA_IMPLEMENT_WARP_TEX(uchar2)
OPENCV_CUDA_IMPLEMENT_WARP_TEX(uchar4)
//OPENCV_CUDA_IMPLEMENT_WARP_TEX(schar)
//OPENCV_CUDA_IMPLEMENT_WARP_TEX(char2)
//OPENCV_CUDA_IMPLEMENT_WARP_TEX(char4)
OPENCV_CUDA_IMPLEMENT_WARP_TEX(ushort)
//OPENCV_CUDA_IMPLEMENT_WARP_TEX(ushort2)
OPENCV_CUDA_IMPLEMENT_WARP_TEX(ushort4)
OPENCV_CUDA_IMPLEMENT_WARP_TEX(short)
//OPENCV_CUDA_IMPLEMENT_WARP_TEX(short2)
OPENCV_CUDA_IMPLEMENT_WARP_TEX(short4)
//OPENCV_CUDA_IMPLEMENT_WARP_TEX(int)
//OPENCV_CUDA_IMPLEMENT_WARP_TEX(int2)
//OPENCV_CUDA_IMPLEMENT_WARP_TEX(int4)
OPENCV_CUDA_IMPLEMENT_WARP_TEX(float)
//OPENCV_CUDA_IMPLEMENT_WARP_TEX(float2)
OPENCV_CUDA_IMPLEMENT_WARP_TEX(float4)
#undef OPENCV_CUDA_IMPLEMENT_WARP_TEX
template <class Transform, template <typename> class Filter, template <typename> class B, typename T> struct WarpDispatcher
{
static void call(PtrStepSz<T> src, PtrStepSz<T> srcWhole, int xoff, int yoff, PtrStepSz<T> dst, const float* borderValue, const float warpMat[Transform::rows*3], hipStream_t stream, bool cc20)
{
if (stream == 0)
WarpDispatcherNonStream<Transform, Filter, B, T>::call(src, srcWhole, xoff, yoff, dst, borderValue, warpMat, cc20);
else
WarpDispatcherStream<Transform, Filter, B, T>::call(src, dst, borderValue, warpMat, stream, cc20);
}
};
template <class Transform, typename T>
void warp_caller(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzb dst, int interpolation,
int borderMode, const float* borderValue, const float warpMat[Transform::rows*3], hipStream_t stream, bool cc20)
{
typedef void (*func_t)(PtrStepSz<T> src, PtrStepSz<T> srcWhole, int xoff, int yoff, PtrStepSz<T> dst, const float* borderValue, const float warpMat[Transform::rows*3], hipStream_t stream, bool cc20);
static const func_t funcs[3][5] =
{
{
WarpDispatcher<Transform, PointFilter, BrdConstant, T>::call,
WarpDispatcher<Transform, PointFilter, BrdReplicate, T>::call,
WarpDispatcher<Transform, PointFilter, BrdReflect, T>::call,
WarpDispatcher<Transform, PointFilter, BrdWrap, T>::call,
WarpDispatcher<Transform, PointFilter, BrdReflect101, T>::call
},
{
WarpDispatcher<Transform, LinearFilter, BrdConstant, T>::call,
WarpDispatcher<Transform, LinearFilter, BrdReplicate, T>::call,
WarpDispatcher<Transform, LinearFilter, BrdReflect, T>::call,
WarpDispatcher<Transform, LinearFilter, BrdWrap, T>::call,
WarpDispatcher<Transform, LinearFilter, BrdReflect101, T>::call
},
{
WarpDispatcher<Transform, CubicFilter, BrdConstant, T>::call,
WarpDispatcher<Transform, CubicFilter, BrdReplicate, T>::call,
WarpDispatcher<Transform, CubicFilter, BrdReflect, T>::call,
WarpDispatcher<Transform, CubicFilter, BrdWrap, T>::call,
WarpDispatcher<Transform, CubicFilter, BrdReflect101, T>::call
}
};
funcs[interpolation][borderMode](static_cast< PtrStepSz<T> >(src), static_cast< PtrStepSz<T> >(srcWhole), xoff, yoff,
static_cast< PtrStepSz<T> >(dst), borderValue, warpMat, stream, cc20);
}
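// Note: funcs is indexed as funcs[interpolation][borderMode]; the rows and
// columns appear to follow OpenCV's public constants (INTER_NEAREST=0,
// INTER_LINEAR=1, INTER_CUBIC=2 and BORDER_CONSTANT=0, BORDER_REPLICATE=1,
// BORDER_REFLECT=2, BORDER_WRAP=3, BORDER_REFLECT_101=4).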
template <typename T> void warpAffine_gpu(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation,
int borderMode, const float* borderValue, hipStream_t stream, bool cc20)
{
warp_caller<AffineTransform, T>(src, srcWhole, xoff, yoff, dst, interpolation, borderMode, borderValue, coeffs, stream, cc20);
}
template void warpAffine_gpu<uchar >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
//template void warpAffine_gpu<uchar2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
template void warpAffine_gpu<uchar3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
template void warpAffine_gpu<uchar4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
//template void warpAffine_gpu<schar>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
//template void warpAffine_gpu<char2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
//template void warpAffine_gpu<char3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
//template void warpAffine_gpu<char4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
template void warpAffine_gpu<ushort >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
//template void warpAffine_gpu<ushort2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
template void warpAffine_gpu<ushort3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
template void warpAffine_gpu<ushort4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
template void warpAffine_gpu<short >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
//template void warpAffine_gpu<short2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
template void warpAffine_gpu<short3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
template void warpAffine_gpu<short4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
//template void warpAffine_gpu<int >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
//template void warpAffine_gpu<int2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
//template void warpAffine_gpu<int3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
//template void warpAffine_gpu<int4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
template void warpAffine_gpu<float >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
//template void warpAffine_gpu<float2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
template void warpAffine_gpu<float3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
template void warpAffine_gpu<float4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
template <typename T> void warpPerspective_gpu(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[3 * 3], PtrStepSzb dst, int interpolation,
int borderMode, const float* borderValue, hipStream_t stream, bool cc20)
{
warp_caller<PerspectiveTransform, T>(src, srcWhole, xoff, yoff, dst, interpolation, borderMode, borderValue, coeffs, stream, cc20);
}
template void warpPerspective_gpu<uchar >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[3 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
//template void warpPerspective_gpu<uchar2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[3 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
template void warpPerspective_gpu<uchar3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[3 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
template void warpPerspective_gpu<uchar4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[3 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
//template void warpPerspective_gpu<schar>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[3 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
//template void warpPerspective_gpu<char2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[3 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
//template void warpPerspective_gpu<char3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[3 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
//template void warpPerspective_gpu<char4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[3 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
template void warpPerspective_gpu<ushort >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[3 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
//template void warpPerspective_gpu<ushort2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[3 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
template void warpPerspective_gpu<ushort3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[3 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
template void warpPerspective_gpu<ushort4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[3 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
template void warpPerspective_gpu<short >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[3 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
//template void warpPerspective_gpu<short2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[3 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
template void warpPerspective_gpu<short3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[3 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
template void warpPerspective_gpu<short4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[3 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
//template void warpPerspective_gpu<int >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[3 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
//template void warpPerspective_gpu<int2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[3 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
//template void warpPerspective_gpu<int3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[3 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
//template void warpPerspective_gpu<int4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[3 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
template void warpPerspective_gpu<float >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[3 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
//template void warpPerspective_gpu<float2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[3 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
template void warpPerspective_gpu<float3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[3 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
template void warpPerspective_gpu<float4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[3 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
} // namespace imgproc
}}} // namespace cv { namespace cuda { namespace device
#endif /* CUDA_DISABLER */
| 5d0e458ecf22eca08f6d32e3413db9423408d1d6.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/border_interpolate.hpp"
#include "opencv2/core/cuda/vec_traits.hpp"
#include "opencv2/core/cuda/vec_math.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/filters.hpp"
namespace cv { namespace cuda { namespace device
{
namespace imgproc
{
struct AffineTransform
{
static const int rows = 2;
static __device__ __forceinline__ float2 calcCoord(const float warpMat[AffineTransform::rows * 3], int x, int y)
{
const float xcoo = warpMat[0] * x + warpMat[1] * y + warpMat[2];
const float ycoo = warpMat[3] * x + warpMat[4] * y + warpMat[5];
return make_float2(xcoo, ycoo);
}
struct Coefficients
{
Coefficients(const float* c_)
{
for(int i = 0; i < AffineTransform::rows * 3; i++)
c[i] = c_[i];
}
float c[AffineTransform::rows * 3];
};
};
struct PerspectiveTransform
{
static const int rows = 3;
static __device__ __forceinline__ float2 calcCoord(const float warpMat[PerspectiveTransform::rows * 3], int x, int y)
{
const float coeff = 1.0f / (warpMat[6] * x + warpMat[7] * y + warpMat[8]);
const float xcoo = coeff * (warpMat[0] * x + warpMat[1] * y + warpMat[2]);
const float ycoo = coeff * (warpMat[3] * x + warpMat[4] * y + warpMat[5]);
return make_float2(xcoo, ycoo);
}
struct Coefficients
{
Coefficients(const float* c_)
{
for(int i = 0; i < PerspectiveTransform::rows * 3; i++)
c[i] = c_[i];
}
float c[PerspectiveTransform::rows * 3];
};
};
///////////////////////////////////////////////////////////////////
// Build Maps
template <class Transform> __global__ void buildWarpMaps(PtrStepSzf xmap, PtrStepf ymap, const typename Transform::Coefficients warpMat)
{
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < xmap.cols && y < xmap.rows)
{
const float2 coord = Transform::calcCoord(warpMat.c, x, y);
xmap(y, x) = coord.x;
ymap(y, x) = coord.y;
}
}
template <class Transform> void buildWarpMaps_caller(PtrStepSzf xmap, PtrStepSzf ymap, const float warpMat[Transform::rows * 3], cudaStream_t stream)
{
dim3 block(32, 8);
dim3 grid(divUp(xmap.cols, block.x), divUp(xmap.rows, block.y));
buildWarpMaps<Transform><<<grid, block, 0, stream>>>(xmap, ymap, warpMat);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
void buildWarpAffineMaps_gpu(float coeffs[2 * 3], PtrStepSzf xmap, PtrStepSzf ymap, cudaStream_t stream)
{
buildWarpMaps_caller<AffineTransform>(xmap, ymap, coeffs, stream);
}
void buildWarpPerspectiveMaps_gpu(float coeffs[3 * 3], PtrStepSzf xmap, PtrStepSzf ymap, cudaStream_t stream)
{
buildWarpMaps_caller<PerspectiveTransform>(xmap, ymap, coeffs, stream);
}
///////////////////////////////////////////////////////////////////
// Warp
template <class Transform, class Ptr2D, typename T> __global__ void warp(const Ptr2D src, PtrStepSz<T> dst, const typename Transform::Coefficients warpMat)
{
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < dst.cols && y < dst.rows)
{
const float2 coord = Transform::calcCoord(warpMat.c, x, y);
dst.ptr(y)[x] = saturate_cast<T>(src(coord.y, coord.x));
}
}
template <class Transform, template <typename> class Filter, template <typename> class B, typename T> struct WarpDispatcherStream
{
static void call(PtrStepSz<T> src, PtrStepSz<T> dst, const float* borderValue, const float warpMat[Transform::rows*3], cudaStream_t stream, bool)
{
typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type work_type;
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
B<work_type> brd(src.rows, src.cols, VecTraits<work_type>::make(borderValue));
BorderReader< PtrStep<T>, B<work_type> > brdSrc(src, brd);
Filter< BorderReader< PtrStep<T>, B<work_type> > > filter_src(brdSrc);
warp<Transform><<<grid, block, 0, stream>>>(filter_src, dst, warpMat);
cudaSafeCall( cudaGetLastError() );
}
};
template <class Transform, template <typename> class Filter, template <typename> class B, typename T> struct WarpDispatcherNonStream
{
static void call(PtrStepSz<T> src, PtrStepSz<T> srcWhole, int xoff, int yoff, PtrStepSz<T> dst, const float* borderValue, const float warpMat[Transform::rows*3], bool)
{
CV_UNUSED(xoff);
CV_UNUSED(yoff);
CV_UNUSED(srcWhole);
typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type work_type;
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
B<work_type> brd(src.rows, src.cols, VecTraits<work_type>::make(borderValue));
BorderReader< PtrStep<T>, B<work_type> > brdSrc(src, brd);
Filter< BorderReader< PtrStep<T>, B<work_type> > > filter_src(brdSrc);
warp<Transform><<<grid, block>>>(filter_src, dst, warpMat);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
};
#define OPENCV_CUDA_IMPLEMENT_WARP_TEX(type) \
texture< type , cudaTextureType2D > tex_warp_ ## type (0, cudaFilterModePoint, cudaAddressModeClamp); \
struct tex_warp_ ## type ## _reader \
{ \
typedef type elem_type; \
typedef int index_type; \
int xoff, yoff; \
tex_warp_ ## type ## _reader (int xoff_, int yoff_) : xoff(xoff_), yoff(yoff_) {} \
__device__ __forceinline__ elem_type operator ()(index_type y, index_type x) const \
{ \
return tex2D(tex_warp_ ## type , x + xoff, y + yoff); \
} \
}; \
template <class Transform, template <typename> class Filter, template <typename> class B> struct WarpDispatcherNonStream<Transform, Filter, B, type> \
{ \
static void call(PtrStepSz< type > src, PtrStepSz< type > srcWhole, int xoff, int yoff, PtrStepSz< type > dst, const float* borderValue, const float warpMat[Transform::rows*3], bool cc20) \
{ \
typedef typename TypeVec<float, VecTraits< type >::cn>::vec_type work_type; \
dim3 block(32, cc20 ? 8 : 4); \
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y)); \
bindTexture(&tex_warp_ ## type , srcWhole); \
tex_warp_ ## type ##_reader texSrc(xoff, yoff); \
B<work_type> brd(src.rows, src.cols, VecTraits<work_type>::make(borderValue)); \
BorderReader< tex_warp_ ## type ##_reader, B<work_type> > brdSrc(texSrc, brd); \
Filter< BorderReader< tex_warp_ ## type ##_reader, B<work_type> > > filter_src(brdSrc); \
warp<Transform><<<grid, block>>>(filter_src, dst, warpMat); \
cudaSafeCall( cudaGetLastError() ); \
cudaSafeCall( cudaDeviceSynchronize() ); \
} \
}; \
template <class Transform, template <typename> class Filter> struct WarpDispatcherNonStream<Transform, Filter, BrdReplicate, type> \
{ \
static void call(PtrStepSz< type > src, PtrStepSz< type > srcWhole, int xoff, int yoff, PtrStepSz< type > dst, const float*, const float warpMat[Transform::rows*3], bool) \
{ \
dim3 block(32, 8); \
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y)); \
bindTexture(&tex_warp_ ## type , srcWhole); \
tex_warp_ ## type ##_reader texSrc(xoff, yoff); \
if (srcWhole.cols == src.cols && srcWhole.rows == src.rows) \
{ \
Filter< tex_warp_ ## type ##_reader > filter_src(texSrc); \
warp<Transform><<<grid, block>>>(filter_src, dst, warpMat); \
} \
else \
{ \
BrdReplicate<type> brd(src.rows, src.cols); \
BorderReader< tex_warp_ ## type ##_reader, BrdReplicate<type> > brdSrc(texSrc, brd); \
Filter< BorderReader< tex_warp_ ## type ##_reader, BrdReplicate<type> > > filter_src(brdSrc); \
warp<Transform><<<grid, block>>>(filter_src, dst, warpMat); \
} \
cudaSafeCall( cudaGetLastError() ); \
cudaSafeCall( cudaDeviceSynchronize() ); \
} \
};
OPENCV_CUDA_IMPLEMENT_WARP_TEX(uchar)
//OPENCV_CUDA_IMPLEMENT_WARP_TEX(uchar2)
OPENCV_CUDA_IMPLEMENT_WARP_TEX(uchar4)
//OPENCV_CUDA_IMPLEMENT_WARP_TEX(schar)
//OPENCV_CUDA_IMPLEMENT_WARP_TEX(char2)
//OPENCV_CUDA_IMPLEMENT_WARP_TEX(char4)
OPENCV_CUDA_IMPLEMENT_WARP_TEX(ushort)
//OPENCV_CUDA_IMPLEMENT_WARP_TEX(ushort2)
OPENCV_CUDA_IMPLEMENT_WARP_TEX(ushort4)
OPENCV_CUDA_IMPLEMENT_WARP_TEX(short)
//OPENCV_CUDA_IMPLEMENT_WARP_TEX(short2)
OPENCV_CUDA_IMPLEMENT_WARP_TEX(short4)
//OPENCV_CUDA_IMPLEMENT_WARP_TEX(int)
//OPENCV_CUDA_IMPLEMENT_WARP_TEX(int2)
//OPENCV_CUDA_IMPLEMENT_WARP_TEX(int4)
OPENCV_CUDA_IMPLEMENT_WARP_TEX(float)
//OPENCV_CUDA_IMPLEMENT_WARP_TEX(float2)
OPENCV_CUDA_IMPLEMENT_WARP_TEX(float4)
#undef OPENCV_CUDA_IMPLEMENT_WARP_TEX
template <class Transform, template <typename> class Filter, template <typename> class B, typename T> struct WarpDispatcher
{
static void call(PtrStepSz<T> src, PtrStepSz<T> srcWhole, int xoff, int yoff, PtrStepSz<T> dst, const float* borderValue, const float warpMat[Transform::rows*3], cudaStream_t stream, bool cc20)
{
if (stream == 0)
WarpDispatcherNonStream<Transform, Filter, B, T>::call(src, srcWhole, xoff, yoff, dst, borderValue, warpMat, cc20);
else
WarpDispatcherStream<Transform, Filter, B, T>::call(src, dst, borderValue, warpMat, stream, cc20);
}
};
template <class Transform, typename T>
void warp_caller(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzb dst, int interpolation,
int borderMode, const float* borderValue, const float warpMat[Transform::rows*3], cudaStream_t stream, bool cc20)
{
typedef void (*func_t)(PtrStepSz<T> src, PtrStepSz<T> srcWhole, int xoff, int yoff, PtrStepSz<T> dst, const float* borderValue, const float warpMat[Transform::rows*3], cudaStream_t stream, bool cc20);
static const func_t funcs[3][5] =
{
{
WarpDispatcher<Transform, PointFilter, BrdConstant, T>::call,
WarpDispatcher<Transform, PointFilter, BrdReplicate, T>::call,
WarpDispatcher<Transform, PointFilter, BrdReflect, T>::call,
WarpDispatcher<Transform, PointFilter, BrdWrap, T>::call,
WarpDispatcher<Transform, PointFilter, BrdReflect101, T>::call
},
{
WarpDispatcher<Transform, LinearFilter, BrdConstant, T>::call,
WarpDispatcher<Transform, LinearFilter, BrdReplicate, T>::call,
WarpDispatcher<Transform, LinearFilter, BrdReflect, T>::call,
WarpDispatcher<Transform, LinearFilter, BrdWrap, T>::call,
WarpDispatcher<Transform, LinearFilter, BrdReflect101, T>::call
},
{
WarpDispatcher<Transform, CubicFilter, BrdConstant, T>::call,
WarpDispatcher<Transform, CubicFilter, BrdReplicate, T>::call,
WarpDispatcher<Transform, CubicFilter, BrdReflect, T>::call,
WarpDispatcher<Transform, CubicFilter, BrdWrap, T>::call,
WarpDispatcher<Transform, CubicFilter, BrdReflect101, T>::call
}
};
funcs[interpolation][borderMode](static_cast< PtrStepSz<T> >(src), static_cast< PtrStepSz<T> >(srcWhole), xoff, yoff,
static_cast< PtrStepSz<T> >(dst), borderValue, warpMat, stream, cc20);
}
template <typename T> void warpAffine_gpu(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation,
int borderMode, const float* borderValue, cudaStream_t stream, bool cc20)
{
warp_caller<AffineTransform, T>(src, srcWhole, xoff, yoff, dst, interpolation, borderMode, borderValue, coeffs, stream, cc20);
}
template void warpAffine_gpu<uchar >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
//template void warpAffine_gpu<uchar2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void warpAffine_gpu<uchar3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void warpAffine_gpu<uchar4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
//template void warpAffine_gpu<schar>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
//template void warpAffine_gpu<char2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
//template void warpAffine_gpu<char3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
//template void warpAffine_gpu<char4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void warpAffine_gpu<ushort >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
//template void warpAffine_gpu<ushort2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void warpAffine_gpu<ushort3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void warpAffine_gpu<ushort4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void warpAffine_gpu<short >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
//template void warpAffine_gpu<short2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void warpAffine_gpu<short3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void warpAffine_gpu<short4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
//template void warpAffine_gpu<int >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
//template void warpAffine_gpu<int2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
//template void warpAffine_gpu<int3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
//template void warpAffine_gpu<int4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void warpAffine_gpu<float >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
//template void warpAffine_gpu<float2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void warpAffine_gpu<float3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void warpAffine_gpu<float4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template <typename T> void warpPerspective_gpu(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[3 * 3], PtrStepSzb dst, int interpolation,
int borderMode, const float* borderValue, cudaStream_t stream, bool cc20)
{
warp_caller<PerspectiveTransform, T>(src, srcWhole, xoff, yoff, dst, interpolation, borderMode, borderValue, coeffs, stream, cc20);
}
template void warpPerspective_gpu<uchar >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[3 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
//template void warpPerspective_gpu<uchar2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[3 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void warpPerspective_gpu<uchar3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[3 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void warpPerspective_gpu<uchar4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[3 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
//template void warpPerspective_gpu<schar>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[3 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
//template void warpPerspective_gpu<char2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[3 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
//template void warpPerspective_gpu<char3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[3 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
//template void warpPerspective_gpu<char4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[3 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void warpPerspective_gpu<ushort >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[3 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
//template void warpPerspective_gpu<ushort2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[3 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void warpPerspective_gpu<ushort3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[3 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void warpPerspective_gpu<ushort4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[3 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void warpPerspective_gpu<short >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[3 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
//template void warpPerspective_gpu<short2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[3 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void warpPerspective_gpu<short3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[3 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void warpPerspective_gpu<short4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[3 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
//template void warpPerspective_gpu<int >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[3 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
//template void warpPerspective_gpu<int2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[3 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
//template void warpPerspective_gpu<int3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[3 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
//template void warpPerspective_gpu<int4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[3 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void warpPerspective_gpu<float >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[3 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
//template void warpPerspective_gpu<float2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[3 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void warpPerspective_gpu<float3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[3 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void warpPerspective_gpu<float4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[3 * 3], PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
} // namespace imgproc
}}} // namespace cv { namespace cuda { namespace device
#endif /* CUDA_DISABLER */
|
0fa93471cdb1a6bcca0799741976301e6fc3d7b3.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include "../inc/piestimator.h"
#include <string>
#include <vector>
#include <numeric>
#include <stdexcept>
#include <typeinfo>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
using std::string;
using std::vector;
__device__ unsigned int reduce_sum(unsigned int in)
{
extern __shared__ unsigned int sdata[];
// Perform first level of reduction:
// - Write to shared memory
unsigned int ltid = threadIdx.x;
sdata[ltid] = in;
__syncthreads();
// Do reduction in shared mem
for (unsigned int s = blockDim.x / 2 ; s > 0 ; s >>= 1)
{
if (ltid < s)
{
sdata[ltid] += sdata[ltid + s];
}
__syncthreads();
}
return sdata[0];
}
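// Note on reduce_sum above: every thread of the block must call it, blockDim.x
// must be a power of two for the halving loop to cover all elements, and the
// kernel must be launched with blockDim.x * sizeof(unsigned int) bytes of
// dynamic shared memory (as done below); afterwards all threads return the
// completed block sum held in sdata[0].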
// Estimator kernel
template <typename Real>
__global__ void computeValue(unsigned int *const results,
const Real *const points,
const unsigned int numSims)
{
// Determine thread ID
unsigned int bid = blockIdx.x;
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int step = gridDim.x * blockDim.x;
// Shift the input/output pointers
const Real *pointx = points + tid;
const Real *pointy = pointx + numSims;
// Count the number of points which lie inside the unit quarter-circle
unsigned int pointsInside = 0;
for (unsigned int i = tid ; i < numSims ; i += step, pointx += step, pointy += step)
{
Real x = *pointx;
Real y = *pointy;
Real l2norm2 = x * x + y * y;
if (l2norm2 < static_cast<Real>(1))
{
pointsInside++;
}
}
// Reduce within the block
pointsInside = reduce_sum(pointsInside);
// Store the result
if (threadIdx.x == 0)
{
results[bid] = pointsInside;
}
}
template <typename Real>
PiEstimator<Real>::PiEstimator(unsigned int numSims, unsigned int device, unsigned int threadBlockSize)
: m_numSims(numSims),
m_device(device),
m_threadBlockSize(threadBlockSize)
{
}
template <typename Real>
Real PiEstimator<Real>::operator()()
{
hipError_t cudaResult = hipSuccess;
struct hipDeviceProp_t deviceProperties;
struct hipFuncAttributes funcAttributes;
// Get device properties
cudaResult = hipGetDeviceProperties(&deviceProperties, m_device);
if (cudaResult != hipSuccess)
{
string msg("Could not get device properties: ");
msg += hipGetErrorString(cudaResult);
throw std::runtime_error(msg);
}
// Check precision is valid
if (typeid(Real) == typeid(double) &&
(deviceProperties.major < 1 || (deviceProperties.major == 1 && deviceProperties.minor < 3)))
{
throw std::runtime_error("Device does not have double precision support");
}
// Attach to GPU
cudaResult = hipSetDevice(m_device);
if (cudaResult != hipSuccess)
{
string msg("Could not set CUDA device: ");
msg += hipGetErrorString(cudaResult);
throw std::runtime_error(msg);
}
// Determine how to divide the work between cores
dim3 block;
dim3 grid;
block.x = m_threadBlockSize;
grid.x = (m_numSims + m_threadBlockSize - 1) / m_threadBlockSize;
// Aim to launch around ten or more times as many blocks as there
// are multiprocessors on the target device.
unsigned int blocksPerSM = 10;
unsigned int numSMs = deviceProperties.multiProcessorCount;
while (grid.x > 2 * blocksPerSM * numSMs)
{
grid.x >>= 1;
}
// Get computeValue function properties and check the maximum block size
cudaResult = hipFuncGetAttributes(&funcAttributes, computeValue<Real>);
if (cudaResult != hipSuccess)
{
string msg("Could not get function attributes: ");
msg += hipGetErrorString(cudaResult);
throw std::runtime_error(msg);
}
if (block.x > (unsigned int)funcAttributes.maxThreadsPerBlock)
{
throw std::runtime_error("Block X dimension is too large for computeValue kernel");
}
// Check the dimensions are valid
if (block.x > (unsigned int)deviceProperties.maxThreadsDim[0])
{
throw std::runtime_error("Block X dimension is too large for device");
}
if (grid.x > (unsigned int)deviceProperties.maxGridSize[0])
{
throw std::runtime_error("Grid X dimension is too large for device");
}
// Allocate memory for points
// Each simulation has two random numbers to give X and Y coordinate
Real *d_points = 0;
cudaResult = hipMalloc((void **)&d_points, 2 * m_numSims * sizeof(Real));
if (cudaResult != hipSuccess)
{
string msg("Could not allocate memory on device for random numbers: ");
msg += hipGetErrorString(cudaResult);
throw std::runtime_error(msg);
}
// Allocate memory for result
// Each thread block will produce one result
unsigned int *d_results = 0;
cudaResult = hipMalloc((void **)&d_results, grid.x * sizeof(unsigned int));
if (cudaResult != hipSuccess)
{
string msg("Could not allocate memory on device for partial results: ");
msg += hipGetErrorString(cudaResult);
throw std::runtime_error(msg);
}
// Generate random points in unit square
hiprandStatus_t curandResult;
hiprandGenerator_t qrng;
if (typeid(Real) == typeid(float))
{
curandResult = hiprandCreateGenerator(&qrng, HIPRAND_RNG_QUASI_SOBOL32);
}
else if (typeid(Real) == typeid(double))
{
curandResult = hiprandCreateGenerator(&qrng, HIPRAND_RNG_QUASI_SOBOL64);
}
else
{
string msg("Could not create random number generator of specified type");
throw std::runtime_error(msg);
}
if (curandResult != HIPRAND_STATUS_SUCCESS)
{
string msg("Could not create quasi-random number generator: ");
msg += std::to_string(static_cast<int>(curandResult));
throw std::runtime_error(msg);
}
curandResult = hiprandSetQuasiRandomGeneratorDimensions(qrng, 2);
if (curandResult != HIPRAND_STATUS_SUCCESS)
{
string msg("Could not set number of dimensions for quasi-random number generator: ");
msg += std::to_string(static_cast<int>(curandResult));
throw std::runtime_error(msg);
}
curandResult = hiprandSetGeneratorOrdering(qrng, HIPRAND_ORDERING_QUASI_DEFAULT);
if (curandResult != HIPRAND_STATUS_SUCCESS)
{
string msg("Could not set order for quasi-random number generator: ");
msg += std::to_string(static_cast<int>(curandResult));
throw std::runtime_error(msg);
}
if (typeid(Real) == typeid(float))
{
curandResult = hiprandGenerateUniform(qrng, (float *)d_points, 2 * m_numSims);
}
else if (typeid(Real) == typeid(double))
{
curandResult = hiprandGenerateUniformDouble(qrng, (double *)d_points, 2 * m_numSims);
}
else
{
string msg("Could not generate random numbers of specified type");
throw std::runtime_error(msg);
}
if (curandResult != HIPRAND_STATUS_SUCCESS)
{
string msg("Could not generate quasi-random numbers: ");
msg += std::to_string(static_cast<int>(curandResult));
throw std::runtime_error(msg);
}
curandResult = hiprandDestroyGenerator(qrng);
if (curandResult != HIPRAND_STATUS_SUCCESS)
{
string msg("Could not destroy quasi-random number generator: ");
msg += std::to_string(static_cast<int>(curandResult));
throw std::runtime_error(msg);
}
// Count the points inside unit quarter-circle
hipLaunchKernelGGL(( computeValue<Real>), dim3(grid), dim3(block), block.x *sizeof(unsigned int), 0, d_results, d_points, m_numSims);
// Copy partial results back
vector<unsigned int> results(grid.x);
cudaResult = hipMemcpy(&results[0], d_results, grid.x * sizeof(unsigned int), hipMemcpyDeviceToHost);
if (cudaResult != hipSuccess)
{
string msg("Could not copy partial results to host: ");
msg += hipGetErrorString(cudaResult);
throw std::runtime_error(msg);
}
// Complete sum-reduction on host
Real value = static_cast<Real>(std::accumulate(results.begin(), results.end(), 0));
// Determine the proportion of points inside the quarter-circle,
// i.e. the area of the unit quarter-circle
value /= m_numSims;
// Value is currently an estimate of the area of a unit quarter-circle, so we can
// scale to a full circle by multiplying by four. Now since the area of a circle
// is pi * r^2, and r is one, the value will be an estimate for the value of pi.
value *= 4;
// Cleanup
if (d_points)
{
hipFree(d_points);
d_points = 0;
}
if (d_results)
{
hipFree(d_results);
d_results = 0;
}
return value;
}
// Explicit template instantiation
template class PiEstimator<float>;
template class PiEstimator<double>;
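// Illustrative usage sketch (not part of the original sample):
//   PiEstimator<float> estimator(1000000 /* numSims */, 0 /* device */,
//                                128 /* threadBlockSize */);
//   float pi = estimator();   // tends toward 3.14159... as numSims grows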
| 0fa93471cdb1a6bcca0799741976301e6fc3d7b3.cu | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include "../inc/piestimator.h"
#include <string>
#include <vector>
#include <numeric>
#include <stdexcept>
#include <typeinfo>
#include <cuda_runtime.h>
#include <curand.h>
using std::string;
using std::vector;
__device__ unsigned int reduce_sum(unsigned int in)
{
extern __shared__ unsigned int sdata[];
// Perform first level of reduction:
// - Write to shared memory
unsigned int ltid = threadIdx.x;
sdata[ltid] = in;
__syncthreads();
// Do reduction in shared mem
for (unsigned int s = blockDim.x / 2 ; s > 0 ; s >>= 1)
{
if (ltid < s)
{
sdata[ltid] += sdata[ltid + s];
}
__syncthreads();
}
return sdata[0];
}
// Estimator kernel
template <typename Real>
__global__ void computeValue(unsigned int *const results,
const Real *const points,
const unsigned int numSims)
{
// Determine thread ID
unsigned int bid = blockIdx.x;
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int step = gridDim.x * blockDim.x;
// Shift the input/output pointers
const Real *pointx = points + tid;
const Real *pointy = pointx + numSims;
// Count the number of points which lie inside the unit quarter-circle
unsigned int pointsInside = 0;
for (unsigned int i = tid ; i < numSims ; i += step, pointx += step, pointy += step)
{
Real x = *pointx;
Real y = *pointy;
Real l2norm2 = x * x + y * y;
if (l2norm2 < static_cast<Real>(1))
{
pointsInside++;
}
}
// Reduce within the block
pointsInside = reduce_sum(pointsInside);
// Store the result
if (threadIdx.x == 0)
{
results[bid] = pointsInside;
}
}
template <typename Real>
PiEstimator<Real>::PiEstimator(unsigned int numSims, unsigned int device, unsigned int threadBlockSize)
: m_numSims(numSims),
m_device(device),
m_threadBlockSize(threadBlockSize)
{
}
template <typename Real>
Real PiEstimator<Real>::operator()()
{
cudaError_t cudaResult = cudaSuccess;
struct cudaDeviceProp deviceProperties;
struct cudaFuncAttributes funcAttributes;
// Get device properties
cudaResult = cudaGetDeviceProperties(&deviceProperties, m_device);
if (cudaResult != cudaSuccess)
{
string msg("Could not get device properties: ");
msg += cudaGetErrorString(cudaResult);
throw std::runtime_error(msg);
}
// Check precision is valid
if (typeid(Real) == typeid(double) &&
(deviceProperties.major < 1 || (deviceProperties.major == 1 && deviceProperties.minor < 3)))
{
throw std::runtime_error("Device does not have double precision support");
}
// Attach to GPU
cudaResult = cudaSetDevice(m_device);
if (cudaResult != cudaSuccess)
{
string msg("Could not set CUDA device: ");
msg += cudaGetErrorString(cudaResult);
throw std::runtime_error(msg);
}
// Determine how to divide the work between cores
dim3 block;
dim3 grid;
block.x = m_threadBlockSize;
grid.x = (m_numSims + m_threadBlockSize - 1) / m_threadBlockSize;
// Aim to launch around ten or more times as many blocks as there
// are multiprocessors on the target device.
unsigned int blocksPerSM = 10;
unsigned int numSMs = deviceProperties.multiProcessorCount;
while (grid.x > 2 * blocksPerSM * numSMs)
{
grid.x >>= 1;
}
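// For example, with numSims = 1,000,000 and a 128-thread block, grid.x starts
// at 7813; on a hypothetical 80-SM device the loop halves it while
// grid.x > 2 * 10 * 80 = 1600, leaving grid.x = 976.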
// Get computeValue function properties and check the maximum block size
cudaResult = cudaFuncGetAttributes(&funcAttributes, computeValue<Real>);
if (cudaResult != cudaSuccess)
{
string msg("Could not get function attributes: ");
msg += cudaGetErrorString(cudaResult);
throw std::runtime_error(msg);
}
if (block.x > (unsigned int)funcAttributes.maxThreadsPerBlock)
{
throw std::runtime_error("Block X dimension is too large for computeValue kernel");
}
// Check the dimensions are valid
if (block.x > (unsigned int)deviceProperties.maxThreadsDim[0])
{
throw std::runtime_error("Block X dimension is too large for device");
}
if (grid.x > (unsigned int)deviceProperties.maxGridSize[0])
{
throw std::runtime_error("Grid X dimension is too large for device");
}
// Allocate memory for points
// Each simulation has two random numbers to give X and Y coordinate
Real *d_points = 0;
cudaResult = cudaMalloc((void **)&d_points, 2 * m_numSims * sizeof(Real));
if (cudaResult != cudaSuccess)
{
string msg("Could not allocate memory on device for random numbers: ");
msg += cudaGetErrorString(cudaResult);
throw std::runtime_error(msg);
}
// Allocate memory for result
// Each thread block will produce one result
unsigned int *d_results = 0;
cudaResult = cudaMalloc((void **)&d_results, grid.x * sizeof(unsigned int));
if (cudaResult != cudaSuccess)
{
string msg("Could not allocate memory on device for partial results: ");
msg += cudaGetErrorString(cudaResult);
throw std::runtime_error(msg);
}
// Generate random points in unit square
curandStatus_t curandResult;
curandGenerator_t qrng;
if (typeid(Real) == typeid(float))
{
curandResult = curandCreateGenerator(&qrng, CURAND_RNG_QUASI_SOBOL32);
}
else if (typeid(Real) == typeid(double))
{
curandResult = curandCreateGenerator(&qrng, CURAND_RNG_QUASI_SOBOL64);
}
else
{
string msg("Could not create random number generator of specified type");
throw std::runtime_error(msg);
}
if (curandResult != CURAND_STATUS_SUCCESS)
{
string msg("Could not create quasi-random number generator: ");
msg += std::to_string(curandResult); // appending the raw curandStatus_t would add a single char, not the code
throw std::runtime_error(msg);
}
curandResult = curandSetQuasiRandomGeneratorDimensions(qrng, 2);
if (curandResult != CURAND_STATUS_SUCCESS)
{
string msg("Could not set number of dimensions for quasi-random number generator: ");
msg += std::to_string(curandResult);
throw std::runtime_error(msg);
}
curandResult = curandSetGeneratorOrdering(qrng, CURAND_ORDERING_QUASI_DEFAULT);
if (curandResult != CURAND_STATUS_SUCCESS)
{
string msg("Could not set order for quasi-random number generator: ");
msg += std::to_string(curandResult);
throw std::runtime_error(msg);
}
if (typeid(Real) == typeid(float))
{
curandResult = curandGenerateUniform(qrng, (float *)d_points, 2 * m_numSims);
}
else if (typeid(Real) == typeid(double))
{
curandResult = curandGenerateUniformDouble(qrng, (double *)d_points, 2 * m_numSims);
}
else
{
string msg("Could not generate random numbers of specified type");
throw std::runtime_error(msg);
}
if (curandResult != CURAND_STATUS_SUCCESS)
{
string msg("Could not generate quasi-random numbers: ");
msg += std::to_string(curandResult);
throw std::runtime_error(msg);
}
curandResult = curandDestroyGenerator(qrng);
if (curandResult != CURAND_STATUS_SUCCESS)
{
string msg("Could not destroy quasi-random number generator: ");
msg += std::to_string(curandResult);
throw std::runtime_error(msg);
}
// Count the points inside unit quarter-circle
computeValue<Real><<<grid, block, block.x *sizeof(unsigned int)>>>(d_results, d_points, m_numSims);
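// Note: the third launch parameter reserves block.x * sizeof(unsigned int)
// bytes of dynamic shared memory, which reduce_sum (defined earlier in this
// sample) presumably uses as one partial-count slot per thread in the block.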
// Copy partial results back
vector<unsigned int> results(grid.x);
cudaResult = cudaMemcpy(&results[0], d_results, grid.x * sizeof(unsigned int), cudaMemcpyDeviceToHost);
if (cudaResult != cudaSuccess)
{
string msg("Could not copy partial results to host: ");
msg += cudaGetErrorString(cudaResult);
throw std::runtime_error(msg);
}
// Complete sum-reduction on host
Real value = static_cast<Real>(std::accumulate(results.begin(), results.end(), 0));
// Determine the proportion of points inside the quarter-circle,
// i.e. the area of the unit quarter-circle
value /= m_numSims;
// Value is currently an estimate of the area of a unit quarter-circle, so we can
// scale to a full circle by multiplying by four. Now since the area of a circle
// is pi * r^2, and r is one, the value will be an estimate for the value of pi.
value *= 4;
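// Arithmetic sketch with made-up numbers: if 784,000 of 1,000,000 points land
// inside the quarter-circle, value = 0.784 * 4 = 3.136, which converges toward
// pi ~= 3.14159 as numSims grows.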
// Cleanup
if (d_points)
{
cudaFree(d_points);
d_points = 0;
}
if (d_results)
{
cudaFree(d_results);
d_results = 0;
}
return value;
}
// Explicit template instantiation
template class PiEstimator<float>;
template class PiEstimator<double>;
|
02219254d3ecb677a3e50fee49566d9a3ee885f6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "stdio.h"
#include <sys/time.h>
#include <sys/resource.h>
double dwalltime(){
double sec;
struct timeval tv;
gettimeofday(&tv,NULL);
sec = tv.tv_sec + tv.tv_usec/1000000.0;
return sec;
}
int cant = 512;
int cant_elem = cant * cant;
// arrays used as matrices
int* arreglo_A;
int* arreglo_B;
int* arreglo_C;
int* d_arreglo_A;
int* d_arreglo_B;
int* d_arreglo_C;
int* d_arreglo_AT;
int* d_arreglo_BT;
void printi(int i){
printf("%d\n", i);
}
void init_CPU_array(int array[], int n){
for(int i = 0; i < n; i++) {
array[i] = i;
}
}
void print_CPU_array(int array[], int n){
for(int i = 0; i < n; i++) {
printi(array[i]);
}
}
// computes the transpose out-of-place
__global__ void transposeador(int* arreglo_b, int* arreglo_bt, int N){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int i = (int)tid / N; // row
int j = (int)tid % N; // column
if((i<N) && (j<N)){
arreglo_bt[i*N+j] = arreglo_b[j*N+i];
}
}
// copies B into C
__global__ void copiador(int* arreglo_b, int* arreglo_c, int N){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < N)
arreglo_c[tid] = arreglo_b[tid];
}
// C += A
__global__ void sumador(int* arreglo_a, int* arreglo_c, int N){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < N)
arreglo_c[tid] += arreglo_a[tid];
}
// C += A * B^t
__global__ void multiplicador(int* arreglo_a, int* arreglo_b_trans, int* arreglo_c, int N, int total_elem){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int i = (int)tid / N; // row
int j = (int)tid % N; // column
int k;
int cuenta = 0;
if(tid < total_elem)
{
for (k=0; k< N; k++){
cuenta += arreglo_a[i*N+k] * arreglo_b_trans[k*N+j];
}
arreglo_c[tid] += cuenta;
}
}
void solucion_CPU(){
int* arreglo_at;
int* arreglo_bt;
int* arreglo_a_por_b;
int* arreglo_res;
int numBytes = sizeof(int) * cant_elem; // bytes to allocate
arreglo_at = (int *) malloc(numBytes);
arreglo_bt = (int *) malloc(numBytes);
arreglo_a_por_b = (int *) malloc(numBytes); // result of A * B^t
arreglo_res = (int *) malloc(numBytes);
double timetick;
timetick = dwalltime();
// compute the transpose of arreglo_B
for (int i = 0; i < cant_elem; ++i)
{
int col = i / cant; // row index (despite the name)
int row = i % cant; // column index
arreglo_bt[col*cant+row] = arreglo_B[row*cant+col];
}
// compute the transpose of arreglo_A
for (int i = 0; i < cant_elem; ++i)
{
int col = i / cant; // row index (despite the name)
int row = i % cant; // column index
arreglo_at[col*cant+row] = arreglo_A[row*cant+col];
}
// A * B^t
for (int i = 0; i < cant_elem; i++)
{
int col = i / cant; // row index (despite the name)
int row = i % cant; // column index
int mul = 0;
for (int k=0; k< cant; k++){
mul += arreglo_A[col*cant+k] * arreglo_bt[k*cant+row];
}
arreglo_a_por_b[i] = mul;
}
for (int i = 0; i < cant_elem; i++){
arreglo_res[i] = 0;
}
// C = B + A * B^t + A^t
for (int i = 0; i < cant_elem; i++){
arreglo_res[i] += arreglo_B[i] + arreglo_a_por_b[i] + arreglo_at[i];
}
printf("-> Tiempo transcurrido en la CPU %f\n", dwalltime() - timetick);
// printf("%s\n", "");
// printf("%s\n", "Resultados CPU:");
// for (int i = 0; i < cant_elem; i++){
// printf("%d\n", arreglo_res[i]);
// }
free(arreglo_at);
free(arreglo_bt);
free(arreglo_a_por_b);
free(arreglo_res);
}
int main(int argc, char** argv){
int numBytes = sizeof(int) * cant_elem; // bytes to allocate
arreglo_A = (int *) malloc(numBytes);
arreglo_B = (int *) malloc(numBytes);
arreglo_C = (int *) malloc(numBytes);
double timetick;
// fill the arrays
init_CPU_array(arreglo_A, cant_elem);
init_CPU_array(arreglo_B, cant_elem);
init_CPU_array(arreglo_C, cant_elem);
// allocate memory on the GPU
hipMalloc(&d_arreglo_A, numBytes);
hipMalloc(&d_arreglo_B, numBytes);
hipMalloc(&d_arreglo_C, numBytes);
hipMalloc(&d_arreglo_AT, numBytes);
hipMalloc(&d_arreglo_BT, numBytes);
// copy the data from the CPU to the GPU
hipMemcpy(d_arreglo_A, arreglo_A, numBytes, hipMemcpyHostToDevice);
hipMemcpy(d_arreglo_B, arreglo_B, numBytes, hipMemcpyHostToDevice);
dim3 miGrid1D(512,1);
dim3 miBloque1D(512,1);
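// Note: this fixed 512x512 launch yields exactly 512 * 512 = 262,144 threads,
// which matches cant_elem only because cant == 512; the tid < N guards in the
// kernels prevent overruns but would not cover a larger problem size.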
timetick = dwalltime();
// C = B
hipLaunchKernelGGL(( copiador), dim3(miGrid1D), dim3(miBloque1D), 0, 0, d_arreglo_B, d_arreglo_C, cant_elem);
// B^t
hipLaunchKernelGGL(( transposeador), dim3(miGrid1D), dim3(miBloque1D), 0, 0, d_arreglo_B, d_arreglo_BT, cant);
// C += A * B^t
hipLaunchKernelGGL(( multiplicador) , dim3(miGrid1D), dim3(miBloque1D), 0, 0, d_arreglo_A, d_arreglo_BT, d_arreglo_C, cant, cant_elem);
// A^t
hipLaunchKernelGGL(( transposeador), dim3(miGrid1D), dim3(miBloque1D), 0, 0, d_arreglo_A, d_arreglo_AT, cant);
// C += A^t
hipLaunchKernelGGL(( sumador), dim3(miGrid1D), dim3(miBloque1D), 0, 0, d_arreglo_AT, d_arreglo_C, cant_elem);
// wait for the kernels to finish
hipDeviceSynchronize();
printf("-> Tiempo transcurrido en la GPU %f\n", dwalltime() - timetick);
// bring the results back from the GPU to the CPU
hipMemcpy(arreglo_C, d_arreglo_C, numBytes, hipMemcpyDeviceToHost);
// print the results
// printf("%s\n", "");
// printf("%s\n", "Resultados GPU:");
// print_CPU_array(arreglo_C, cant_elem);
solucion_CPU();
free(arreglo_A);
free(arreglo_B);
free(arreglo_C);
hipFree (d_arreglo_A);
hipFree (d_arreglo_B);
hipFree (d_arreglo_C);
hipFree (d_arreglo_AT);
hipFree (d_arreglo_BT);
}
| 02219254d3ecb677a3e50fee49566d9a3ee885f6.cu | #include "cuda.h"
#include "stdio.h"
#include <sys/time.h>
#include <sys/resource.h>
double dwalltime(){
double sec;
struct timeval tv;
gettimeofday(&tv,NULL);
sec = tv.tv_sec + tv.tv_usec/1000000.0;
return sec;
}
int cant = 512;
int cant_elem = cant * cant;
// arrays used as matrices
int* arreglo_A;
int* arreglo_B;
int* arreglo_C;
int* d_arreglo_A;
int* d_arreglo_B;
int* d_arreglo_C;
int* d_arreglo_AT;
int* d_arreglo_BT;
void printi(int i){
printf("%d\n", i);
}
void init_CPU_array(int array[], int n){
for(int i = 0; i < n; i++) {
array[i] = i;
}
}
void print_CPU_array(int array[], int n){
for(int i = 0; i < n; i++) {
printi(array[i]);
}
}
// computes the transpose out-of-place
__global__ void transposeador(int* arreglo_b, int* arreglo_bt, int N){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int i = (int)tid / N; // row
int j = (int)tid % N; // column
if((i<N) && (j<N)){
arreglo_bt[i*N+j] = arreglo_b[j*N+i];
}
}
// copies B into C
__global__ void copiador(int* arreglo_b, int* arreglo_c, int N){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < N)
arreglo_c[tid] = arreglo_b[tid];
}
// C += A
__global__ void sumador(int* arreglo_a, int* arreglo_c, int N){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < N)
arreglo_c[tid] += arreglo_a[tid];
}
// C += A * B^t
__global__ void multiplicador(int* arreglo_a, int* arreglo_b_trans, int* arreglo_c, int N, int total_elem){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int i = (int)tid / N; // row
int j = (int)tid % N; // column
int k;
int cuenta = 0;
if(tid < total_elem)
{
for (k=0; k< N; k++){
cuenta += arreglo_a[i*N+k] * arreglo_b_trans[k*N+j];
}
arreglo_c[tid] += cuenta;
}
}
void solucion_CPU(){
int* arreglo_at;
int* arreglo_bt;
int* arreglo_a_por_b;
int* arreglo_res;
int numBytes = sizeof(int) * cant_elem; // bytes to allocate
arreglo_at = (int *) malloc(numBytes);
arreglo_bt = (int *) malloc(numBytes);
arreglo_a_por_b = (int *) malloc(numBytes); // result of A * B^t
arreglo_res = (int *) malloc(numBytes);
double timetick;
timetick = dwalltime();
// compute the transpose of arreglo_B
for (int i = 0; i < cant_elem; ++i)
{
int col = i / cant; // row index (despite the name)
int row = i % cant; // column index
arreglo_bt[col*cant+row] = arreglo_B[row*cant+col];
}
// compute the transpose of arreglo_A
for (int i = 0; i < cant_elem; ++i)
{
int col = i / cant; // row index (despite the name)
int row = i % cant; // column index
arreglo_at[col*cant+row] = arreglo_A[row*cant+col];
}
// A * B^t
for (int i = 0; i < cant_elem; i++)
{
int col = i / cant; // row index (despite the name)
int row = i % cant; // column index
int mul = 0;
for (int k=0; k< cant; k++){
mul += arreglo_A[col*cant+k] * arreglo_bt[k*cant+row];
}
arreglo_a_por_b[i] = mul;
}
for (int i = 0; i < cant_elem; i++){
arreglo_res[i] = 0;
}
// C = B + A * B^t + A^t
for (int i = 0; i < cant_elem; i++){
arreglo_res[i] += arreglo_B[i] + arreglo_a_por_b[i] + arreglo_at[i];
}
printf("-> Tiempo transcurrido en la CPU %f\n", dwalltime() - timetick);
// printf("%s\n", "");
// printf("%s\n", "Resultados CPU:");
// for (int i = 0; i < cant_elem; i++){
// printf("%d\n", arreglo_res[i]);
// }
free(arreglo_at);
free(arreglo_bt);
free(arreglo_a_por_b);
free(arreglo_res);
}
int main(int argc, char** argv){
int numBytes = sizeof(int) * cant_elem; // bytes to allocate
arreglo_A = (int *) malloc(numBytes);
arreglo_B = (int *) malloc(numBytes);
arreglo_C = (int *) malloc(numBytes);
double timetick;
// fill the arrays
init_CPU_array(arreglo_A, cant_elem);
init_CPU_array(arreglo_B, cant_elem);
init_CPU_array(arreglo_C, cant_elem);
// allocate memory on the GPU
cudaMalloc(&d_arreglo_A, numBytes);
cudaMalloc(&d_arreglo_B, numBytes);
cudaMalloc(&d_arreglo_C, numBytes);
cudaMalloc(&d_arreglo_AT, numBytes);
cudaMalloc(&d_arreglo_BT, numBytes);
// copy the data from the CPU to the GPU
cudaMemcpy(d_arreglo_A, arreglo_A, numBytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_arreglo_B, arreglo_B, numBytes, cudaMemcpyHostToDevice);
dim3 miGrid1D(512,1);
dim3 miBloque1D(512,1);
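// Note: this fixed 512x512 launch yields exactly 512 * 512 = 262,144 threads,
// which matches cant_elem only because cant == 512; the tid < N guards in the
// kernels prevent overruns but would not cover a larger problem size.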
timetick = dwalltime();
// C = B
copiador<<<miGrid1D, miBloque1D>>>(d_arreglo_B, d_arreglo_C, cant_elem);
// B^t
transposeador<<<miGrid1D, miBloque1D>>>(d_arreglo_B, d_arreglo_BT, cant);
// C += A * B^t
multiplicador <<<miGrid1D, miBloque1D>>>(d_arreglo_A, d_arreglo_BT, d_arreglo_C, cant, cant_elem);
// A^t
transposeador<<<miGrid1D, miBloque1D>>>(d_arreglo_A, d_arreglo_AT, cant);
// C += A^t
sumador<<<miGrid1D, miBloque1D>>>(d_arreglo_AT, d_arreglo_C, cant_elem);
// wait for the kernels to finish
cudaThreadSynchronize();
printf("-> Tiempo transcurrido en la GPU %f\n", dwalltime() - timetick);
// bring the results back from the GPU to the CPU
cudaMemcpy(arreglo_C, d_arreglo_C, numBytes, cudaMemcpyDeviceToHost);
// print the results
// printf("%s\n", "");
// printf("%s\n", "Resultados GPU:");
// print_CPU_array(arreglo_C, cant_elem);
solucion_CPU();
free(arreglo_A);
free(arreglo_B);
free(arreglo_C);
cudaFree (d_arreglo_A);
cudaFree (d_arreglo_B);
cudaFree (d_arreglo_C);
cudaFree (d_arreglo_AT);
cudaFree (d_arreglo_BT);
}
|
6dd06719e0b92dcf1630cb51abad81abfc02ba3e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from magmablas/zgeadd_batched.cu, normal z -> c, Sun Nov 20 20:20:30 2016
@author Mark Gates
*/
#include "magma_internal.h"
#define NB 64
/******************************************************************************/
/*
Batches cgeadd across multiple arrays;
y-dimension of grid is different arrays,
x-dimension of grid is blocks for each array.
Matrix is m x n, and is divided into block rows, each NB x n.
Each CUDA block has NB threads to handle one block row.
Each thread adds one row, iterating across all columns.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
TODO. Block in both directions, for large matrices.
E.g., each block does 64x64 tile, instead of 64xN tile.
*/
__global__ void
cgeadd_batched_kernel(
int m, int n,
magmaFloatComplex alpha,
const magmaFloatComplex * const *dAarray, int ldda,
magmaFloatComplex **dBarray, int lddb )
{
// dA and dB iterate across row i
const magmaFloatComplex *dA = dAarray[ blockIdx.y ];
magmaFloatComplex *dB = dBarray[ blockIdx.y ];
int i = blockIdx.x*blockDim.x + threadIdx.x;
if ( i < m ) {
dA += i;
dB += i;
const magmaFloatComplex *dAend = dA + n*ldda;
while( dA < dAend ) {
*dB = alpha*(*dA) + (*dB);
dA += ldda;
dB += lddb;
}
}
}
/***************************************************************************//**
Purpose
-------
CGEADD adds two sets of matrices, dBarray[i] = alpha*dAarray[i] + dBarray[i],
for i = 0, ..., batchCount-1.
Arguments
---------
@param[in]
m INTEGER
The number of rows of each matrix dAarray[i]. M >= 0.
@param[in]
n INTEGER
The number of columns of each matrix dAarray[i]. N >= 0.
@param[in]
alpha COMPLEX
The scalar alpha.
@param[in]
dAarray array on GPU, dimension(batchCount), of pointers to arrays,
with each array a COMPLEX array, dimension (LDDA,N)
The m by n matrices dAarray[i].
@param[in]
ldda INTEGER
The leading dimension of each array dAarray[i]. LDDA >= max(1,M).
@param[in,out]
dBarray array on GPU, dimension(batchCount), of pointers to arrays,
with each array a COMPLEX array, dimension (LDDB,N)
The m by n matrices dBarray[i].
@param[in]
lddb INTEGER
The leading dimension of each array dBarray[i]. LDDB >= max(1,M).
@param[in]
batchCount INTEGER
The number of matrices to add; length of dAarray and dBarray.
batchCount >= 0.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_geadd_batched
*******************************************************************************/
extern "C" void
magmablas_cgeadd_batched(
magma_int_t m, magma_int_t n,
magmaFloatComplex alpha,
magmaFloatComplex_const_ptr const dAarray[], magma_int_t ldda,
magmaFloatComplex_ptr dBarray[], magma_int_t lddb,
magma_int_t batchCount,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
else if ( batchCount < 0 )
info = -8;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 || n == 0 || batchCount == 0 )
return;
dim3 threads( NB );
dim3 grid( magma_ceildiv( m, NB ), batchCount );
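// For example (illustrative sizes): m = 1000 with NB = 64 gives
// grid.x = magma_ceildiv(1000, 64) = 16 block-rows per matrix, and
// grid.y = batchCount selects which matrix of the batch each block adds.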
hipLaunchKernelGGL(( cgeadd_batched_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, alpha, dAarray, ldda, dBarray, lddb );
}
| 6dd06719e0b92dcf1630cb51abad81abfc02ba3e.cu | /*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from magmablas/zgeadd_batched.cu, normal z -> c, Sun Nov 20 20:20:30 2016
@author Mark Gates
*/
#include "magma_internal.h"
#define NB 64
/******************************************************************************/
/*
Batches cgeadd across multiple arrays;
y-dimension of grid is different arrays,
x-dimension of grid is blocks for each array.
Matrix is m x n, and is divided into block rows, each NB x n.
Each CUDA block has NB threads to handle one block row.
Each thread adds one row, iterating across all columns.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
TODO. Block in both directions, for large matrices.
E.g., each block does 64x64 tile, instead of 64xN tile.
*/
__global__ void
cgeadd_batched_kernel(
int m, int n,
magmaFloatComplex alpha,
const magmaFloatComplex * const *dAarray, int ldda,
magmaFloatComplex **dBarray, int lddb )
{
// dA and dB iterate across row i
const magmaFloatComplex *dA = dAarray[ blockIdx.y ];
magmaFloatComplex *dB = dBarray[ blockIdx.y ];
int i = blockIdx.x*blockDim.x + threadIdx.x;
if ( i < m ) {
dA += i;
dB += i;
const magmaFloatComplex *dAend = dA + n*ldda;
while( dA < dAend ) {
*dB = alpha*(*dA) + (*dB);
dA += ldda;
dB += lddb;
}
}
}
/***************************************************************************//**
Purpose
-------
CGEADD adds two sets of matrices, dBarray[i] = alpha*dAarray[i] + dBarray[i],
for i = 0, ..., batchCount-1.
Arguments
---------
@param[in]
m INTEGER
The number of rows of each matrix dAarray[i]. M >= 0.
@param[in]
n INTEGER
The number of columns of each matrix dAarray[i]. N >= 0.
@param[in]
alpha COMPLEX
The scalar alpha.
@param[in]
dAarray array on GPU, dimension(batchCount), of pointers to arrays,
with each array a COMPLEX array, dimension (LDDA,N)
The m by n matrices dAarray[i].
@param[in]
ldda INTEGER
The leading dimension of each array dAarray[i]. LDDA >= max(1,M).
@param[in,out]
dBarray array on GPU, dimension(batchCount), of pointers to arrays,
with each array a COMPLEX array, dimension (LDDB,N)
The m by n matrices dBarray[i].
@param[in]
lddb INTEGER
The leading dimension of each array dBarray[i]. LDDB >= max(1,M).
@param[in]
batchCount INTEGER
The number of matrices to add; length of dAarray and dBarray.
batchCount >= 0.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_geadd_batched
*******************************************************************************/
extern "C" void
magmablas_cgeadd_batched(
magma_int_t m, magma_int_t n,
magmaFloatComplex alpha,
magmaFloatComplex_const_ptr const dAarray[], magma_int_t ldda,
magmaFloatComplex_ptr dBarray[], magma_int_t lddb,
magma_int_t batchCount,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
else if ( batchCount < 0 )
info = -8;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 || n == 0 || batchCount == 0 )
return;
dim3 threads( NB );
dim3 grid( magma_ceildiv( m, NB ), batchCount );
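// For example (illustrative sizes): m = 1000 with NB = 64 gives
// grid.x = magma_ceildiv(1000, 64) = 16 block-rows per matrix, and
// grid.y = batchCount selects which matrix of the batch each block adds.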
cgeadd_batched_kernel<<< grid, threads, 0, queue->cuda_stream() >>>(
m, n, alpha, dAarray, ldda, dBarray, lddb );
}
|
a6bd550de83d4c613905ed79710da25ac49e9278.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
#ifdef UNIX
#include <stdint.h>
#include <unistd.h>
#endif
#include "mex.h"
// CUDA
#include "hip/hip_runtime.h"
#include "cudaCommon.h"
#include "cudaGradientKernels.h"
#include "cudaSourceScalarPotential.h"
#define BLOCKDIMX 18
#define BLOCKDIMY 18
template <geometryType_t coords>
__global__ void cukern_computeScalarGradient3D(double *phi, double *f_x, double *f_y, double *f_z, int3 arraysize);
template <geometryType_t coords>
__global__ void cukern_computeScalarGradient2D(double *phi, double *fx, double *fy, int3 arraysize);
__global__ void cukern_applyPotentialGradient3D(double *fluid, double *fx, double *fy, double *fz, unsigned int arrayNumel);
__global__ void cukern_applyPotentialGradient2D(double *fluid, double *fx, double *fy, unsigned int arrayNumel);
__constant__ __device__ double devLambda[9];
#define LAMX devLambda[0]
#define LAMY devLambda[1]
#define LAMZ devLambda[2]
// Define: F = -beta * rho * grad(phi)
// rho_g = density for full effect of gravity
// rho_c = minimum density to feel gravity at all
// beta = { rho_g < rho : 1 }
// { rho_c < rho < rho_g : (rho-rho_c)/(rho_g-rho_c) } (applied linearly in the kernels below)
// { rho < rho_c : 0 }
// This provides a continuous (though not differentiable at rho = rho_g) way to suppress gravitation of the background fluid
// The original process of cutting gravity off below a critical density a few times the minimum
// density is believed to cause "blowups" at the inner edge of circular flow profiles due to being
// discontinuous. If even smoothness is insufficient and smooth differentiability is required,
// a more-times-continuous profile can be constructed, but let's not go there unless forced.
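// Numerical sketch (hypothetical values): with rho_c = 1e-5 and rho_g = 1e-3,
// a cell at rho = 5.05e-4 gets beta = (5.05e-4 - 1e-5) / (1e-3 - 1e-5) = 0.5,
// i.e. half-strength gravity, ramping linearly to 1 at rho = rho_g.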
// Density below which we force gravity effects to zero
#define RHOMIN devLambda[3]
#define RHOGRAV devLambda[4]
// 1 / (rho_g - rho_c)
#define G1 devLambda[5]
// rho_c / (rho_g - rho_c)
#define G2 devLambda[6]
#define RINNER devLambda[7]
#define DELTAR devLambda[8]
__constant__ __device__ unsigned int devSlabdim[3];
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
if ((nrhs!=4) || (nlhs != 0)) mexErrMsgTxt("Wrong number of arguments: need cudaApplyScalarPotential(FluidManager, phi, GeometryManager, [dt, rho_nograv, rho_fullgrav])\n");
if(CHECK_CUDA_ERROR("entering cudaSourceScalarPotential") != SUCCESSFUL) { DROP_MEX_ERROR("Failed upon entry to cudaSourceScalarPotential."); }
// Get source array info and create destination arrays
MGArray fluid[5];
MGArray phi;
int worked = MGA_accessMatlabArrays(prhs, 1, 1, &phi);
if(CHECK_IMOGEN_ERROR(worked) != SUCCESSFUL) { DROP_MEX_ERROR("Failed to access input arrays."); }
// Each partition uses the same common parameters
GeometryParams geom = accessMatlabGeometryClass(prhs[2]); // FIXME check for fail & return
int ne = mxGetNumberOfElements(prhs[3]);
if(ne != 3) {
printf("Input argument 3 has %i arguments, not three. Require precisely 3: [dt rho_nog rho_fullg]\n", ne);
DROP_MEX_ERROR("Crashing.");
}
double *sp = mxGetPr(prhs[3]);
double dt = sp[0]; /* dt */
double rhoMinimum = sp[1]; /* minimum rho, rho_c */
double rhoFull = sp[2]; /* rho_g */
int numFluids = mxGetNumberOfElements(prhs[0]);
int fluidct;
for(fluidct = 0; fluidct < numFluids; fluidct++) {
worked = MGA_accessFluidCanister(prhs[0], fluidct, &fluid[0]);
if(CHECK_IMOGEN_ERROR(worked) != SUCCESSFUL) break;
mxArray *flprop = mxGetProperty(prhs[0], fluidct, "MINMASS");
if(flprop != NULL) {
rhoMinimum = *((double *)mxGetPr(flprop));
} else {
worked = ERROR_NULL_POINTER;
break;
}
worked = sourcefunction_ScalarPotential(&fluid[0], &phi, dt, geom, rhoMinimum, rhoFull);
if(CHECK_IMOGEN_ERROR(worked) != SUCCESSFUL) break;
}
if(worked != SUCCESSFUL) { DROP_MEX_ERROR("cudaSourceScalarPotential failed"); }
}
int sourcefunction_ScalarPotential(MGArray *fluid, MGArray *phi, double dt, GeometryParams geom, double minRho, double rhoFullGravity)
{
double *dx = &geom.h[0];
dim3 gridsize, blocksize;
int3 arraysize;
int i, sub[6];
int worked = SUCCESSFUL; // initialize in case the loop below runs zero times
double lambda[9];
lambda[0] = dt/(2.0*dx[0]);
lambda[1] = dt/(2.0*dx[1]);
lambda[2] = dt/(2.0*dx[2]);
lambda[3] = minRho; /* minimum rho, rho_c */
lambda[4] = rhoFullGravity; /* rho_g */
lambda[5] = 1.0/(lambda[4] - lambda[3]); /* 1/(rho_g - rho_c) */
lambda[6] = lambda[3]*lambda[5];
lambda[7] = geom.Rinner;
lambda[8] = dx[1];
for(i = 0; i < fluid->nGPUs; i++) {
hipSetDevice(fluid->deviceID[i]);
hipMemcpyToSymbol((const void *)devLambda, lambda, 9*sizeof(double), 0, hipMemcpyHostToDevice);
unsigned int sd[3];
sd[0] = (unsigned int)(fluid->slabPitch[i] / 8);
hipMemcpyToSymbol((const void *)devSlabdim, sd, 1*sizeof(int), 0, hipMemcpyHostToDevice);
worked = CHECK_CUDA_ERROR("hipMemcpyToSymbol");
if(CHECK_IMOGEN_ERROR(worked) != SUCCESSFUL) break;
}
if(worked != SUCCESSFUL) return worked;
int isThreeD = (fluid->dim[2] > 1);
MGArray gradientStorage;
worked = MGA_allocSlab(fluid, &gradientStorage, 3);
if(CHECK_IMOGEN_ERROR(worked) != SUCCESSFUL) return worked;
worked = computeCentralGradient(phi, &gradientStorage, geom, 2, dt);
if(CHECK_IMOGEN_ERROR(worked) != SUCCESSFUL) return worked;
double *gs;
// Iterate over all partitions, and here we GO!
for(i = 0; i < fluid->nGPUs; i++) {
hipSetDevice(fluid->deviceID[i]);
calcPartitionExtent(fluid, i, sub);
arraysize.x = sub[3]; arraysize.y = sub[4]; arraysize.z = sub[5];
blocksize = makeDim3(BLOCKDIMX, BLOCKDIMY, 1);
gridsize.x = arraysize.x / (blocksize.x - 2); gridsize.x += ((blocksize.x-2) * gridsize.x < arraysize.x);
gridsize.y = arraysize.y / (blocksize.y - 2); gridsize.y += ((blocksize.y-2) * gridsize.y < arraysize.y);
gridsize.z = 1;
gs = gradientStorage.devicePtr[i];
if(isThreeD) {
hipLaunchKernelGGL(( cukern_applyPotentialGradient3D), dim3(32), dim3(256), 0, 0, fluid[0].devicePtr[i], gs, gs+fluid->slabPitch[i]/8, gs+2*fluid->slabPitch[i]/8, fluid->partNumel[i]);
worked = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, fluid, i, "cukern_applyPotentialGradient3D");
if(worked != SUCCESSFUL) break;
} else {
hipLaunchKernelGGL(( cukern_applyPotentialGradient2D), dim3(32), dim3(256), 0, 0, fluid[0].devicePtr[i], gs, gs+fluid->slabPitch[i]/8, fluid->partNumel[i]);
worked = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, fluid, i, "cukern_applyPotentialGradient2D");
if(worked != SUCCESSFUL) break;
}
}
if(CHECK_IMOGEN_ERROR(worked) != SUCCESSFUL) return worked;
worked = MGA_delete(&gradientStorage);
return CHECK_IMOGEN_ERROR(worked);
}
/* dP = -rho grad(phi) dt
* dE = -rho v \cdot grad(phi) dt
*
* Exact integrals at fixed position:
* P2 = P1 - rho grad(phi) t
* E2 = E1 - P1 \cdot grad(phi) t + .5 rho grad(phi) \cdot grad(phi) t^2
* = E1 - dt grad(phi) \cdot ( P1 - .5 * rho * grad(phi) ) */
__global__ void cukern_applyPotentialGradient3D(double *fluid, double *fx, double *fy, double *fz, unsigned int arrayNumel)
{
unsigned int globAddr = threadIdx.x + blockDim.x*blockIdx.x;
if(globAddr >= arrayNumel) return;
double deltaphi; // Store derivative of phi in one direction
double rhomin = devLambda[3];
double locrho, ener, mom;
for(; globAddr < arrayNumel; globAddr += blockDim.x*gridDim.x) {
ener = 0;
locrho = fluid[globAddr]; // rho(z) -> rho
if(locrho > rhomin) {
mom = fluid[globAddr + 2*devSlabdim[0]]; // load px(z) -> phiC
deltaphi = fx[globAddr];
if(locrho < RHOGRAV) { deltaphi *= (locrho*G1 - G2); } // G smoothly -> 0 as rho -> RHO_MIN
ener = -deltaphi*(mom-.5*locrho*deltaphi); // exact KE change
fluid[globAddr + 2*devSlabdim[0]] = mom - deltaphi*locrho; // store px <- px - dt * rho dphi/dx;
mom = fluid[globAddr + 3*devSlabdim[0]]; // load py(z) -> phiC
deltaphi = fy[globAddr];
if(locrho < RHOGRAV) { deltaphi *= (locrho*G1 - G2); } // G smoothly -> 0 as rho -> RHO_MIN
ener -= deltaphi*(mom-.5*locrho*deltaphi); // exact KE change
fluid[globAddr + 3*devSlabdim[0]] = mom - deltaphi*locrho; // store py <- py - dt * rho dphi/dy;
mom = fluid[globAddr + 4*devSlabdim[0]];
deltaphi = fz[globAddr];
if(locrho < RHOGRAV) { deltaphi *= (locrho*G1 - G2); } // G smoothly -> 0 as rho -> RHO_MIN
ener -= deltaphi*(mom-.5*locrho*deltaphi); // exact KE change
fluid[globAddr + 4*devSlabdim[0]] = mom - deltaphi*locrho; // store pz <- pz - dt * rho dphi/dz;
// Store changed kinetic energy
fluid[globAddr + devSlabdim[0]] += ener;
}
}
}
/* dP = -rho grad(phi) dt
* dE = -rho v \cdot grad(phi) dt
*
* Exact integrals at fixed position:
* P2 = P1 - rho grad(phi) t
* E2 = E1 - P1 \cdot grad(phi) t + .5 rho grad(phi) \cdot grad(phi) t^2
* = E1 - dt grad(phi) \cdot ( P1 - .5 * rho * grad(phi) ) */
__global__ void cukern_applyPotentialGradient2D(double *fluid, double *fx, double *fy, unsigned int arrayNumel)
{
unsigned int globAddr = threadIdx.x + blockDim.x*blockIdx.x;
if(globAddr >= arrayNumel) return;
double deltaphi; // Store derivative of phi in one direction
double rhomin = devLambda[3];
double locrho, ener, mom;
for(; globAddr < arrayNumel; globAddr += blockDim.x*gridDim.x) {
ener = 0;
locrho = fluid[globAddr]; // rho(z) -> rho
if(locrho > rhomin) {
mom = fluid[globAddr + 2*devSlabdim[0]]; // load px(z) -> phiC
deltaphi = fx[globAddr];
if(locrho < RHOGRAV) { deltaphi *= (locrho*G1 - G2); } // G smoothly -> 0 as rho -> RHO_MIN
ener = -deltaphi*(mom-.5*locrho*deltaphi); // exact KE change
fluid[globAddr + 2*devSlabdim[0]] = mom - deltaphi*locrho; // store px <- px - dt * rho dphi/dx;
mom = fluid[globAddr + 3*devSlabdim[0]]; // load py(z) -> phiC
deltaphi = fy[globAddr];
if(locrho < RHOGRAV) { deltaphi *= (locrho*G1 - G2); } // G smoothly -> 0 as rho -> RHO_MIN
ener -= deltaphi*(mom-.5*locrho*deltaphi); // exact KE change
fluid[globAddr + 3*devSlabdim[0]] = mom - deltaphi*locrho; // store py <- py - dt * rho dphi/dy;
// Store change to kinetic energy
fluid[globAddr + devSlabdim[0]] += ener;
}
}
}
| a6bd550de83d4c613905ed79710da25ac49e9278.cu | #include <stdio.h>
#include <string.h>
#include <stdarg.h>
#ifdef UNIX
#include <stdint.h>
#include <unistd.h>
#endif
#include "mex.h"
// CUDA
#include "cuda.h"
#include "cudaCommon.h"
#include "cudaGradientKernels.h"
#include "cudaSourceScalarPotential.h"
#define BLOCKDIMX 18
#define BLOCKDIMY 18
template <geometryType_t coords>
__global__ void cukern_computeScalarGradient3D(double *phi, double *f_x, double *f_y, double *f_z, int3 arraysize);
template <geometryType_t coords>
__global__ void cukern_computeScalarGradient2D(double *phi, double *fx, double *fy, int3 arraysize);
__global__ void cukern_applyPotentialGradient3D(double *fluid, double *fx, double *fy, double *fz, unsigned int arrayNumel);
__global__ void cukern_applyPotentialGradient2D(double *fluid, double *fx, double *fy, unsigned int arrayNumel);
__constant__ __device__ double devLambda[9];
#define LAMX devLambda[0]
#define LAMY devLambda[1]
#define LAMZ devLambda[2]
// Define: F = -beta * rho * grad(phi)
// rho_g = density for full effect of gravity
// rho_c = minimum density to feel gravity at all
// beta = { rho_g < rho : 1 }
// { rho_c < rho < rho_g : (rho-rho_c)/(rho_g-rho_c) } (applied linearly in the kernels below)
// { rho < rho_c : 0 }
// This provides a continuous (though not differentiable at rho = rho_g) way to suppress gravitation of the background fluid
// The original process of cutting gravity off below a critical density a few times the minimum
// density is believed to cause "blowups" at the inner edge of circular flow profiles due to being
// discontinuous. If even smoothness is insufficient and smooth differentiability is required,
// a more-times-continuous profile can be constructed, but let's not go there unless forced.
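// Numerical sketch (hypothetical values): with rho_c = 1e-5 and rho_g = 1e-3,
// a cell at rho = 5.05e-4 gets beta = (5.05e-4 - 1e-5) / (1e-3 - 1e-5) = 0.5,
// i.e. half-strength gravity, ramping linearly to 1 at rho = rho_g.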
// Density below which we force gravity effects to zero
#define RHOMIN devLambda[3]
#define RHOGRAV devLambda[4]
// 1 / (rho_g - rho_c)
#define G1 devLambda[5]
// rho_c / (rho_g - rho_c)
#define G2 devLambda[6]
#define RINNER devLambda[7]
#define DELTAR devLambda[8]
__constant__ __device__ unsigned int devSlabdim[3];
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
if ((nrhs!=4) || (nlhs != 0)) mexErrMsgTxt("Wrong number of arguments: need cudaApplyScalarPotential(FluidManager, phi, GeometryManager, [dt, rho_nograv, rho_fullgrav])\n");
if(CHECK_CUDA_ERROR("entering cudaSourceScalarPotential") != SUCCESSFUL) { DROP_MEX_ERROR("Failed upon entry to cudaSourceScalarPotential."); }
// Get source array info and create destination arrays
MGArray fluid[5];
MGArray phi;
int worked = MGA_accessMatlabArrays(prhs, 1, 1, &phi);
if(CHECK_IMOGEN_ERROR(worked) != SUCCESSFUL) { DROP_MEX_ERROR("Failed to access input arrays."); }
// Each partition uses the same common parameters
GeometryParams geom = accessMatlabGeometryClass(prhs[2]); // FIXME check for fail & return
int ne = mxGetNumberOfElements(prhs[3]);
if(ne != 3) {
printf("Input argument 3 has %i arguments, not three. Require precisely 3: [dt rho_nog rho_fullg]\n", ne);
DROP_MEX_ERROR("Crashing.");
}
double *sp = mxGetPr(prhs[3]);
double dt = sp[0]; /* dt */
double rhoMinimum = sp[1]; /* minimum rho, rho_c */
double rhoFull = sp[2]; /* rho_g */
int numFluids = mxGetNumberOfElements(prhs[0]);
int fluidct;
for(fluidct = 0; fluidct < numFluids; fluidct++) {
worked = MGA_accessFluidCanister(prhs[0], fluidct, &fluid[0]);
if(CHECK_IMOGEN_ERROR(worked) != SUCCESSFUL) break;
mxArray *flprop = mxGetProperty(prhs[0], fluidct, "MINMASS");
if(flprop != NULL) {
rhoMinimum = *((double *)mxGetPr(flprop));
} else {
worked = ERROR_NULL_POINTER;
break;
}
worked = sourcefunction_ScalarPotential(&fluid[0], &phi, dt, geom, rhoMinimum, rhoFull);
if(CHECK_IMOGEN_ERROR(worked) != SUCCESSFUL) break;
}
if(worked != SUCCESSFUL) { DROP_MEX_ERROR("cudaSourceScalarPotential failed"); }
}
int sourcefunction_ScalarPotential(MGArray *fluid, MGArray *phi, double dt, GeometryParams geom, double minRho, double rhoFullGravity)
{
double *dx = &geom.h[0];
dim3 gridsize, blocksize;
int3 arraysize;
int i, sub[6];
int worked = SUCCESSFUL; // initialize in case the loop below runs zero times
double lambda[9];
lambda[0] = dt/(2.0*dx[0]);
lambda[1] = dt/(2.0*dx[1]);
lambda[2] = dt/(2.0*dx[2]);
lambda[3] = minRho; /* minimum rho, rho_c */
lambda[4] = rhoFullGravity; /* rho_g */
lambda[5] = 1.0/(lambda[4] - lambda[3]); /* 1/(rho_g - rho_c) */
lambda[6] = lambda[3]*lambda[5];
lambda[7] = geom.Rinner;
lambda[8] = dx[1];
for(i = 0; i < fluid->nGPUs; i++) {
cudaSetDevice(fluid->deviceID[i]);
cudaMemcpyToSymbol((const void *)devLambda, lambda, 9*sizeof(double), 0, cudaMemcpyHostToDevice);
unsigned int sd[3];
sd[0] = (unsigned int)(fluid->slabPitch[i] / 8);
cudaMemcpyToSymbol((const void *)devSlabdim, sd, 1*sizeof(int), 0, cudaMemcpyHostToDevice);
worked = CHECK_CUDA_ERROR("cudaMemcpyToSymbol");
if(CHECK_IMOGEN_ERROR(worked) != SUCCESSFUL) break;
}
if(worked != SUCCESSFUL) return worked;
int isThreeD = (fluid->dim[2] > 1);
MGArray gradientStorage;
worked = MGA_allocSlab(fluid, &gradientStorage, 3);
if(CHECK_IMOGEN_ERROR(worked) != SUCCESSFUL) return worked;
worked = computeCentralGradient(phi, &gradientStorage, geom, 2, dt);
if(CHECK_IMOGEN_ERROR(worked) != SUCCESSFUL) return worked;
double *gs;
// Iterate over all partitions, and here we GO!
for(i = 0; i < fluid->nGPUs; i++) {
cudaSetDevice(fluid->deviceID[i]);
calcPartitionExtent(fluid, i, sub);
arraysize.x = sub[3]; arraysize.y = sub[4]; arraysize.z = sub[5];
blocksize = makeDim3(BLOCKDIMX, BLOCKDIMY, 1);
gridsize.x = arraysize.x / (blocksize.x - 2); gridsize.x += ((blocksize.x-2) * gridsize.x < arraysize.x);
gridsize.y = arraysize.y / (blocksize.y - 2); gridsize.y += ((blocksize.y-2) * gridsize.y < arraysize.y);
gridsize.z = 1;
gs = gradientStorage.devicePtr[i];
if(isThreeD) {
cukern_applyPotentialGradient3D<<<32, 256>>>(fluid[0].devicePtr[i], gs, gs+fluid->slabPitch[i]/8, gs+2*fluid->slabPitch[i]/8, fluid->partNumel[i]);
worked = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, fluid, i, "cukern_applyPotentialGradient3D");
if(worked != SUCCESSFUL) break;
} else {
cukern_applyPotentialGradient2D<<<32, 256>>>(fluid[0].devicePtr[i], gs, gs+fluid->slabPitch[i]/8, fluid->partNumel[i]);
worked = CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, fluid, i, "cukern_applyPotentialGradient2D");
if(worked != SUCCESSFUL) break;
}
}
if(CHECK_IMOGEN_ERROR(worked) != SUCCESSFUL) return worked;
worked = MGA_delete(&gradientStorage);
return CHECK_IMOGEN_ERROR(worked);
}
/* dP = -rho grad(phi) dt
* dE = -rho v \cdot grad(phi) dt
*
* Exact integrals at fixed position:
* P2 = P1 - rho grad(phi) t
* E2 = E1 - P1 \cdot grad(phi) t + .5 rho grad(phi) \cdot grad(phi) t^2
* = E1 - dt grad(phi) \cdot ( P1 - .5 * rho * grad(phi) ) */
__global__ void cukern_applyPotentialGradient3D(double *fluid, double *fx, double *fy, double *fz, unsigned int arrayNumel)
{
unsigned int globAddr = threadIdx.x + blockDim.x*blockIdx.x;
if(globAddr >= arrayNumel) return;
double deltaphi; // Store derivative of phi in one direction
double rhomin = devLambda[3];
double locrho, ener, mom;
for(; globAddr < arrayNumel; globAddr += blockDim.x*gridDim.x) {
ener = 0;
locrho = fluid[globAddr]; // rho(z) -> rho
if(locrho > rhomin) {
mom = fluid[globAddr + 2*devSlabdim[0]]; // load px(z) -> phiC
deltaphi = fx[globAddr];
if(locrho < RHOGRAV) { deltaphi *= (locrho*G1 - G2); } // G smoothly -> 0 as rho -> RHO_MIN
ener = -deltaphi*(mom-.5*locrho*deltaphi); // exact KE change
fluid[globAddr + 2*devSlabdim[0]] = mom - deltaphi*locrho; // store px <- px - dt * rho dphi/dx;
mom = fluid[globAddr + 3*devSlabdim[0]]; // load py(z) -> phiC
deltaphi = fy[globAddr];
if(locrho < RHOGRAV) { deltaphi *= (locrho*G1 - G2); } // G smoothly -> 0 as rho -> RHO_MIN
ener -= deltaphi*(mom-.5*locrho*deltaphi); // exact KE change
fluid[globAddr + 3*devSlabdim[0]] = mom - deltaphi*locrho; // store py <- py - dt * rho dphi/dy;
mom = fluid[globAddr + 4*devSlabdim[0]];
deltaphi = fz[globAddr];
if(locrho < RHOGRAV) { deltaphi *= (locrho*G1 - G2); } // G smoothly -> 0 as rho -> RHO_MIN
ener -= deltaphi*(mom-.5*locrho*deltaphi); // exact KE change
fluid[globAddr + 4*devSlabdim[0]] = mom - deltaphi*locrho; // store pz <- pz - dt * rho dphi/dz;
// Store changed kinetic energy
fluid[globAddr + devSlabdim[0]] += ener;
}
}
}
/* dP = -rho grad(phi) dt
* dE = -rho v \cdot grad(phi) dt
*
* Exact integrals at fixed position:
* P2 = P1 - rho grad(phi) t
* E2 = E1 - P1 \cdot grad(phi) t + .5 rho grad(phi) \cdot grad(phi) t^2
* = E1 - dt grad(phi) \cdot ( P1 - .5 * rho * grad(phi) ) */
__global__ void cukern_applyPotentialGradient2D(double *fluid, double *fx, double *fy, unsigned int arrayNumel)
{
unsigned int globAddr = threadIdx.x + blockDim.x*blockIdx.x;
if(globAddr >= arrayNumel) return;
double deltaphi; // Store derivative of phi in one direction
double rhomin = devLambda[3];
double locrho, ener, mom;
for(; globAddr < arrayNumel; globAddr += blockDim.x*gridDim.x) {
ener = 0;
locrho = fluid[globAddr]; // rho(z) -> rho
if(locrho > rhomin) {
mom = fluid[globAddr + 2*devSlabdim[0]]; // load px(z) -> phiC
deltaphi = fx[globAddr];
if(locrho < RHOGRAV) { deltaphi *= (locrho*G1 - G2); } // G smoothly -> 0 as rho -> RHO_MIN
ener = -deltaphi*(mom-.5*locrho*deltaphi); // exact KE change
fluid[globAddr + 2*devSlabdim[0]] = mom - deltaphi*locrho; // store px <- px - dt * rho dphi/dx;
mom = fluid[globAddr + 3*devSlabdim[0]]; // load py(z) -> phiC
deltaphi = fy[globAddr];
if(locrho < RHOGRAV) { deltaphi *= (locrho*G1 - G2); } // G smoothly -> 0 as rho -> RHO_MIN
ener -= deltaphi*(mom-.5*locrho*deltaphi); // exact KE change
fluid[globAddr + 3*devSlabdim[0]] = mom - deltaphi*locrho; // store py <- py - dt * rho dphi/dy;
// Store change to kinetic energy
fluid[globAddr + devSlabdim[0]] += ener;
}
}
}
|
7bbca8adba48727c76fc1080b7f971cc14e358a0.hip | // !!! This is a file automatically generated by hipify!!!
/////////////////////////////////////////////////////////////////////////////////////////
// This code contains NVIDIA Confidential Information and is disclosed
// under the Mutual Non-Disclosure Agreement.
//
// Notice
// ALL NVIDIA DESIGN SPECIFICATIONS AND CODE ("MATERIALS") ARE PROVIDED "AS IS" NVIDIA MAKES
// NO REPRESENTATIONS, WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
// THE MATERIALS, AND EXPRESSLY DISCLAIMS ANY IMPLIED WARRANTIES OF NONINFRINGEMENT,
// MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
//
// NVIDIA Corporation assumes no responsibility for the consequences of use of such
// information or for any infringement of patents or other rights of third parties that may
// result from its use. No license is granted by implication or otherwise under any patent
// or patent rights of NVIDIA Corporation. No third party distribution is allowed unless
// expressly authorized by NVIDIA. Details are subject to change without notice.
// This code supersedes and replaces all information previously supplied.
// NVIDIA Corporation products are not authorized for use as critical
// components in life support devices or systems without express written approval of
// NVIDIA Corporation.
//
// Copyright (c) 2015-2016 NVIDIA Corporation. All rights reserved.
//
// NVIDIA Corporation and its licensors retain all intellectual property and proprietary
// rights in and to this software and related documentation and any modifications thereto.
// Any use, reproduction, disclosure or distribution of this software and related
// documentation without an express license agreement from NVIDIA Corporation is
// strictly prohibited.
//
/////////////////////////////////////////////////////////////////////////////////////////
#include "utils.hpp"
#include <hip/hip_runtime.h>
__global__ void kernel(uint8_t* image, size_t pitch, const uint32_t width, const uint32_t height,
const uint32_t val)
{
const uint32_t tidx = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t tidy = blockDim.y * blockIdx.y + threadIdx.y;
if (tidx >= width || tidy >= height) return;
image[tidy*pitch + 4*tidx+0] = (tidx + val) % 256;
image[tidy*pitch + 4*tidx+1] = (tidy + val) % 256;
image[tidy*pitch + 4*tidx+2] = (tidx + tidy + 2*val) % 256;
image[tidy*pitch + 4*tidx+3] = 255;
}
uint32_t iDivUp(const uint32_t a, const uint32_t b)
{
return ((a % b) != 0U) ? ((a / b) + 1U) : (a / b);
}
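// e.g. iDivUp(1920, 32) = 60 (exact) and iDivUp(1000, 32) = 32 (rounded up),
// so the grid always covers the full image and the in-kernel bounds check
// discards the overhang.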
void runKernel(dwImageCUDA *image, const uint32_t val)
{
dim3 numThreads = dim3(32, 4, 1);
hipLaunchKernelGGL(( kernel) , dim3(iDivUp(image->prop.width, numThreads.x),
iDivUp(image->prop.height, numThreads.y)),
dim3(numThreads) , 0, 0, static_cast<uint8_t*>(image->dptr[0]), image->pitch[0], image->prop.width, image->prop.height, val);
}
| 7bbca8adba48727c76fc1080b7f971cc14e358a0.cu | /////////////////////////////////////////////////////////////////////////////////////////
// This code contains NVIDIA Confidential Information and is disclosed
// under the Mutual Non-Disclosure Agreement.
//
// Notice
// ALL NVIDIA DESIGN SPECIFICATIONS AND CODE ("MATERIALS") ARE PROVIDED "AS IS" NVIDIA MAKES
// NO REPRESENTATIONS, WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
// THE MATERIALS, AND EXPRESSLY DISCLAIMS ANY IMPLIED WARRANTIES OF NONINFRINGEMENT,
// MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
//
// NVIDIA Corporation assumes no responsibility for the consequences of use of such
// information or for any infringement of patents or other rights of third parties that may
// result from its use. No license is granted by implication or otherwise under any patent
// or patent rights of NVIDIA Corporation. No third party distribution is allowed unless
// expressly authorized by NVIDIA. Details are subject to change without notice.
// This code supersedes and replaces all information previously supplied.
// NVIDIA Corporation products are not authorized for use as critical
// components in life support devices or systems without express written approval of
// NVIDIA Corporation.
//
// Copyright (c) 2015-2016 NVIDIA Corporation. All rights reserved.
//
// NVIDIA Corporation and its licensors retain all intellectual property and proprietary
// rights in and to this software and related documentation and any modifications thereto.
// Any use, reproduction, disclosure or distribution of this software and related
// documentation without an express license agreement from NVIDIA Corporation is
// strictly prohibited.
//
/////////////////////////////////////////////////////////////////////////////////////////
#include "utils.hpp"
#include <cuda.h>
__global__ void kernel(uint8_t* image, size_t pitch, const uint32_t width, const uint32_t height,
const uint32_t val)
{
const uint32_t tidx = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t tidy = blockDim.y * blockIdx.y + threadIdx.y;
if (tidx >= width || tidy >= height) return;
image[tidy*pitch + 4*tidx+0] = (tidx + val) % 256;
image[tidy*pitch + 4*tidx+1] = (tidy + val) % 256;
image[tidy*pitch + 4*tidx+2] = (tidx + tidy + 2*val) % 256;
image[tidy*pitch + 4*tidx+3] = 255;
}
uint32_t iDivUp(const uint32_t a, const uint32_t b)
{
return ((a % b) != 0U) ? ((a / b) + 1U) : (a / b);
}
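// e.g. iDivUp(1920, 32) = 60 (exact) and iDivUp(1000, 32) = 32 (rounded up),
// so the grid always covers the full image and the in-kernel bounds check
// discards the overhang.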
void runKernel(dwImageCUDA *image, const uint32_t val)
{
dim3 numThreads = dim3(32, 4, 1);
kernel <<<dim3(iDivUp(image->prop.width, numThreads.x),
iDivUp(image->prop.height, numThreads.y)),
numThreads >>>(static_cast<uint8_t*>(image->dptr[0]), image->pitch[0], image->prop.width, image->prop.height, val);
}
|
962408c682b622d6d34bdf66777d899400aa734e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "SOR_13PT_CROSS_SOR_kernel.hu"
__global__ void kernel0(int *arr1, int *arr2, int trial, int padd, int len1, int len2, int c0)
{
int b0 = blockIdx.y, b1 = blockIdx.x;
int t0 = threadIdx.y, t1 = threadIdx.x;
#define ppcg_min(x,y) ({ __typeof__(x) _x = (x); __typeof__(y) _y = (y); _x < _y ? _x : _y; })
#define ppcg_max(x,y) ({ __typeof__(x) _x = (x); __typeof__(y) _y = (y); _x > _y ? _x : _y; })
#define ppcg_fdiv_q(n,d) (((n)<0) ? -((-(n)+(d)-1)/(d)) : (n)/(d))
for (int c1 = 32 * b0 + 8192 * ((padd - 32 * b0 + 8160) / 8192); c1 < len1; c1 += 8192)
if (len1 >= t0 + c1 + 1 && t0 + c1 >= padd)
for (int c2 = 32 * b1 + 8192 * ((padd - 32 * b1 + 8160) / 8192); c2 < len2; c2 += 8192)
for (int c4 = ppcg_max(t1, t1 + 16 * ppcg_fdiv_q(padd - t1 - c2 - 1, 16) + 16); c4 <= ppcg_min(31, len2 - c2 - 1); c4 += 16)
arr2[(t0 + c1) * 4108 + (c2 + c4)] += (((((((((((((arr1[(t0 + c1 - 3) * 4108 + (c2 + c4)] + arr1[(t0 + c1 - 2) * 4108 + (c2 + c4)]) + arr1[(t0 + c1 - 1) * 4108 + (c2 + c4)]) + arr1[(t0 + c1) * 4108 + (c2 + c4 - 3)]) + arr1[(t0 + c1) * 4108 + (c2 + c4 - 2)]) + arr1[(t0 + c1) * 4108 + (c2 + c4 - 1)]) + arr1[(t0 + c1) * 4108 + (c2 + c4)]) + arr1[(t0 + c1) * 4108 + (c2 + c4 + 1)]) + arr1[(t0 + c1) * 4108 + (c2 + c4 + 2)]) + arr1[(t0 + c1) * 4108 + (c2 + c4 + 3)]) + arr1[(t0 + c1 + 1) * 4108 + (c2 + c4)]) + arr1[(t0 + c1 + 2) * 4108 + (c2 + c4)]) + arr1[(t0 + c1 + 3) * 4108 + (c2 + c4)]) / 13);
}
__global__ void kernel1(int *arr1, int *arr2, int trial, int padd, int len1, int len2, int c0)
{
int b0 = blockIdx.y, b1 = blockIdx.x;
int t0 = threadIdx.y, t1 = threadIdx.x;
#define ppcg_min(x,y) ({ __typeof__(x) _x = (x); __typeof__(y) _y = (y); _x < _y ? _x : _y; })
#define ppcg_max(x,y) ({ __typeof__(x) _x = (x); __typeof__(y) _y = (y); _x > _y ? _x : _y; })
#define ppcg_fdiv_q(n,d) (((n)<0) ? -((-(n)+(d)-1)/(d)) : (n)/(d))
for (int c1 = 32 * b0 + 8192 * ((padd - 32 * b0 + 8160) / 8192); c1 < len1; c1 += 8192)
if (len1 >= t0 + c1 + 1 && t0 + c1 >= padd)
for (int c2 = 32 * b1 + 8192 * ((padd - 32 * b1 + 8160) / 8192); c2 < len2; c2 += 8192)
for (int c4 = ppcg_max(t1, t1 + 16 * ppcg_fdiv_q(padd - t1 - c2 - 1, 16) + 16); c4 <= ppcg_min(31, len2 - c2 - 1); c4 += 16)
arr1[(t0 + c1) * 4108 + (c2 + c4)] += (((((((((((((arr2[(t0 + c1 - 3) * 4108 + (c2 + c4)] + arr2[(t0 + c1 - 2) * 4108 + (c2 + c4)]) + arr2[(t0 + c1 - 1) * 4108 + (c2 + c4)]) + arr2[(t0 + c1) * 4108 + (c2 + c4 - 3)]) + arr2[(t0 + c1) * 4108 + (c2 + c4 - 2)]) + arr2[(t0 + c1) * 4108 + (c2 + c4 - 1)]) + arr2[(t0 + c1) * 4108 + (c2 + c4)]) + arr2[(t0 + c1) * 4108 + (c2 + c4 + 1)]) + arr2[(t0 + c1) * 4108 + (c2 + c4 + 2)]) + arr2[(t0 + c1) * 4108 + (c2 + c4 + 3)]) + arr2[(t0 + c1 + 1) * 4108 + (c2 + c4)]) + arr2[(t0 + c1 + 2) * 4108 + (c2 + c4)]) + arr2[(t0 + c1 + 3) * 4108 + (c2 + c4)]) / 13);
}
| 962408c682b622d6d34bdf66777d899400aa734e.cu | #include "SOR_13PT_CROSS_SOR_kernel.hu"
__global__ void kernel0(int *arr1, int *arr2, int trial, int padd, int len1, int len2, int c0)
{
int b0 = blockIdx.y, b1 = blockIdx.x;
int t0 = threadIdx.y, t1 = threadIdx.x;
#define ppcg_min(x,y) ({ __typeof__(x) _x = (x); __typeof__(y) _y = (y); _x < _y ? _x : _y; })
#define ppcg_max(x,y) ({ __typeof__(x) _x = (x); __typeof__(y) _y = (y); _x > _y ? _x : _y; })
#define ppcg_fdiv_q(n,d) (((n)<0) ? -((-(n)+(d)-1)/(d)) : (n)/(d))
for (int c1 = 32 * b0 + 8192 * ((padd - 32 * b0 + 8160) / 8192); c1 < len1; c1 += 8192)
if (len1 >= t0 + c1 + 1 && t0 + c1 >= padd)
for (int c2 = 32 * b1 + 8192 * ((padd - 32 * b1 + 8160) / 8192); c2 < len2; c2 += 8192)
for (int c4 = ppcg_max(t1, t1 + 16 * ppcg_fdiv_q(padd - t1 - c2 - 1, 16) + 16); c4 <= ppcg_min(31, len2 - c2 - 1); c4 += 16)
arr2[(t0 + c1) * 4108 + (c2 + c4)] += (((((((((((((arr1[(t0 + c1 - 3) * 4108 + (c2 + c4)] + arr1[(t0 + c1 - 2) * 4108 + (c2 + c4)]) + arr1[(t0 + c1 - 1) * 4108 + (c2 + c4)]) + arr1[(t0 + c1) * 4108 + (c2 + c4 - 3)]) + arr1[(t0 + c1) * 4108 + (c2 + c4 - 2)]) + arr1[(t0 + c1) * 4108 + (c2 + c4 - 1)]) + arr1[(t0 + c1) * 4108 + (c2 + c4)]) + arr1[(t0 + c1) * 4108 + (c2 + c4 + 1)]) + arr1[(t0 + c1) * 4108 + (c2 + c4 + 2)]) + arr1[(t0 + c1) * 4108 + (c2 + c4 + 3)]) + arr1[(t0 + c1 + 1) * 4108 + (c2 + c4)]) + arr1[(t0 + c1 + 2) * 4108 + (c2 + c4)]) + arr1[(t0 + c1 + 3) * 4108 + (c2 + c4)]) / 13);
}
__global__ void kernel1(int *arr1, int *arr2, int trial, int padd, int len1, int len2, int c0)
{
int b0 = blockIdx.y, b1 = blockIdx.x;
int t0 = threadIdx.y, t1 = threadIdx.x;
#define ppcg_min(x,y) ({ __typeof__(x) _x = (x); __typeof__(y) _y = (y); _x < _y ? _x : _y; })
#define ppcg_max(x,y) ({ __typeof__(x) _x = (x); __typeof__(y) _y = (y); _x > _y ? _x : _y; })
#define ppcg_fdiv_q(n,d) (((n)<0) ? -((-(n)+(d)-1)/(d)) : (n)/(d))
for (int c1 = 32 * b0 + 8192 * ((padd - 32 * b0 + 8160) / 8192); c1 < len1; c1 += 8192)
if (len1 >= t0 + c1 + 1 && t0 + c1 >= padd)
for (int c2 = 32 * b1 + 8192 * ((padd - 32 * b1 + 8160) / 8192); c2 < len2; c2 += 8192)
for (int c4 = ppcg_max(t1, t1 + 16 * ppcg_fdiv_q(padd - t1 - c2 - 1, 16) + 16); c4 <= ppcg_min(31, len2 - c2 - 1); c4 += 16)
arr1[(t0 + c1) * 4108 + (c2 + c4)] += (((((((((((((arr2[(t0 + c1 - 3) * 4108 + (c2 + c4)] + arr2[(t0 + c1 - 2) * 4108 + (c2 + c4)]) + arr2[(t0 + c1 - 1) * 4108 + (c2 + c4)]) + arr2[(t0 + c1) * 4108 + (c2 + c4 - 3)]) + arr2[(t0 + c1) * 4108 + (c2 + c4 - 2)]) + arr2[(t0 + c1) * 4108 + (c2 + c4 - 1)]) + arr2[(t0 + c1) * 4108 + (c2 + c4)]) + arr2[(t0 + c1) * 4108 + (c2 + c4 + 1)]) + arr2[(t0 + c1) * 4108 + (c2 + c4 + 2)]) + arr2[(t0 + c1) * 4108 + (c2 + c4 + 3)]) + arr2[(t0 + c1 + 1) * 4108 + (c2 + c4)]) + arr2[(t0 + c1 + 2) * 4108 + (c2 + c4)]) + arr2[(t0 + c1 + 3) * 4108 + (c2 + c4)]) / 13);
}
|
f56a48d7af0294e5974550ead6e8038c961a6919.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include "freshman.h"
void sumArrays(float * a,float * b,float * res,const int size)
{
for(int i=0;i<size;i+=4)
{
res[i]=a[i]+b[i];
res[i+1]=a[i+1]+b[i+1];
res[i+2]=a[i+2]+b[i+2];
res[i+3]=a[i+3]+b[i+3];
}
}
__global__ void sumArraysGPU(float*a,float*b,float*res,int N)
{
int i=blockIdx.x*blockDim.x+threadIdx.x;
if(i < N)
res[i]=a[i]+b[i];
}
int main(int argc,char **argv)
{
// set up device
initDevice(0);
int nElem=1<<24;
printf("Vector size:%d\n",nElem);
int nByte=sizeof(float)*nElem;
float *res_h=(float*)malloc(nByte);
memset(res_h,0,nByte);
//memset(res_from_gpu_h,0,nByte);
float *a_d,*b_d,*res_d;
CHECK(hipMallocManaged((float**)&a_d,nByte));
CHECK(hipMallocManaged((float**)&b_d,nByte));
CHECK(hipMallocManaged((float**)&res_d,nByte));
initialData(a_d,nElem);
initialData(b_d,nElem);
//CHECK(hipMemcpy(a_d,a_h,nByte,hipMemcpyHostToDevice));
//CHECK(hipMemcpy(b_d,b_h,nByte,hipMemcpyHostToDevice));
dim3 block(512);
dim3 grid((nElem-1)/block.x+1);
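// Ceil-division idiom: with nElem = 1<<24 = 16,777,216 and block.x = 512,
// grid.x = (16777216 - 1) / 512 + 1 = 32768 blocks, exactly one thread per element.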
double iStart,iElaps;
iStart=cpuSecond();
hipLaunchKernelGGL(( sumArraysGPU), dim3(grid),dim3(block), 0, 0, a_d,b_d,res_d,nElem);
hipDeviceSynchronize();
iElaps=cpuSecond()-iStart;
printf("Execution configuration<<<%d,%d>>> Time elapsed %f sec\n",grid.x,block.x,iElaps);
//CHECK(hipMemcpy(res_from_gpu_h,res_d,nByte,hipMemcpyDeviceToHost));
sumArrays(a_d,b_d,res_h,nElem); // CPU reference for a + b
checkResult(res_h,res_d,nElem);
hipFree(a_d);
hipFree(b_d);
hipFree(res_d);
free(res_h);
return 0;
}
| f56a48d7af0294e5974550ead6e8038c961a6919.cu | #include <cuda_runtime.h>
#include <stdio.h>
#include "freshman.h"
void sumArrays(float * a,float * b,float * res,const int size)
{
for(int i=0;i<size;i+=4)
{
res[i]=a[i]+b[i];
res[i+1]=a[i+1]+b[i+1];
res[i+2]=a[i+2]+b[i+2];
res[i+3]=a[i+3]+b[i+3];
}
}
__global__ void sumArraysGPU(float*a,float*b,float*res,int N)
{
int i=blockIdx.x*blockDim.x+threadIdx.x;
if(i < N)
res[i]=a[i]+b[i];
}
int main(int argc,char **argv)
{
// set up device
initDevice(0);
int nElem=1<<24;
printf("Vector size:%d\n",nElem);
int nByte=sizeof(float)*nElem;
float *res_h=(float*)malloc(nByte);
memset(res_h,0,nByte);
//memset(res_from_gpu_h,0,nByte);
float *a_d,*b_d,*res_d;
CHECK(cudaMallocManaged((float**)&a_d,nByte));
CHECK(cudaMallocManaged((float**)&b_d,nByte));
CHECK(cudaMallocManaged((float**)&res_d,nByte));
initialData(a_d,nElem);
initialData(b_d,nElem);
//CHECK(cudaMemcpy(a_d,a_h,nByte,cudaMemcpyHostToDevice));
//CHECK(cudaMemcpy(b_d,b_h,nByte,cudaMemcpyHostToDevice));
dim3 block(512);
dim3 grid((nElem-1)/block.x+1);
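// Ceil-division idiom: with nElem = 1<<24 = 16,777,216 and block.x = 512,
// grid.x = (16777216 - 1) / 512 + 1 = 32768 blocks, exactly one thread per element.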
double iStart,iElaps;
iStart=cpuSecond();
sumArraysGPU<<<grid,block>>>(a_d,b_d,res_d,nElem);
cudaDeviceSynchronize();
iElaps=cpuSecond()-iStart;
printf("Execution configuration<<<%d,%d>>> Time elapsed %f sec\n",grid.x,block.x,iElaps);
//CHECK(cudaMemcpy(res_from_gpu_h,res_d,nByte,cudaMemcpyDeviceToHost));
sumArrays(a_d,b_d,res_h,nElem); // CPU reference for a + b
checkResult(res_h,res_d,nElem);
cudaFree(a_d);
cudaFree(b_d);
cudaFree(res_d);
free(res_h);
return 0;
}
|
11425f077f60914a0d9a3b36317e1f077c600ecb.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Execution environment
*/
#include <cstring>
#include "cutlass/numeric_types.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/util/reference/device/tensor_compare.h"
#include "cutlass/util/reference/device/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/library/util.h"
#include "device_allocation.h"
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
size_t DeviceAllocation::bytes(library::NumericTypeID type, size_t capacity) {
return size_t(cutlass::library::sizeof_bits(type)) * capacity / 8;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
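// Computes the dense (packed) stride for a layout of the given extent; for
// example, a packed RowMajor extent of {rows, cols} yields stride {cols}.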
template <typename Layout>
static std::vector<int64_t> get_packed_layout_stride(std::vector<int> const &extent) {
typename Layout::TensorCoord extent_coord;
typename Layout::Stride stride_coord;
if (extent.size() != size_t(Layout::kRank)) {
throw std::runtime_error("Layout does not have same rank as extent vector.");
}
for (int i = 0; i < Layout::kRank; ++i) {
extent_coord[i] = extent.at(i);
}
std::vector<int64_t> stride;
stride.resize(Layout::kStrideRank, 0);
Layout layout = Layout::packed(extent_coord);
stride_coord = layout.stride();
for (int i = 0; i < Layout::kStrideRank; ++i) {
stride.at(i) = (int64_t)stride_coord[i];
}
return stride;
}
/// Returns the stride of a packed layout
std::vector<int64_t> DeviceAllocation::get_packed_layout(
library::LayoutTypeID layout_id,
std::vector<int> const &extent) {
std::vector<int64_t> stride;
switch (layout_id) {
case library::LayoutTypeID::kColumnMajor:
stride = get_packed_layout_stride<cutlass::layout::ColumnMajor>(extent);
break;
case library::LayoutTypeID::kRowMajor:
stride = get_packed_layout_stride<cutlass::layout::RowMajor>(extent);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK2:
stride = get_packed_layout_stride<cutlass::layout::ColumnMajorInterleaved<2>>(extent);
break;
case library::LayoutTypeID::kRowMajorInterleavedK2:
stride = get_packed_layout_stride<cutlass::layout::RowMajorInterleaved<2>>(extent);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK4:
stride = get_packed_layout_stride<cutlass::layout::ColumnMajorInterleaved<4>>(extent);
break;
case library::LayoutTypeID::kRowMajorInterleavedK4:
stride = get_packed_layout_stride<cutlass::layout::RowMajorInterleaved<4>>(extent);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK16:
stride = get_packed_layout_stride<cutlass::layout::ColumnMajorInterleaved<16>>(extent);
break;
case library::LayoutTypeID::kRowMajorInterleavedK16:
stride = get_packed_layout_stride<cutlass::layout::RowMajorInterleaved<16>>(extent);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK32:
stride = get_packed_layout_stride<cutlass::layout::ColumnMajorInterleaved<32>>(extent);
break;
case library::LayoutTypeID::kRowMajorInterleavedK32:
stride = get_packed_layout_stride<cutlass::layout::RowMajorInterleaved<32>>(extent);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK64:
stride = get_packed_layout_stride<cutlass::layout::ColumnMajorInterleaved<64>>(extent);
break;
case library::LayoutTypeID::kRowMajorInterleavedK64:
stride = get_packed_layout_stride<cutlass::layout::RowMajorInterleaved<64>>(extent);
break;
case library::LayoutTypeID::kTensorNCHW:
stride = get_packed_layout_stride<cutlass::layout::TensorNCHW>(extent);
break;
case library::LayoutTypeID::kTensorNHWC:
stride = get_packed_layout_stride<cutlass::layout::TensorNHWC>(extent);
break;
case library::LayoutTypeID::kTensorNDHWC:
stride = get_packed_layout_stride<cutlass::layout::TensorNDHWC>(extent);
break;
case library::LayoutTypeID::kTensorNC32HW32:
stride = get_packed_layout_stride<cutlass::layout::TensorNCxHWx<32>>(extent);
break;
case library::LayoutTypeID::kTensorNC64HW64:
stride = get_packed_layout_stride<cutlass::layout::TensorNCxHWx<64>>(extent);
break;
case library::LayoutTypeID::kTensorC32RSK32:
stride = get_packed_layout_stride<cutlass::layout::TensorCxRSKx<32>>(extent);
break;
case library::LayoutTypeID::kTensorC64RSK64:
stride = get_packed_layout_stride<cutlass::layout::TensorCxRSKx<64>>(extent);
break;
default: break;
}
return stride;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template to use CUTLASS Layout functions to construct a layout in a caller-provided buffer and compute its capacity
template <typename Layout>
static size_t construct_layout_(
void *bytes,
library::LayoutTypeID layout_id,
std::vector<int> const &extent,
std::vector<int64_t> &stride) {
if (extent.size() != Layout::kRank) {
throw std::runtime_error(
"Layout must have same rank as extent vector.");
}
if (Layout::kStrideRank && stride.empty()) {
stride = get_packed_layout_stride<Layout>(extent);
return construct_layout_<Layout>(
bytes,
layout_id,
extent,
stride);
}
else if (Layout::kStrideRank && stride.size() != Layout::kStrideRank) {
throw std::runtime_error(
"Layout requires either empty stride or stride vector matching Layout::kStrideRank");
}
typename Layout::Stride stride_coord;
for (int i = 0; i < Layout::kStrideRank; ++i) {
stride_coord[i] = (int)stride.at(i);
}
typename Layout::TensorCoord extent_coord;
for (int i = 0; i < Layout::kRank; ++i) {
extent_coord[i] = extent.at(i);
}
// Construct the CUTLASS layout object from the stride object
Layout layout(stride_coord);
// Pack it into bytes
if (bytes) {
*reinterpret_cast<Layout *>(bytes) = layout;
}
// Return capacity
size_t capacity_ = layout.capacity(extent_coord);
return capacity_;
}
/// Constructs the layout in the provided buffer (when non-null) and returns the capacity needed, in elements
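///
/// Illustrative use (values chosen for this example): query the packed stride
/// and element capacity of a 4x8 column-major matrix without writing a layout
/// object (bytes == nullptr):
///
///   std::vector<int> extent = {4, 8};
///   std::vector<int64_t> stride;   // empty stride -> packed layout
///   size_t capacity = DeviceAllocation::construct_layout(
///       nullptr, library::LayoutTypeID::kColumnMajor, extent, stride);
///   // stride == {4} (leading dimension), capacity == 32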
size_t DeviceAllocation::construct_layout(
void *bytes,
library::LayoutTypeID layout_id,
std::vector<int> const &extent,
std::vector<int64_t> &stride) {
switch (layout_id) {
case library::LayoutTypeID::kColumnMajor:
return construct_layout_<cutlass::layout::ColumnMajor>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kRowMajor:
return construct_layout_<cutlass::layout::RowMajor>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kColumnMajorInterleavedK2:
return construct_layout_<cutlass::layout::ColumnMajorInterleaved<2>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kRowMajorInterleavedK2:
return construct_layout_<cutlass::layout::RowMajorInterleaved<2>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kColumnMajorInterleavedK4:
return construct_layout_<cutlass::layout::ColumnMajorInterleaved<4>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kRowMajorInterleavedK4:
return construct_layout_<cutlass::layout::RowMajorInterleaved<4>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kColumnMajorInterleavedK16:
return construct_layout_<cutlass::layout::ColumnMajorInterleaved<16>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kRowMajorInterleavedK16:
return construct_layout_<cutlass::layout::RowMajorInterleaved<16>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kColumnMajorInterleavedK32:
return construct_layout_<cutlass::layout::ColumnMajorInterleaved<32>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kRowMajorInterleavedK32:
return construct_layout_<cutlass::layout::RowMajorInterleaved<32>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kColumnMajorInterleavedK64:
return construct_layout_<cutlass::layout::ColumnMajorInterleaved<64>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kRowMajorInterleavedK64:
return construct_layout_<cutlass::layout::RowMajorInterleaved<64>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kTensorNCHW:
return construct_layout_<cutlass::layout::TensorNHWC>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kTensorNHWC:
return construct_layout_<cutlass::layout::TensorNHWC>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kTensorNDHWC:
return construct_layout_<cutlass::layout::TensorNDHWC>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kTensorNC32HW32:
return construct_layout_<cutlass::layout::TensorNCxHWx<32>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kTensorNC64HW64:
return construct_layout_<cutlass::layout::TensorNCxHWx<64>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kTensorC32RSK32:
return construct_layout_<cutlass::layout::TensorCxRSKx<32>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kTensorC64RSK64:
return construct_layout_<cutlass::layout::TensorCxRSKx<64>>(bytes, layout_id, extent, stride);
default: break;
}
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
DeviceAllocation::DeviceAllocation():
type_(library::NumericTypeID::kInvalid),
batch_stride_(0),
capacity_(0),
pointer_(nullptr),
layout_(library::LayoutTypeID::kUnknown),
batch_count_(1) {
}
DeviceAllocation::DeviceAllocation(
library::NumericTypeID type,
size_t capacity
):
type_(type), batch_stride_(capacity), capacity_(capacity), pointer_(nullptr),
layout_(library::LayoutTypeID::kUnknown), batch_count_(1) {
hipError_t result = hipMalloc((void **)&pointer_, bytes(type, capacity));
if (result != hipSuccess) {
type_ = library::NumericTypeID::kInvalid;
capacity_ = 0;
pointer_ = nullptr;
throw std::bad_alloc();
}
}
DeviceAllocation::DeviceAllocation(
library::NumericTypeID type,
library::LayoutTypeID layout_id,
std::vector<int> const &extent,
std::vector<int64_t> const &stride,
int batch_count
):
type_(type), batch_stride_(size_t(0)), capacity_(size_t(0)), pointer_(nullptr), batch_count_(1) {
reset(type, layout_id, extent, stride, batch_count);
}
DeviceAllocation::~DeviceAllocation() {
if (pointer_) {
hipFree(pointer_);
}
}
DeviceAllocation &DeviceAllocation::reset() {
if (pointer_) {
hipFree(pointer_);
}
type_ = library::NumericTypeID::kInvalid;
batch_stride_ = 0;
capacity_ = 0;
pointer_ = nullptr;
layout_ = library::LayoutTypeID::kUnknown;
stride_.clear();
extent_.clear();
tensor_ref_buffer_.clear();
batch_count_ = 1;
return *this;
}
DeviceAllocation &DeviceAllocation::reset(library::NumericTypeID type, size_t capacity) {
reset();
type_ = type;
batch_stride_ = capacity;
capacity_ = capacity;
hipError_t result = hipMalloc((void **)&pointer_, bytes(type_, capacity_));
if (result != hipSuccess) {
throw std::bad_alloc();
}
layout_ = library::LayoutTypeID::kUnknown;
stride_.clear();
extent_.clear();
batch_count_ = 1;
tensor_ref_buffer_.resize(sizeof(pointer_), 0);
std::memcpy(tensor_ref_buffer_.data(), &pointer_, sizeof(pointer_));
return *this;
}
/// Allocates memory for a given layout and tensor
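///
/// Illustrative use (values chosen for this example): allocate 8 batched,
/// packed 16x32 row-major tensors:
///
///   DeviceAllocation alloc;
///   alloc.reset(library::NumericTypeID::kF16,
///               library::LayoutTypeID::kRowMajor,
///               {16, 32}, {}, /*batch_count=*/8);
///   // alloc.batch_stride() == 16 * 32, alloc.capacity() == 16 * 32 * 8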
DeviceAllocation &DeviceAllocation::reset(
library::NumericTypeID type,
library::LayoutTypeID layout_id,
std::vector<int> const &extent,
std::vector<int64_t> const &stride,
int batch_count) {
reset();
tensor_ref_buffer_.resize(sizeof(pointer_) + (sizeof(int64_t) * library::get_layout_stride_rank(layout_id)), 0);
type_ = type;
layout_ = layout_id;
stride_ = stride;
extent_ = extent;
batch_count_ = batch_count;
batch_stride_ = construct_layout(
tensor_ref_buffer_.data() + sizeof(pointer_),
layout_id,
extent,
stride_);
capacity_ = batch_stride_ * batch_count_;
hipError_t result = hipMalloc((void **)&pointer_, bytes(type, capacity_));
if (result != hipSuccess) {
throw std::bad_alloc();
}
std::memcpy(tensor_ref_buffer_.data(), &pointer_, sizeof(pointer_));
return *this;
}
bool DeviceAllocation::good() const {
return (capacity_ && pointer_);
}
library::NumericTypeID DeviceAllocation::type() const {
return type_;
}
void *DeviceAllocation::data() const {
return pointer_;
}
void *DeviceAllocation::batch_data(int batch_idx) const {
return static_cast<char *>(data()) + batch_stride_bytes() * batch_idx;
}
library::LayoutTypeID DeviceAllocation::layout() const {
return layout_;
}
std::vector<int64_t> const & DeviceAllocation::stride() const {
return stride_;
}
/// Gets the extent vector
std::vector<int> const & DeviceAllocation::extent() const {
return extent_;
}
/// Gets the number of adjacent tensors in memory
int DeviceAllocation::batch_count() const {
return batch_count_;
}
/// Gets the stride (in units of elements) between items
int64_t DeviceAllocation::batch_stride() const {
return batch_stride_;
}
/// Gets the stride (in units of bytes) between items
int64_t DeviceAllocation::batch_stride_bytes() const {
return bytes(type_, batch_stride_);
}
size_t DeviceAllocation::capacity() const {
return capacity_;
}
size_t DeviceAllocation::bytes() const {
return bytes(type_, capacity_);
}
/// Copies from an equivalent-sized tensor in device memory
void DeviceAllocation::copy_from_device(void const *ptr) {
if (!bytes()) {
#ifndef NDEBUG
std::cout << "Skipping copy of size 0 allocation\n";
#endif
return;
}
hipError_t result = hipMemcpy(data(), ptr, bytes(), hipMemcpyDeviceToDevice);
if (result != hipSuccess) {
throw std::runtime_error("Failed device-to-device copy");
}
}
/// Copies from an equivalent-sized tensor in host memory
void DeviceAllocation::copy_from_host(void const *ptr) {
if (!bytes()) {
#ifndef NDEBUG
std::cout << "Skipping copy of size 0 allocation\n";
#endif
return;
}
hipError_t result = hipMemcpy(data(), ptr, bytes(), hipMemcpyHostToDevice);
if (result != hipSuccess) {
throw std::runtime_error("Failed host-to-device copy");
}
}
/// Copies to an equivalent-sized tensor in host memory
void DeviceAllocation::copy_to_host(void *ptr) {
if (!bytes()) {
#ifndef NDEBUG
std::cout << "Skipping copy of size 0 allocation\n";
#endif
return;
}
hipError_t result = hipMemcpy(ptr, data(), bytes(), hipMemcpyDeviceToHost);
if (result != hipSuccess) {
throw std::runtime_error("Failed device-to-host copy");
}
}
void DeviceAllocation::initialize_random_device(int seed, Distribution dist) {
if (!bytes()) {
#ifndef NDEBUG
std::cout << "Skipping initialization of size 0 allocation\n";
#endif
return;
}
if (!data()) {
throw std::runtime_error("Attempting to initialize invalid allocation.");
}
// Instantiate calls to CURAND here. This file takes a long time to compile for
// this reason.
switch (type_) {
case library::NumericTypeID::kF16:
cutlass::reference::device::BlockFillRandom<cutlass::half_t>(
reinterpret_cast<cutlass::half_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kBF16:
cutlass::reference::device::BlockFillRandom<cutlass::bfloat16_t>(
reinterpret_cast<cutlass::bfloat16_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kTF32:
cutlass::reference::device::BlockFillRandom<cutlass::tfloat32_t>(
reinterpret_cast<cutlass::tfloat32_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kF32:
cutlass::reference::device::BlockFillRandom<float>(
reinterpret_cast<float *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kCBF16:
cutlass::reference::device::BlockFillRandom<complex<bfloat16_t>>(
reinterpret_cast<complex<bfloat16_t> *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kCTF32:
cutlass::reference::device::BlockFillRandom<cutlass::complex<cutlass::tfloat32_t>>(
reinterpret_cast<cutlass::complex<cutlass::tfloat32_t> *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kCF32:
cutlass::reference::device::BlockFillRandom<cutlass::complex<float>>(
reinterpret_cast<cutlass::complex<float> *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kFE4M3:
cutlass::reference::device::BlockFillRandom<cutlass::float_e4m3_t>(
reinterpret_cast<cutlass::float_e4m3_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kFE5M2:
cutlass::reference::device::BlockFillRandom<cutlass::float_e5m2_t>(
reinterpret_cast<cutlass::float_e5m2_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kF64:
cutlass::reference::device::BlockFillRandom<double>(
reinterpret_cast<double *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kCF64:
cutlass::reference::device::BlockFillRandom<complex<double>>(
reinterpret_cast<complex<double> *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS2:
cutlass::reference::device::BlockFillRandom<int2b_t>(
reinterpret_cast<int2b_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS4:
cutlass::reference::device::BlockFillRandom<int4b_t>(
reinterpret_cast<int4b_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS8:
cutlass::reference::device::BlockFillRandom<int8_t>(
reinterpret_cast<int8_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS16:
cutlass::reference::device::BlockFillRandom<int16_t>(
reinterpret_cast<int16_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS32:
cutlass::reference::device::BlockFillRandom<int32_t>(
reinterpret_cast<int32_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS64:
cutlass::reference::device::BlockFillRandom<int64_t>(
reinterpret_cast<int64_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kB1:
cutlass::reference::device::BlockFillRandom<uint1b_t>(
reinterpret_cast<uint1b_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU2:
cutlass::reference::device::BlockFillRandom<uint2b_t>(
reinterpret_cast<uint2b_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU4:
cutlass::reference::device::BlockFillRandom<uint4b_t>(
reinterpret_cast<uint4b_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU8:
cutlass::reference::device::BlockFillRandom<uint8_t>(
reinterpret_cast<uint8_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU16:
cutlass::reference::device::BlockFillRandom<uint16_t>(
reinterpret_cast<uint16_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU32:
cutlass::reference::device::BlockFillRandom<uint32_t>(
reinterpret_cast<uint32_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU64:
cutlass::reference::device::BlockFillRandom<uint64_t>(
reinterpret_cast<uint64_t *>(pointer_),
capacity_,
seed,
dist
);
break;
default: break;
}
}
void DeviceAllocation::initialize_random_host(int seed, Distribution dist) {
if (!bytes()) {
#ifndef NDEBUG
std::cout << "Skipping initialization of size 0 allocation\n";
#endif
return;
}
if (!data()) {
throw std::runtime_error("Attempting to initialize invalid allocation.");
}
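// Fill a host-side staging buffer, then copy the initialized data to the
// device via copy_from_host() at the end of this function.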
std::vector<uint8_t> host_data(bytes());
switch (type_) {
case library::NumericTypeID::kFE4M3:
cutlass::reference::host::BlockFillRandom<cutlass::float_e4m3_t>(
reinterpret_cast<cutlass::float_e4m3_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kFE5M2:
cutlass::reference::host::BlockFillRandom<cutlass::float_e5m2_t>(
reinterpret_cast<cutlass::float_e5m2_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kF16:
cutlass::reference::host::BlockFillRandom<cutlass::half_t>(
reinterpret_cast<cutlass::half_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kBF16:
cutlass::reference::host::BlockFillRandom<cutlass::bfloat16_t>(
reinterpret_cast<cutlass::bfloat16_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kTF32:
cutlass::reference::host::BlockFillRandom<cutlass::tfloat32_t>(
reinterpret_cast<cutlass::tfloat32_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kF32:
cutlass::reference::host::BlockFillRandom<float>(
reinterpret_cast<float *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kCF16:
cutlass::reference::host::BlockFillRandom<cutlass::complex<cutlass::half_t>>(
reinterpret_cast<cutlass::complex<cutlass::half_t> *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kCBF16:
cutlass::reference::host::BlockFillRandom<cutlass::complex<cutlass::bfloat16_t>>(
reinterpret_cast<cutlass::complex<cutlass::bfloat16_t> *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kCTF32:
cutlass::reference::host::BlockFillRandom<cutlass::complex<cutlass::tfloat32_t>>(
reinterpret_cast<cutlass::complex<cutlass::tfloat32_t> *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kCF32:
cutlass::reference::host::BlockFillRandom<cutlass::complex<float>>(
reinterpret_cast<cutlass::complex<float> *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kF64:
cutlass::reference::host::BlockFillRandom<double>(
reinterpret_cast<double *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kCF64:
cutlass::reference::host::BlockFillRandom<cutlass::complex<double>>(
reinterpret_cast<cutlass::complex<double> *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS2:
cutlass::reference::host::BlockFillRandom<int2b_t>(
reinterpret_cast<int2b_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS4:
cutlass::reference::host::BlockFillRandom<int4b_t>(
reinterpret_cast<int4b_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS8:
cutlass::reference::host::BlockFillRandom<int8_t>(
reinterpret_cast<int8_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS16:
cutlass::reference::host::BlockFillRandom<int16_t>(
reinterpret_cast<int16_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS32:
cutlass::reference::host::BlockFillRandom<int32_t>(
reinterpret_cast<int32_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS64:
cutlass::reference::host::BlockFillRandom<int64_t>(
reinterpret_cast<int64_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kB1:
cutlass::reference::host::BlockFillRandom<uint1b_t>(
reinterpret_cast<uint1b_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU2:
cutlass::reference::host::BlockFillRandom<uint2b_t>(
reinterpret_cast<uint2b_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU4:
cutlass::reference::host::BlockFillRandom<uint4b_t>(
reinterpret_cast<uint4b_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU8:
cutlass::reference::host::BlockFillRandom<uint8_t>(
reinterpret_cast<uint8_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU16:
cutlass::reference::host::BlockFillRandom<uint16_t>(
reinterpret_cast<uint16_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU32:
cutlass::reference::host::BlockFillRandom<uint32_t>(
reinterpret_cast<uint32_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU64:
cutlass::reference::host::BlockFillRandom<uint64_t>(
reinterpret_cast<uint64_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
default: break;
}
copy_from_host(host_data.data());
}
void DeviceAllocation::initialize_random_sparsemeta_device(int seed, int MetaSizeInBits) {
if (!bytes()) {
#ifndef NDEBUG
std::cout << "Skipping initialization of size 0 allocation\n";
#endif
return;
}
if (!data()) {
throw std::runtime_error("Attempting to initialize invalid allocation.");
}
// Instantiate calls to CURAND here. This file takes a long time to compile for
// this reason.
switch (type_) {
case library::NumericTypeID::kU16:
cutlass::reference::device::BlockFillRandomSparseMeta<uint16_t>(
reinterpret_cast<uint16_t *>(pointer_),
capacity_,
seed,
MetaSizeInBits
);
break;
case library::NumericTypeID::kU32:
cutlass::reference::device::BlockFillRandomSparseMeta<uint32_t>(
reinterpret_cast<uint32_t *>(pointer_),
capacity_,
seed,
MetaSizeInBits
);
break;
default:
break;
}
}
void DeviceAllocation::initialize_random_sparsemeta_host(int seed, int MetaSizeInBits) {
if (!bytes()) {
#ifndef NDEBUG
std::cout << "Skipping initialization of size 0 allocation\n";
#endif
return;
}
if (!data()) {
throw std::runtime_error("Attempting to initialize invalid allocation.");
}
std::vector<uint8_t> host_data(bytes());
switch (type_) {
case library::NumericTypeID::kS16:
cutlass::reference::host::BlockFillRandomSparseMeta<uint16_t>(
reinterpret_cast<uint16_t *>(host_data.data()),
capacity_,
seed,
MetaSizeInBits
);
break;
case library::NumericTypeID::kS32:
cutlass::reference::host::BlockFillRandomSparseMeta<uint32_t>(
reinterpret_cast<uint32_t *>(host_data.data()),
capacity_,
seed,
MetaSizeInBits
);
break;
default:
break;
}
copy_from_host(host_data.data());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns true if two blocks have exactly the same value
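///
/// Illustrative use (names chosen for this example): comparing a computed
/// device buffer against a reference of the same type and capacity:
///
///   bool passed = DeviceAllocation::block_compare_equal(
///       computed.type(), computed.data(), reference.data(),
///       computed.capacity());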
bool DeviceAllocation::block_compare_equal(
library::NumericTypeID numeric_type,
void const *ptr_A,
void const *ptr_B,
size_t capacity) {
switch (numeric_type) {
case library::NumericTypeID::kFE4M3:
return reference::device::BlockCompareEqual<float_e4m3_t>(
reinterpret_cast<float_e4m3_t const *>(ptr_A),
reinterpret_cast<float_e4m3_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kFE5M2:
return reference::device::BlockCompareEqual<float_e5m2_t>(
reinterpret_cast<float_e5m2_t const *>(ptr_A),
reinterpret_cast<float_e5m2_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kF16:
return reference::device::BlockCompareEqual<half_t>(
reinterpret_cast<half_t const *>(ptr_A),
reinterpret_cast<half_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kBF16:
return reference::device::BlockCompareEqual<bfloat16_t>(
reinterpret_cast<bfloat16_t const *>(ptr_A),
reinterpret_cast<bfloat16_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kTF32:
return reference::device::BlockCompareEqual<tfloat32_t>(
reinterpret_cast<tfloat32_t const *>(ptr_A),
reinterpret_cast<tfloat32_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kF32:
return reference::device::BlockCompareEqual<float>(
reinterpret_cast<float const *>(ptr_A),
reinterpret_cast<float const *>(ptr_B),
capacity);
case library::NumericTypeID::kCF32:
return reference::device::BlockCompareEqual<cutlass::complex<float> >(
reinterpret_cast<complex<float> const *>(ptr_A),
reinterpret_cast<complex<float> const *>(ptr_B),
capacity);
case library::NumericTypeID::kCF16:
return reference::device::BlockCompareEqual<complex<half_t>>(
reinterpret_cast<complex<half_t> const *>(ptr_A),
reinterpret_cast<complex<half_t> const *>(ptr_B),
capacity);
case library::NumericTypeID::kCBF16:
return reference::device::BlockCompareEqual<complex<bfloat16_t>>(
reinterpret_cast<complex<bfloat16_t> const *>(ptr_A),
reinterpret_cast<complex<bfloat16_t> const *>(ptr_B),
capacity);
case library::NumericTypeID::kCTF32:
return reference::device::BlockCompareEqual<complex<tfloat32_t>>(
reinterpret_cast<complex<tfloat32_t> const *>(ptr_A),
reinterpret_cast<complex<tfloat32_t> const *>(ptr_B),
capacity);
case library::NumericTypeID::kF64:
return reference::device::BlockCompareEqual<double>(
reinterpret_cast<double const *>(ptr_A),
reinterpret_cast<double const *>(ptr_B),
capacity);
case library::NumericTypeID::kCF64:
return reference::device::BlockCompareEqual<complex<double>>(
reinterpret_cast<complex<double> const *>(ptr_A),
reinterpret_cast<complex<double> const *>(ptr_B),
capacity);
case library::NumericTypeID::kS2:
return reference::device::BlockCompareEqual<int2b_t>(
reinterpret_cast<int2b_t const *>(ptr_A),
reinterpret_cast<int2b_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kS4:
return reference::device::BlockCompareEqual<int4b_t>(
reinterpret_cast<int4b_t const *>(ptr_A),
reinterpret_cast<int4b_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kS8:
return reference::device::BlockCompareEqual<int8_t>(
reinterpret_cast<int8_t const *>(ptr_A),
reinterpret_cast<int8_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kS16:
return reference::device::BlockCompareEqual<int16_t>(
reinterpret_cast<int16_t const *>(ptr_A),
reinterpret_cast<int16_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kS32:
return reference::device::BlockCompareEqual<int32_t>(
reinterpret_cast<int32_t const *>(ptr_A),
reinterpret_cast<int32_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kS64:
return reference::device::BlockCompareEqual<int64_t>(
reinterpret_cast<int64_t const *>(ptr_A),
reinterpret_cast<int64_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kB1:
return reference::device::BlockCompareEqual<uint1b_t>(
reinterpret_cast<uint1b_t const *>(ptr_A),
reinterpret_cast<uint1b_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kU2:
return reference::device::BlockCompareEqual<uint2b_t>(
reinterpret_cast<uint2b_t const *>(ptr_A),
reinterpret_cast<uint2b_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kU4:
return reference::device::BlockCompareEqual<uint4b_t>(
reinterpret_cast<uint4b_t const *>(ptr_A),
reinterpret_cast<uint4b_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kU8:
return reference::device::BlockCompareEqual<uint8_t>(
reinterpret_cast<uint8_t const *>(ptr_A),
reinterpret_cast<uint8_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kU16:
return reference::device::BlockCompareEqual<uint16_t>(
reinterpret_cast<uint16_t const *>(ptr_A),
reinterpret_cast<uint16_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kU32:
return reference::device::BlockCompareEqual<uint32_t>(
reinterpret_cast<uint32_t const *>(ptr_A),
reinterpret_cast<uint32_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kU64:
return reference::device::BlockCompareEqual<uint64_t>(
reinterpret_cast<uint64_t const *>(ptr_A),
reinterpret_cast<uint64_t const *>(ptr_B),
capacity);
default:
throw std::runtime_error("Unsupported numeric type");
}
}
/// Returns true if two blocks have approximately the same value
bool DeviceAllocation::block_compare_relatively_equal(
library::NumericTypeID numeric_type,
void const *ptr_A,
void const *ptr_B,
size_t capacity,
double epsilon,
double nonzero_floor) {
switch (numeric_type) {
case library::NumericTypeID::kFE4M3:
return reference::device::BlockCompareRelativelyEqual<float_e4m3_t>(
reinterpret_cast<float_e4m3_t const *>(ptr_A),
reinterpret_cast<float_e4m3_t const *>(ptr_B),
capacity,
static_cast<float_e4m3_t>(epsilon),
static_cast<float_e4m3_t>(nonzero_floor));
case library::NumericTypeID::kFE5M2:
return reference::device::BlockCompareRelativelyEqual<float_e5m2_t>(
reinterpret_cast<float_e5m2_t const *>(ptr_A),
reinterpret_cast<float_e5m2_t const *>(ptr_B),
capacity,
static_cast<float_e5m2_t>(epsilon),
static_cast<float_e5m2_t>(nonzero_floor));
case library::NumericTypeID::kF16:
return reference::device::BlockCompareRelativelyEqual<half_t>(
reinterpret_cast<half_t const *>(ptr_A),
reinterpret_cast<half_t const *>(ptr_B),
capacity,
static_cast<half_t>(epsilon),
static_cast<half_t>(nonzero_floor));
case library::NumericTypeID::kBF16:
return reference::device::BlockCompareRelativelyEqual<bfloat16_t>(
reinterpret_cast<bfloat16_t const *>(ptr_A),
reinterpret_cast<bfloat16_t const *>(ptr_B),
capacity,
static_cast<bfloat16_t>(epsilon),
static_cast<bfloat16_t>(nonzero_floor));
case library::NumericTypeID::kTF32:
return reference::device::BlockCompareRelativelyEqual<tfloat32_t>(
reinterpret_cast<tfloat32_t const *>(ptr_A),
reinterpret_cast<tfloat32_t const *>(ptr_B),
capacity,
static_cast<tfloat32_t>(epsilon),
static_cast<tfloat32_t>(nonzero_floor));
case library::NumericTypeID::kF32:
return reference::device::BlockCompareRelativelyEqual<float>(
reinterpret_cast<float const *>(ptr_A),
reinterpret_cast<float const *>(ptr_B),
capacity,
static_cast<float>(epsilon),
static_cast<float>(nonzero_floor));
case library::NumericTypeID::kF64:
return reference::device::BlockCompareRelativelyEqual<double>(
reinterpret_cast<double const *>(ptr_A),
reinterpret_cast<double const *>(ptr_B),
capacity,
static_cast<double>(epsilon),
static_cast<double>(nonzero_floor));
case library::NumericTypeID::kS2:
return reference::device::BlockCompareRelativelyEqual<int2b_t>(
reinterpret_cast<int2b_t const *>(ptr_A),
reinterpret_cast<int2b_t const *>(ptr_B),
capacity,
static_cast<int2b_t>(epsilon),
static_cast<int2b_t>(nonzero_floor));
case library::NumericTypeID::kS4:
return reference::device::BlockCompareRelativelyEqual<int4b_t>(
reinterpret_cast<int4b_t const *>(ptr_A),
reinterpret_cast<int4b_t const *>(ptr_B),
capacity,
static_cast<int4b_t>(epsilon),
static_cast<int4b_t>(nonzero_floor));
case library::NumericTypeID::kS8:
return reference::device::BlockCompareRelativelyEqual<int8_t>(
reinterpret_cast<int8_t const *>(ptr_A),
reinterpret_cast<int8_t const *>(ptr_B),
capacity,
static_cast<int8_t>(epsilon),
static_cast<int8_t>(nonzero_floor));
case library::NumericTypeID::kS16:
return reference::device::BlockCompareRelativelyEqual<int16_t>(
reinterpret_cast<int16_t const *>(ptr_A),
reinterpret_cast<int16_t const *>(ptr_B),
capacity,
static_cast<int16_t>(epsilon),
static_cast<int16_t>(nonzero_floor));
case library::NumericTypeID::kS32:
return reference::device::BlockCompareRelativelyEqual<int32_t>(
reinterpret_cast<int32_t const *>(ptr_A),
reinterpret_cast<int32_t const *>(ptr_B),
capacity,
static_cast<int32_t>(epsilon),
static_cast<int32_t>(nonzero_floor));
case library::NumericTypeID::kS64:
return reference::device::BlockCompareRelativelyEqual<int64_t>(
reinterpret_cast<int64_t const *>(ptr_A),
reinterpret_cast<int64_t const *>(ptr_B),
capacity,
static_cast<int64_t>(epsilon),
static_cast<int64_t>(nonzero_floor));
case library::NumericTypeID::kB1:
return reference::device::BlockCompareRelativelyEqual<uint1b_t>(
reinterpret_cast<uint1b_t const *>(ptr_A),
reinterpret_cast<uint1b_t const *>(ptr_B),
capacity,
static_cast<uint1b_t>(epsilon),
static_cast<uint1b_t>(nonzero_floor));
case library::NumericTypeID::kU2:
return reference::device::BlockCompareRelativelyEqual<uint2b_t>(
reinterpret_cast<uint2b_t const *>(ptr_A),
reinterpret_cast<uint2b_t const *>(ptr_B),
capacity,
static_cast<uint2b_t>(epsilon),
static_cast<uint2b_t>(nonzero_floor));
case library::NumericTypeID::kU4:
return reference::device::BlockCompareRelativelyEqual<uint4b_t>(
reinterpret_cast<uint4b_t const *>(ptr_A),
reinterpret_cast<uint4b_t const *>(ptr_B),
capacity,
static_cast<uint4b_t>(epsilon),
static_cast<uint4b_t>(nonzero_floor));
case library::NumericTypeID::kU8:
return reference::device::BlockCompareRelativelyEqual<uint8_t>(
reinterpret_cast<uint8_t const *>(ptr_A),
reinterpret_cast<uint8_t const *>(ptr_B),
capacity,
static_cast<uint8_t>(epsilon),
static_cast<uint8_t>(nonzero_floor));
case library::NumericTypeID::kU16:
return reference::device::BlockCompareRelativelyEqual<uint16_t>(
reinterpret_cast<uint16_t const *>(ptr_A),
reinterpret_cast<uint16_t const *>(ptr_B),
capacity,
static_cast<uint16_t>(epsilon),
static_cast<uint16_t>(nonzero_floor));
case library::NumericTypeID::kU32:
return reference::device::BlockCompareRelativelyEqual<uint32_t>(
reinterpret_cast<uint32_t const *>(ptr_A),
reinterpret_cast<uint32_t const *>(ptr_B),
capacity,
static_cast<uint32_t>(epsilon),
static_cast<uint32_t>(nonzero_floor));
case library::NumericTypeID::kU64:
return reference::device::BlockCompareRelativelyEqual<uint64_t>(
reinterpret_cast<uint64_t const *>(ptr_A),
reinterpret_cast<uint64_t const *>(ptr_B),
capacity,
static_cast<uint64_t>(epsilon),
static_cast<uint64_t>(nonzero_floor));
// There is no relatively-equal comparison implemented for complex numbers.
//
// As a simplification, we require bitwise equality. This avoids false
// positives: a "pass" really means the blocks match, though a "fail" may not
// be a true failure under an appropriate epsilon.
//
case library::NumericTypeID::kCF16:
return reference::device::BlockCompareEqual<cutlass::complex<half_t> >(
reinterpret_cast<complex<half_t> const *>(ptr_A),
reinterpret_cast<complex<half_t> const *>(ptr_B),
capacity);
case library::NumericTypeID::kCF32:
return reference::device::BlockCompareEqual<cutlass::complex<float> >(
reinterpret_cast<complex<float> const *>(ptr_A),
reinterpret_cast<complex<float> const *>(ptr_B),
capacity);
case library::NumericTypeID::kCF64:
return reference::device::BlockCompareEqual<cutlass::complex<double> >(
reinterpret_cast<complex<double> const *>(ptr_A),
reinterpret_cast<complex<double> const *>(ptr_B),
capacity);
default:
{
throw std::runtime_error(std::string("Unsupported numeric type: ") + to_string(numeric_type));
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
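// vector_to_coord recursively copies vec[Rank - 1] into coord[Rank - 1] and
// then recurses on the leading Rank - 1 entries; the Rank == 1 and Rank == 0
// specializations below terminate the recursion at compile time.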
/// Permits copying dynamic vectors into static-length vectors
template <typename TensorCoord, int Rank>
struct vector_to_coord {
vector_to_coord(TensorCoord &coord, std::vector<int> const &vec) {
coord[Rank - 1] = vec.at(Rank - 1);
if (Rank > 1) {
vector_to_coord<TensorCoord, Rank - 1>(coord, vec);
}
}
vector_to_coord(TensorCoord &coord, std::vector<int64_t> const &vec) {
coord[Rank - 1] = (int)vec.at(Rank - 1);
if (Rank > 1) {
vector_to_coord<TensorCoord, Rank - 1>(coord, vec);
}
}
};
/// Permits copying dynamic vectors into static-length vectors
template <typename TensorCoord>
struct vector_to_coord<TensorCoord, 1> {
vector_to_coord(TensorCoord &coord, std::vector<int> const &vec) {
coord[0] = vec.at(0);
}
vector_to_coord(TensorCoord &coord, std::vector<int64_t> const &vec) {
coord[0] = (int)vec.at(0);
}
};
/// Permits copying dynamic vectors into static-length vectors
template <typename TensorCoord>
struct vector_to_coord<TensorCoord, 0> {
vector_to_coord(TensorCoord &coord, std::vector<int> const &vec) {
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Element, typename Layout>
static void write_tensor_csv_static_tensor_view(
std::ostream &out,
DeviceAllocation &allocation) {
Coord<Layout::kRank> extent;
Coord<Layout::kStrideRank, typename Layout::Stride::Index> stride;
if (allocation.extent().size() != Layout::kRank) {
throw std::runtime_error("Allocation extent has invalid rank");
}
if (allocation.stride().size() != Layout::kStrideRank) {
throw std::runtime_error("Allocation stride has invalid rank");
}
vector_to_coord<Coord<Layout::kRank>, Layout::kRank>(extent, allocation.extent());
vector_to_coord<Coord<Layout::kStrideRank, typename Layout::Stride::Index>,
Layout::kStrideRank>(stride, allocation.stride());
Layout layout(stride);
HostTensor<Element, Layout> host_tensor(extent, layout, false);
if (host_tensor.capacity() != allocation.batch_stride()) {
throw std::runtime_error("Host tensor capacity expected to equal allocation batch stride.");
}
host_tensor.copy_in_device_to_host(
static_cast<Element const *>(allocation.data()),
allocation.batch_stride());
TensorViewWrite(out, host_tensor.host_view());
out << "\n\n";
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T>
static void write_tensor_csv_static_type(
std::ostream &out,
DeviceAllocation &allocation) {
switch (allocation.layout()) {
case library::LayoutTypeID::kRowMajor:
write_tensor_csv_static_tensor_view<T, layout::RowMajor>(out, allocation);
break;
case library::LayoutTypeID::kColumnMajor:
write_tensor_csv_static_tensor_view<T, layout::ColumnMajor>(out, allocation);
break;
case library::LayoutTypeID::kRowMajorInterleavedK2:
write_tensor_csv_static_tensor_view<T, layout::RowMajorInterleaved<2>>(out, allocation);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK2:
write_tensor_csv_static_tensor_view<T, layout::ColumnMajorInterleaved<2>>(out, allocation);
break;
case library::LayoutTypeID::kRowMajorInterleavedK4:
write_tensor_csv_static_tensor_view<T, layout::RowMajorInterleaved<4>>(out, allocation);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK4:
write_tensor_csv_static_tensor_view<T, layout::ColumnMajorInterleaved<4>>(out, allocation);
break;
case library::LayoutTypeID::kRowMajorInterleavedK16:
write_tensor_csv_static_tensor_view<T, layout::RowMajorInterleaved<16>>(out, allocation);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK16:
write_tensor_csv_static_tensor_view<T, layout::ColumnMajorInterleaved<16>>(out, allocation);
break;
case library::LayoutTypeID::kRowMajorInterleavedK32:
write_tensor_csv_static_tensor_view<T, layout::RowMajorInterleaved<32>>(out, allocation);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK32:
write_tensor_csv_static_tensor_view<T, layout::ColumnMajorInterleaved<32>>(out, allocation);
break;
case library::LayoutTypeID::kRowMajorInterleavedK64:
write_tensor_csv_static_tensor_view<T, layout::RowMajorInterleaved<64>>(out, allocation);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK64:
write_tensor_csv_static_tensor_view<T, layout::ColumnMajorInterleaved<64>>(out, allocation);
break;
case library::LayoutTypeID::kTensorNHWC:
write_tensor_csv_static_tensor_view<T, layout::TensorNHWC>(out, allocation);
break;
case library::LayoutTypeID::kTensorNDHWC:
write_tensor_csv_static_tensor_view<T, layout::TensorNDHWC>(out, allocation);
break;
case library::LayoutTypeID::kTensorNC32HW32:
write_tensor_csv_static_tensor_view<T, layout::TensorNCxHWx<32>>(out, allocation);
break;
case library::LayoutTypeID::kTensorNC64HW64:
write_tensor_csv_static_tensor_view<T, layout::TensorNCxHWx<64>>(out, allocation);
break;
case library::LayoutTypeID::kTensorC32RSK32:
write_tensor_csv_static_tensor_view<T, layout::TensorCxRSKx<32>>(out, allocation);
break;
case library::LayoutTypeID::kTensorC64RSK64:
write_tensor_csv_static_tensor_view<T, layout::TensorCxRSKx<64>>(out, allocation);
break;
default:
throw std::runtime_error("Unhandled layout");
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Writes a tensor to csv
void DeviceAllocation::write_tensor_csv(
std::ostream &out) {
switch (this->type()) {
case library::NumericTypeID::kFE4M3:
write_tensor_csv_static_type<float_e4m3_t>(out, *this);
break;
case library::NumericTypeID::kFE5M2:
write_tensor_csv_static_type<float_e5m2_t>(out, *this);
break;
case library::NumericTypeID::kF16:
write_tensor_csv_static_type<half_t>(out, *this);
break;
case library::NumericTypeID::kBF16:
write_tensor_csv_static_type<bfloat16_t>(out, *this);
break;
case library::NumericTypeID::kTF32:
write_tensor_csv_static_type<tfloat32_t>(out, *this);
break;
case library::NumericTypeID::kF32:
write_tensor_csv_static_type<float>(out, *this);
break;
case library::NumericTypeID::kF64:
write_tensor_csv_static_type<double>(out, *this);
break;
case library::NumericTypeID::kS2:
write_tensor_csv_static_type<int2b_t>(out, *this);
break;
case library::NumericTypeID::kS4:
write_tensor_csv_static_type<int4b_t>(out, *this);
break;
case library::NumericTypeID::kS8:
write_tensor_csv_static_type<int8_t>(out, *this);
break;
case library::NumericTypeID::kS16:
write_tensor_csv_static_type<int16_t>(out, *this);
break;
case library::NumericTypeID::kS32:
write_tensor_csv_static_type<int32_t>(out, *this);
break;
case library::NumericTypeID::kS64:
write_tensor_csv_static_type<int64_t>(out, *this);
break;
case library::NumericTypeID::kB1:
write_tensor_csv_static_type<uint1b_t>(out, *this);
break;
case library::NumericTypeID::kU2:
write_tensor_csv_static_type<uint2b_t>(out, *this);
break;
case library::NumericTypeID::kU4:
write_tensor_csv_static_type<uint4b_t>(out, *this);
break;
case library::NumericTypeID::kU8:
write_tensor_csv_static_type<uint8_t>(out, *this);
break;
case library::NumericTypeID::kU16:
write_tensor_csv_static_type<uint16_t>(out, *this);
break;
case library::NumericTypeID::kU32:
write_tensor_csv_static_type<uint32_t>(out, *this);
break;
case library::NumericTypeID::kU64:
write_tensor_csv_static_type<uint64_t>(out, *this);
break;
case library::NumericTypeID::kCF16:
write_tensor_csv_static_type<cutlass::complex<half_t> >(out, *this);
break;
case library::NumericTypeID::kCF32:
write_tensor_csv_static_type<cutlass::complex<float> >(out, *this);
break;
case library::NumericTypeID::kCF64:
write_tensor_csv_static_type<cutlass::complex<double> >(out, *this);
break;
default:
throw std::runtime_error("Unsupported numeric type");
}
}
template <typename Element, typename Layout>
static void tensor_fill_tensor_view(DeviceAllocation &allocation, Element val = Element()) {
Coord<Layout::kRank> extent;
Coord<Layout::kStrideRank, typename Layout::LongIndex> stride;
if (allocation.extent().size() != Layout::kRank) {
throw std::runtime_error("Allocation extent has invalid rank");
}
if (allocation.stride().size() != Layout::kStrideRank) {
throw std::runtime_error("Allocation stride has invalid rank");
}
vector_to_coord<Coord<Layout::kRank>, Layout::kRank>(extent, allocation.extent());
vector_to_coord<Coord<Layout::kStrideRank, typename Layout::LongIndex>,
Layout::kStrideRank>(stride, allocation.stride());
TensorView<Element, Layout> view(
static_cast<Element *>(allocation.data()),
Layout(stride),
extent
);
cutlass::reference::device::TensorFill<Element, Layout>(
view,
val
);
}
template <typename Element>
static void tensor_fill(DeviceAllocation &allocation, Element val = Element()) {
switch (allocation.layout()) {
case library::LayoutTypeID::kRowMajor:
tensor_fill_tensor_view<Element, layout::RowMajor>(allocation, val);
break;
case library::LayoutTypeID::kColumnMajor:
tensor_fill_tensor_view<Element, layout::ColumnMajor>(allocation, val);
break;
case library::LayoutTypeID::kTensorNHWC:
tensor_fill_tensor_view<Element, layout::TensorNHWC>(allocation, val);
break;
case library::LayoutTypeID::kTensorNDHWC:
tensor_fill_tensor_view<Element, layout::TensorNDHWC>(allocation, val);
break;
case library::LayoutTypeID::kTensorNC32HW32:
tensor_fill_tensor_view<Element, layout::TensorNCxHWx<32>>(allocation, val);
break;
case library::LayoutTypeID::kTensorNC64HW64:
tensor_fill_tensor_view<Element, layout::TensorNCxHWx<64>>(allocation, val);
break;
case library::LayoutTypeID::kTensorC32RSK32:
tensor_fill_tensor_view<Element, layout::TensorCxRSKx<32>>(allocation, val);
break;
case library::LayoutTypeID::kTensorC64RSK64:
tensor_fill_tensor_view<Element, layout::TensorCxRSKx<64>>(allocation, val);
break;
default:
throw std::runtime_error("Unsupported layout");
break;
}
}
/// Fills a tensor uniformly with a value (most frequently used to clear the tensor)
void DeviceAllocation::fill(double val = 0.0) {
switch (this->type()) {
case library::NumericTypeID::kFE4M3:
tensor_fill<float_e4m3_t>(*this, static_cast<float_e4m3_t>(val));
break;
case library::NumericTypeID::kFE5M2:
tensor_fill<float_e5m2_t>(*this, static_cast<float_e5m2_t>(val));
break;
case library::NumericTypeID::kF16:
tensor_fill<half_t>(*this, static_cast<half_t>(val));
break;
case library::NumericTypeID::kBF16:
tensor_fill<bfloat16_t>(*this, static_cast<bfloat16_t>(val));
break;
case library::NumericTypeID::kTF32:
tensor_fill<tfloat32_t>(*this, static_cast<tfloat32_t>(val));
break;
case library::NumericTypeID::kF32:
tensor_fill<float>(*this, static_cast<float>(val));
break;
case library::NumericTypeID::kF64:
tensor_fill<double>(*this, static_cast<double>(val));
break;
case library::NumericTypeID::kS2:
tensor_fill<int2b_t>(*this, static_cast<int2b_t>(val));
break;
case library::NumericTypeID::kS4:
tensor_fill<int4b_t>(*this, static_cast<int4b_t>(val));
break;
case library::NumericTypeID::kS8:
tensor_fill<int8_t>(*this, static_cast<int8_t>(val));
break;
case library::NumericTypeID::kS16:
tensor_fill<int16_t>(*this, static_cast<int16_t>(val));
break;
case library::NumericTypeID::kS32:
tensor_fill<int32_t>(*this, static_cast<int32_t>(val));
break;
case library::NumericTypeID::kS64:
tensor_fill<int64_t>(*this, static_cast<int64_t>(val));
break;
case library::NumericTypeID::kB1:
tensor_fill<uint1b_t>(*this, static_cast<uint1b_t>(val));
break;
case library::NumericTypeID::kU2:
tensor_fill<uint2b_t>(*this, static_cast<uint2b_t>(val));
break;
case library::NumericTypeID::kU4:
tensor_fill<uint4b_t>(*this, static_cast<uint4b_t>(val));
break;
case library::NumericTypeID::kU8:
tensor_fill<uint8_t>(*this, static_cast<uint8_t>(val));
break;
case library::NumericTypeID::kU16:
tensor_fill<uint16_t>(*this, static_cast<uint16_t>(val));
break;
case library::NumericTypeID::kU32:
tensor_fill<uint32_t>(*this, static_cast<uint32_t>(val));
break;
case library::NumericTypeID::kU64:
tensor_fill<uint64_t>(*this, static_cast<uint64_t>(val));
break;
case library::NumericTypeID::kCF16:
tensor_fill<cutlass::complex<half_t> >(*this, from_real<half_t>(val));
break;
case library::NumericTypeID::kCF32:
tensor_fill<cutlass::complex<float> >(*this, from_real<float>(val));
break;
case library::NumericTypeID::kCF64:
tensor_fill<cutlass::complex<double> >(*this, from_real<double>(val));
break;
default:
throw std::runtime_error("Unsupported numeric type");
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
| 11425f077f60914a0d9a3b36317e1f077c600ecb.cu | /***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Execution environment
*/
#include <cstring>
#include "cutlass/numeric_types.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/util/reference/device/tensor_compare.h"
#include "cutlass/util/reference/device/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/library/util.h"
#include "device_allocation.h"
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
size_t DeviceAllocation::bytes(library::NumericTypeID type, size_t capacity) {
return size_t(cutlass::library::sizeof_bits(type)) * capacity / 8;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Layout>
static std::vector<int64_t> get_packed_layout_stride(std::vector<int> const &extent) {
typename Layout::TensorCoord extent_coord;
typename Layout::Stride stride_coord;
if (extent.size() != size_t(Layout::kRank)) {
throw std::runtime_error("Layout does not have same rank as extent vector.");
}
for (int i = 0; i < Layout::kRank; ++i) {
extent_coord[i] = extent.at(i);
}
std::vector<int64_t> stride;
stride.resize(Layout::kStrideRank, 0);
Layout layout = Layout::packed(extent_coord);
stride_coord = layout.stride();
for (int i = 0; i < Layout::kStrideRank; ++i) {
stride.at(i) = (int64_t)stride_coord[i];
}
return stride;
}
/// Returns the stride of a packed layout
std::vector<int64_t> DeviceAllocation::get_packed_layout(
library::LayoutTypeID layout_id,
std::vector<int> const &extent) {
std::vector<int64_t> stride;
switch (layout_id) {
case library::LayoutTypeID::kColumnMajor:
stride = get_packed_layout_stride<cutlass::layout::ColumnMajor>(extent);
break;
case library::LayoutTypeID::kRowMajor:
stride = get_packed_layout_stride<cutlass::layout::RowMajor>(extent);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK2:
stride = get_packed_layout_stride<cutlass::layout::ColumnMajorInterleaved<2>>(extent);
break;
case library::LayoutTypeID::kRowMajorInterleavedK2:
stride = get_packed_layout_stride<cutlass::layout::RowMajorInterleaved<2>>(extent);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK4:
stride = get_packed_layout_stride<cutlass::layout::ColumnMajorInterleaved<4>>(extent);
break;
case library::LayoutTypeID::kRowMajorInterleavedK4:
stride = get_packed_layout_stride<cutlass::layout::RowMajorInterleaved<4>>(extent);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK16:
stride = get_packed_layout_stride<cutlass::layout::ColumnMajorInterleaved<16>>(extent);
break;
case library::LayoutTypeID::kRowMajorInterleavedK16:
stride = get_packed_layout_stride<cutlass::layout::RowMajorInterleaved<16>>(extent);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK32:
stride = get_packed_layout_stride<cutlass::layout::ColumnMajorInterleaved<32>>(extent);
break;
case library::LayoutTypeID::kRowMajorInterleavedK32:
stride = get_packed_layout_stride<cutlass::layout::RowMajorInterleaved<32>>(extent);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK64:
stride = get_packed_layout_stride<cutlass::layout::ColumnMajorInterleaved<64>>(extent);
break;
case library::LayoutTypeID::kRowMajorInterleavedK64:
stride = get_packed_layout_stride<cutlass::layout::RowMajorInterleaved<64>>(extent);
break;
case library::LayoutTypeID::kTensorNCHW:
stride = get_packed_layout_stride<cutlass::layout::TensorNCHW>(extent);
break;
case library::LayoutTypeID::kTensorNHWC:
stride = get_packed_layout_stride<cutlass::layout::TensorNHWC>(extent);
break;
case library::LayoutTypeID::kTensorNDHWC:
stride = get_packed_layout_stride<cutlass::layout::TensorNDHWC>(extent);
break;
case library::LayoutTypeID::kTensorNC32HW32:
stride = get_packed_layout_stride<cutlass::layout::TensorNCxHWx<32>>(extent);
break;
case library::LayoutTypeID::kTensorNC64HW64:
stride = get_packed_layout_stride<cutlass::layout::TensorNCxHWx<64>>(extent);
break;
case library::LayoutTypeID::kTensorC32RSK32:
stride = get_packed_layout_stride<cutlass::layout::TensorCxRSKx<32>>(extent);
break;
case library::LayoutTypeID::kTensorC64RSK64:
stride = get_packed_layout_stride<cutlass::layout::TensorCxRSKx<64>>(extent);
break;
default: break;
}
return stride;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template to use CUTLASS Layout functions to construct a layout in a caller-provided buffer and compute its capacity
template <typename Layout>
static size_t construct_layout_(
void *bytes,
library::LayoutTypeID layout_id,
std::vector<int> const &extent,
std::vector<int64_t> &stride) {
if (extent.size() != Layout::kRank) {
throw std::runtime_error(
"Layout must have same rank as extent vector.");
}
if (Layout::kStrideRank && stride.empty()) {
stride = get_packed_layout_stride<Layout>(extent);
return construct_layout_<Layout>(
bytes,
layout_id,
extent,
stride);
}
else if (Layout::kStrideRank && stride.size() != Layout::kStrideRank) {
throw std::runtime_error(
"Layout requires either empty stride or stride vector matching Layout::kStrideRank");
}
typename Layout::Stride stride_coord;
for (int i = 0; i < Layout::kStrideRank; ++i) {
stride_coord[i] = (int)stride.at(i);
}
typename Layout::TensorCoord extent_coord;
for (int i = 0; i < Layout::kRank; ++i) {
extent_coord[i] = extent.at(i);
}
// Construct the CUTLASS layout object from the stride object
Layout layout(stride_coord);
// Pack it into bytes
if (bytes) {
*reinterpret_cast<Layout *>(bytes) = layout;
}
// Return capacity
size_t capacity_ = layout.capacity(extent_coord);
return capacity_;
}
/// Constructs the layout in the provided buffer (when non-null) and returns the capacity needed, in elements
size_t DeviceAllocation::construct_layout(
void *bytes,
library::LayoutTypeID layout_id,
std::vector<int> const &extent,
std::vector<int64_t> &stride) {
switch (layout_id) {
case library::LayoutTypeID::kColumnMajor:
return construct_layout_<cutlass::layout::ColumnMajor>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kRowMajor:
return construct_layout_<cutlass::layout::RowMajor>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kColumnMajorInterleavedK2:
return construct_layout_<cutlass::layout::ColumnMajorInterleaved<2>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kRowMajorInterleavedK2:
return construct_layout_<cutlass::layout::RowMajorInterleaved<2>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kColumnMajorInterleavedK4:
return construct_layout_<cutlass::layout::ColumnMajorInterleaved<4>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kRowMajorInterleavedK4:
return construct_layout_<cutlass::layout::RowMajorInterleaved<4>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kColumnMajorInterleavedK16:
return construct_layout_<cutlass::layout::ColumnMajorInterleaved<16>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kRowMajorInterleavedK16:
return construct_layout_<cutlass::layout::RowMajorInterleaved<16>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kColumnMajorInterleavedK32:
return construct_layout_<cutlass::layout::ColumnMajorInterleaved<32>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kRowMajorInterleavedK32:
return construct_layout_<cutlass::layout::RowMajorInterleaved<32>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kColumnMajorInterleavedK64:
return construct_layout_<cutlass::layout::ColumnMajorInterleaved<64>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kRowMajorInterleavedK64:
return construct_layout_<cutlass::layout::RowMajorInterleaved<64>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kTensorNCHW:
return construct_layout_<cutlass::layout::TensorNCHW>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kTensorNHWC:
return construct_layout_<cutlass::layout::TensorNHWC>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kTensorNDHWC:
return construct_layout_<cutlass::layout::TensorNDHWC>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kTensorNC32HW32:
return construct_layout_<cutlass::layout::TensorNCxHWx<32>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kTensorNC64HW64:
return construct_layout_<cutlass::layout::TensorNCxHWx<64>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kTensorC32RSK32:
return construct_layout_<cutlass::layout::TensorCxRSKx<32>>(bytes, layout_id, extent, stride);
case library::LayoutTypeID::kTensorC64RSK64:
return construct_layout_<cutlass::layout::TensorCxRSKx<64>>(bytes, layout_id, extent, stride);
default: break;
}
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
DeviceAllocation::DeviceAllocation():
type_(library::NumericTypeID::kInvalid),
batch_stride_(0),
capacity_(0),
pointer_(nullptr),
layout_(library::LayoutTypeID::kUnknown),
batch_count_(1) {
}
DeviceAllocation::DeviceAllocation(
library::NumericTypeID type,
size_t capacity
):
type_(type), batch_stride_(capacity), capacity_(capacity), pointer_(nullptr),
layout_(library::LayoutTypeID::kUnknown), batch_count_(1) {
cudaError_t result = cudaMalloc((void **)&pointer_, bytes(type, capacity));
if (result != cudaSuccess) {
type_ = library::NumericTypeID::kInvalid;
capacity_ = 0;
pointer_ = nullptr;
throw std::bad_alloc();
}
}
DeviceAllocation::DeviceAllocation(
library::NumericTypeID type,
library::LayoutTypeID layout_id,
std::vector<int> const &extent,
std::vector<int64_t> const &stride,
int batch_count
):
type_(type), batch_stride_(size_t(0)), capacity_(size_t(0)), pointer_(nullptr), batch_count_(1) {
reset(type, layout_id, extent, stride, batch_count);
}
DeviceAllocation::~DeviceAllocation() {
if (pointer_) {
cudaFree(pointer_);
}
}
DeviceAllocation &DeviceAllocation::reset() {
if (pointer_) {
cudaFree(pointer_);
}
type_ = library::NumericTypeID::kInvalid;
batch_stride_ = 0;
capacity_ = 0;
pointer_ = nullptr;
layout_ = library::LayoutTypeID::kUnknown;
stride_.clear();
extent_.clear();
tensor_ref_buffer_.clear();
batch_count_ = 1;
return *this;
}
DeviceAllocation &DeviceAllocation::reset(library::NumericTypeID type, size_t capacity) {
reset();
type_ = type;
batch_stride_ = capacity;
capacity_ = capacity;
cudaError_t result = cudaMalloc((void **)&pointer_, bytes(type_, capacity_));
if (result != cudaSuccess) {
throw std::bad_alloc();
}
layout_ = library::LayoutTypeID::kUnknown;
stride_.clear();
extent_.clear();
batch_count_ = 1;
tensor_ref_buffer_.resize(sizeof(pointer_), 0);
std::memcpy(tensor_ref_buffer_.data(), &pointer_, sizeof(pointer_));
return *this;
}
/// Allocates memory for a given layout and tensor
DeviceAllocation &DeviceAllocation::reset(
library::NumericTypeID type,
library::LayoutTypeID layout_id,
std::vector<int> const &extent,
std::vector<int64_t> const &stride,
int batch_count) {
reset();
tensor_ref_buffer_.resize(sizeof(pointer_) + (sizeof(int64_t) * library::get_layout_stride_rank(layout_id)), 0);
type_ = type;
layout_ = layout_id;
stride_ = stride;
extent_ = extent;
batch_count_ = batch_count;
batch_stride_ = construct_layout(
tensor_ref_buffer_.data() + sizeof(pointer_),
layout_id,
extent,
stride_);
capacity_ = batch_stride_ * batch_count_;
cudaError_t result = cudaMalloc((void **)&pointer_, bytes(type, capacity_));
if (result != cudaSuccess) {
throw std::bad_alloc();
}
std::memcpy(tensor_ref_buffer_.data(), &pointer_, sizeof(pointer_));
return *this;
}
bool DeviceAllocation::good() const {
return (capacity_ && pointer_);
}
library::NumericTypeID DeviceAllocation::type() const {
return type_;
}
void *DeviceAllocation::data() const {
return pointer_;
}
void *DeviceAllocation::batch_data(int batch_idx) const {
return static_cast<char *>(data()) + batch_stride_bytes() * batch_idx;
}
library::LayoutTypeID DeviceAllocation::layout() const {
return layout_;
}
std::vector<int64_t> const & DeviceAllocation::stride() const {
return stride_;
}
/// Gets the extent vector
std::vector<int> const & DeviceAllocation::extent() const {
return extent_;
}
/// Gets the number of adjacent tensors in memory
int DeviceAllocation::batch_count() const {
return batch_count_;
}
/// Gets the stride (in units of elements) between items
int64_t DeviceAllocation::batch_stride() const {
return batch_stride_;
}
/// Gets the stride (in units of bytes) between items
int64_t DeviceAllocation::batch_stride_bytes() const {
return bytes(type_, batch_stride_);
}
size_t DeviceAllocation::capacity() const {
return capacity_;
}
size_t DeviceAllocation::bytes() const {
return bytes(type_, capacity_);
}
/// Copies from an equivalent-sized tensor in device memory
void DeviceAllocation::copy_from_device(void const *ptr) {
if (!bytes()) {
#ifndef NDEBUG
std::cout << "Skipping copy of size 0 allocation\n";
#endif
return;
}
cudaError_t result = cudaMemcpy(data(), ptr, bytes(), cudaMemcpyDeviceToDevice);
if (result != cudaSuccess) {
throw std::runtime_error("Failed device-to-device copy");
}
}
/// Copies from an equivalent-sized tensor in host memory
void DeviceAllocation::copy_from_host(void const *ptr) {
if (!bytes()) {
#ifndef NDEBUG
std::cout << "Skipping copy of size 0 allocation\n";
#endif
return;
}
cudaError_t result = cudaMemcpy(data(), ptr, bytes(), cudaMemcpyHostToDevice);
if (result != cudaSuccess) {
throw std::runtime_error("Failed host-to-device copy");
}
}
/// Copies to an equivalent-sized tensor in host memory
void DeviceAllocation::copy_to_host(void *ptr) {
if (!bytes()) {
#ifndef NDEBUG
std::cout << "Skipping copy of size 0 allocation\n";
#endif
return;
}
cudaError_t result = cudaMemcpy(ptr, data(), bytes(), cudaMemcpyDeviceToHost);
if (result != cudaSuccess) {
throw std::runtime_error("Failed device-to-host copy");
}
}
void DeviceAllocation::initialize_random_device(int seed, Distribution dist) {
if (!bytes()) {
#ifndef NDEBUG
std::cout << "Skipping initialization of size 0 allocation\n";
#endif
return;
}
if (!data()) {
throw std::runtime_error("Attempting to initialize invalid allocation.");
}
// Instantiate calls to CURAND here. This file takes a long time to compile for
// this reason.
switch (type_) {
case library::NumericTypeID::kF16:
cutlass::reference::device::BlockFillRandom<cutlass::half_t>(
reinterpret_cast<cutlass::half_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kBF16:
cutlass::reference::device::BlockFillRandom<cutlass::bfloat16_t>(
reinterpret_cast<cutlass::bfloat16_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kTF32:
cutlass::reference::device::BlockFillRandom<cutlass::tfloat32_t>(
reinterpret_cast<cutlass::tfloat32_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kF32:
cutlass::reference::device::BlockFillRandom<float>(
reinterpret_cast<float *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kCBF16:
cutlass::reference::device::BlockFillRandom<complex<bfloat16_t>>(
reinterpret_cast<complex<bfloat16_t> *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kCTF32:
cutlass::reference::device::BlockFillRandom<cutlass::complex<cutlass::tfloat32_t>>(
reinterpret_cast<cutlass::complex<cutlass::tfloat32_t> *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kCF32:
cutlass::reference::device::BlockFillRandom<cutlass::complex<float>>(
reinterpret_cast<cutlass::complex<float> *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kFE4M3:
cutlass::reference::device::BlockFillRandom<cutlass::float_e4m3_t>(
reinterpret_cast<cutlass::float_e4m3_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kFE5M2:
cutlass::reference::device::BlockFillRandom<cutlass::float_e5m2_t>(
reinterpret_cast<cutlass::float_e5m2_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kF64:
cutlass::reference::device::BlockFillRandom<double>(
reinterpret_cast<double *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kCF64:
cutlass::reference::device::BlockFillRandom<complex<double>>(
reinterpret_cast<complex<double> *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS2:
cutlass::reference::device::BlockFillRandom<int2b_t>(
reinterpret_cast<int2b_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS4:
cutlass::reference::device::BlockFillRandom<int4b_t>(
reinterpret_cast<int4b_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS8:
cutlass::reference::device::BlockFillRandom<int8_t>(
reinterpret_cast<int8_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS16:
cutlass::reference::device::BlockFillRandom<int16_t>(
reinterpret_cast<int16_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS32:
cutlass::reference::device::BlockFillRandom<int32_t>(
reinterpret_cast<int32_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS64:
cutlass::reference::device::BlockFillRandom<int64_t>(
reinterpret_cast<int64_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kB1:
cutlass::reference::device::BlockFillRandom<uint1b_t>(
reinterpret_cast<uint1b_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU2:
cutlass::reference::device::BlockFillRandom<uint2b_t>(
reinterpret_cast<uint2b_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU4:
cutlass::reference::device::BlockFillRandom<uint4b_t>(
reinterpret_cast<uint4b_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU8:
cutlass::reference::device::BlockFillRandom<uint8_t>(
reinterpret_cast<uint8_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU16:
cutlass::reference::device::BlockFillRandom<uint16_t>(
reinterpret_cast<uint16_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU32:
cutlass::reference::device::BlockFillRandom<uint32_t>(
reinterpret_cast<uint32_t *>(pointer_),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU64:
cutlass::reference::device::BlockFillRandom<uint64_t>(
reinterpret_cast<uint64_t *>(pointer_),
capacity_,
seed,
dist
);
break;
default: break;
}
}
void DeviceAllocation::initialize_random_host(int seed, Distribution dist) {
if (!bytes()) {
#ifndef NDEBUG
std::cout << "Skipping initialization of size 0 allocation\n";
#endif
return;
}
if (!data()) {
throw std::runtime_error("Attempting to initialize invalid allocation.");
}
std::vector<uint8_t> host_data(bytes());
switch (type_) {
case library::NumericTypeID::kFE4M3:
cutlass::reference::host::BlockFillRandom<cutlass::float_e4m3_t>(
reinterpret_cast<cutlass::float_e4m3_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kFE5M2:
cutlass::reference::host::BlockFillRandom<cutlass::float_e5m2_t>(
reinterpret_cast<cutlass::float_e5m2_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kF16:
cutlass::reference::host::BlockFillRandom<cutlass::half_t>(
reinterpret_cast<cutlass::half_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kBF16:
cutlass::reference::host::BlockFillRandom<cutlass::bfloat16_t>(
reinterpret_cast<cutlass::bfloat16_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kTF32:
cutlass::reference::host::BlockFillRandom<cutlass::tfloat32_t>(
reinterpret_cast<cutlass::tfloat32_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kF32:
cutlass::reference::host::BlockFillRandom<float>(
reinterpret_cast<float *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kCF16:
cutlass::reference::host::BlockFillRandom<cutlass::complex<cutlass::half_t>>(
reinterpret_cast<cutlass::complex<cutlass::half_t> *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kCBF16:
cutlass::reference::host::BlockFillRandom<cutlass::complex<cutlass::bfloat16_t>>(
reinterpret_cast<cutlass::complex<cutlass::bfloat16_t> *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kCTF32:
cutlass::reference::host::BlockFillRandom<cutlass::complex<cutlass::tfloat32_t>>(
reinterpret_cast<cutlass::complex<cutlass::tfloat32_t> *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kCF32:
cutlass::reference::host::BlockFillRandom<cutlass::complex<float>>(
reinterpret_cast<cutlass::complex<float> *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kF64:
cutlass::reference::host::BlockFillRandom<double>(
reinterpret_cast<double *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kCF64:
cutlass::reference::host::BlockFillRandom<cutlass::complex<double>>(
reinterpret_cast<cutlass::complex<double> *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS2:
cutlass::reference::host::BlockFillRandom<int2b_t>(
reinterpret_cast<int2b_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS4:
cutlass::reference::host::BlockFillRandom<int4b_t>(
reinterpret_cast<int4b_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS8:
cutlass::reference::host::BlockFillRandom<int8_t>(
reinterpret_cast<int8_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS16:
cutlass::reference::host::BlockFillRandom<int16_t>(
reinterpret_cast<int16_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS32:
cutlass::reference::host::BlockFillRandom<int32_t>(
reinterpret_cast<int32_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kS64:
cutlass::reference::host::BlockFillRandom<int64_t>(
reinterpret_cast<int64_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kB1:
cutlass::reference::host::BlockFillRandom<uint1b_t>(
reinterpret_cast<uint1b_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU2:
cutlass::reference::host::BlockFillRandom<uint2b_t>(
reinterpret_cast<uint2b_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU4:
cutlass::reference::host::BlockFillRandom<uint4b_t>(
reinterpret_cast<uint4b_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU8:
cutlass::reference::host::BlockFillRandom<uint8_t>(
reinterpret_cast<uint8_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU16:
cutlass::reference::host::BlockFillRandom<uint16_t>(
reinterpret_cast<uint16_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU32:
cutlass::reference::host::BlockFillRandom<uint32_t>(
reinterpret_cast<uint32_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
case library::NumericTypeID::kU64:
cutlass::reference::host::BlockFillRandom<uint64_t>(
reinterpret_cast<uint64_t *>(host_data.data()),
capacity_,
seed,
dist
);
break;
default: break;
}
copy_from_host(host_data.data());
}
void DeviceAllocation::initialize_random_sparsemeta_device(int seed, int MetaSizeInBits) {
if (!bytes()) {
#ifndef NDEBUG
std::cout << "Skipping initialization of size 0 allocation\n";
#endif
return;
}
if (!data()) {
throw std::runtime_error("Attempting to initialize invalid allocation.");
}
// Instantiate calls to CURAND here. This file takes a long time to compile for
// this reason.
switch (type_) {
case library::NumericTypeID::kU16:
cutlass::reference::device::BlockFillRandomSparseMeta<uint16_t>(
reinterpret_cast<uint16_t *>(pointer_),
capacity_,
seed,
MetaSizeInBits
);
break;
case library::NumericTypeID::kU32:
cutlass::reference::device::BlockFillRandomSparseMeta<uint32_t>(
reinterpret_cast<uint32_t *>(pointer_),
capacity_,
seed,
MetaSizeInBits
);
break;
default:
break;
}
}
void DeviceAllocation::initialize_random_sparsemeta_host(int seed, int MetaSizeInBits) {
if (!bytes()) {
#ifndef NDEBUG
std::cout << "Skipping initialization of size 0 allocation\n";
#endif
return;
}
if (!data()) {
throw std::runtime_error("Attempting to initialize invalid allocation.");
}
std::vector<uint8_t> host_data(bytes());
switch (type_) {
case library::NumericTypeID::kS16:
cutlass::reference::host::BlockFillRandomSparseMeta<uint16_t>(
reinterpret_cast<uint16_t *>(host_data.data()),
capacity_,
seed,
MetaSizeInBits
);
break;
case library::NumericTypeID::kS32:
cutlass::reference::host::BlockFillRandomSparseMeta<uint32_t>(
reinterpret_cast<uint32_t *>(host_data.data()),
capacity_,
seed,
MetaSizeInBits
);
break;
default:
break;
}
copy_from_host(host_data.data());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns true if two blocks have exactly the same value
bool DeviceAllocation::block_compare_equal(
library::NumericTypeID numeric_type,
void const *ptr_A,
void const *ptr_B,
size_t capacity) {
switch (numeric_type) {
case library::NumericTypeID::kFE4M3:
return reference::device::BlockCompareEqual<float_e4m3_t>(
reinterpret_cast<float_e4m3_t const *>(ptr_A),
reinterpret_cast<float_e4m3_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kFE5M2:
return reference::device::BlockCompareEqual<float_e5m2_t>(
reinterpret_cast<float_e5m2_t const *>(ptr_A),
reinterpret_cast<float_e5m2_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kF16:
return reference::device::BlockCompareEqual<half_t>(
reinterpret_cast<half_t const *>(ptr_A),
reinterpret_cast<half_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kBF16:
return reference::device::BlockCompareEqual<bfloat16_t>(
reinterpret_cast<bfloat16_t const *>(ptr_A),
reinterpret_cast<bfloat16_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kTF32:
return reference::device::BlockCompareEqual<tfloat32_t>(
reinterpret_cast<tfloat32_t const *>(ptr_A),
reinterpret_cast<tfloat32_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kF32:
return reference::device::BlockCompareEqual<float>(
reinterpret_cast<float const *>(ptr_A),
reinterpret_cast<float const *>(ptr_B),
capacity);
case library::NumericTypeID::kCF32:
return reference::device::BlockCompareEqual<cutlass::complex<float> >(
reinterpret_cast<complex<float> const *>(ptr_A),
reinterpret_cast<complex<float> const *>(ptr_B),
capacity);
case library::NumericTypeID::kCF16:
return reference::device::BlockCompareEqual<complex<half_t>>(
reinterpret_cast<complex<half_t> const *>(ptr_A),
reinterpret_cast<complex<half_t> const *>(ptr_B),
capacity);
case library::NumericTypeID::kCBF16:
return reference::device::BlockCompareEqual<complex<bfloat16_t>>(
reinterpret_cast<complex<bfloat16_t> const *>(ptr_A),
reinterpret_cast<complex<bfloat16_t> const *>(ptr_B),
capacity);
case library::NumericTypeID::kCTF32:
return reference::device::BlockCompareEqual<complex<tfloat32_t>>(
reinterpret_cast<complex<tfloat32_t> const *>(ptr_A),
reinterpret_cast<complex<tfloat32_t> const *>(ptr_B),
capacity);
case library::NumericTypeID::kF64:
return reference::device::BlockCompareEqual<double>(
reinterpret_cast<double const *>(ptr_A),
reinterpret_cast<double const *>(ptr_B),
capacity);
case library::NumericTypeID::kCF64:
return reference::device::BlockCompareEqual<complex<double>>(
reinterpret_cast<complex<double> const *>(ptr_A),
reinterpret_cast<complex<double> const *>(ptr_B),
capacity);
case library::NumericTypeID::kS2:
return reference::device::BlockCompareEqual<int2b_t>(
reinterpret_cast<int2b_t const *>(ptr_A),
reinterpret_cast<int2b_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kS4:
return reference::device::BlockCompareEqual<int4b_t>(
reinterpret_cast<int4b_t const *>(ptr_A),
reinterpret_cast<int4b_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kS8:
return reference::device::BlockCompareEqual<int8_t>(
reinterpret_cast<int8_t const *>(ptr_A),
reinterpret_cast<int8_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kS16:
return reference::device::BlockCompareEqual<int16_t>(
reinterpret_cast<int16_t const *>(ptr_A),
reinterpret_cast<int16_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kS32:
return reference::device::BlockCompareEqual<int32_t>(
reinterpret_cast<int32_t const *>(ptr_A),
reinterpret_cast<int32_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kS64:
return reference::device::BlockCompareEqual<int64_t>(
reinterpret_cast<int64_t const *>(ptr_A),
reinterpret_cast<int64_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kB1:
return reference::device::BlockCompareEqual<uint1b_t>(
reinterpret_cast<uint1b_t const *>(ptr_A),
reinterpret_cast<uint1b_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kU2:
return reference::device::BlockCompareEqual<uint2b_t>(
reinterpret_cast<uint2b_t const *>(ptr_A),
reinterpret_cast<uint2b_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kU4:
return reference::device::BlockCompareEqual<uint4b_t>(
reinterpret_cast<uint4b_t const *>(ptr_A),
reinterpret_cast<uint4b_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kU8:
return reference::device::BlockCompareEqual<uint8_t>(
reinterpret_cast<uint8_t const *>(ptr_A),
reinterpret_cast<uint8_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kU16:
return reference::device::BlockCompareEqual<uint16_t>(
reinterpret_cast<uint16_t const *>(ptr_A),
reinterpret_cast<uint16_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kU32:
return reference::device::BlockCompareEqual<uint32_t>(
reinterpret_cast<uint32_t const *>(ptr_A),
reinterpret_cast<uint32_t const *>(ptr_B),
capacity);
case library::NumericTypeID::kU64:
return reference::device::BlockCompareEqual<uint64_t>(
reinterpret_cast<uint64_t const *>(ptr_A),
reinterpret_cast<uint64_t const *>(ptr_B),
capacity);
default:
throw std::runtime_error("Unsupported numeric type");
}
}
/// Returns true if two blocks have approximately the same value
bool DeviceAllocation::block_compare_relatively_equal(
library::NumericTypeID numeric_type,
void const *ptr_A,
void const *ptr_B,
size_t capacity,
double epsilon,
double nonzero_floor) {
switch (numeric_type) {
case library::NumericTypeID::kFE4M3:
return reference::device::BlockCompareRelativelyEqual<float_e4m3_t>(
reinterpret_cast<float_e4m3_t const *>(ptr_A),
reinterpret_cast<float_e4m3_t const *>(ptr_B),
capacity,
static_cast<float_e4m3_t>(epsilon),
static_cast<float_e4m3_t>(nonzero_floor));
case library::NumericTypeID::kFE5M2:
return reference::device::BlockCompareRelativelyEqual<float_e5m2_t>(
reinterpret_cast<float_e5m2_t const *>(ptr_A),
reinterpret_cast<float_e5m2_t const *>(ptr_B),
capacity,
static_cast<float_e5m2_t>(epsilon),
static_cast<float_e5m2_t>(nonzero_floor));
case library::NumericTypeID::kF16:
return reference::device::BlockCompareRelativelyEqual<half_t>(
reinterpret_cast<half_t const *>(ptr_A),
reinterpret_cast<half_t const *>(ptr_B),
capacity,
static_cast<half_t>(epsilon),
static_cast<half_t>(nonzero_floor));
case library::NumericTypeID::kBF16:
return reference::device::BlockCompareRelativelyEqual<bfloat16_t>(
reinterpret_cast<bfloat16_t const *>(ptr_A),
reinterpret_cast<bfloat16_t const *>(ptr_B),
capacity,
static_cast<bfloat16_t>(epsilon),
static_cast<bfloat16_t>(nonzero_floor));
case library::NumericTypeID::kTF32:
return reference::device::BlockCompareRelativelyEqual<tfloat32_t>(
reinterpret_cast<tfloat32_t const *>(ptr_A),
reinterpret_cast<tfloat32_t const *>(ptr_B),
capacity,
static_cast<tfloat32_t>(epsilon),
static_cast<tfloat32_t>(nonzero_floor));
case library::NumericTypeID::kF32:
return reference::device::BlockCompareRelativelyEqual<float>(
reinterpret_cast<float const *>(ptr_A),
reinterpret_cast<float const *>(ptr_B),
capacity,
static_cast<float>(epsilon),
static_cast<float>(nonzero_floor));
case library::NumericTypeID::kF64:
return reference::device::BlockCompareRelativelyEqual<double>(
reinterpret_cast<double const *>(ptr_A),
reinterpret_cast<double const *>(ptr_B),
capacity,
static_cast<double>(epsilon),
static_cast<double>(nonzero_floor));
case library::NumericTypeID::kS2:
return reference::device::BlockCompareRelativelyEqual<int2b_t>(
reinterpret_cast<int2b_t const *>(ptr_A),
reinterpret_cast<int2b_t const *>(ptr_B),
capacity,
static_cast<int2b_t>(epsilon),
static_cast<int2b_t>(nonzero_floor));
case library::NumericTypeID::kS4:
return reference::device::BlockCompareRelativelyEqual<int4b_t>(
reinterpret_cast<int4b_t const *>(ptr_A),
reinterpret_cast<int4b_t const *>(ptr_B),
capacity,
static_cast<int4b_t>(epsilon),
static_cast<int4b_t>(nonzero_floor));
case library::NumericTypeID::kS8:
return reference::device::BlockCompareRelativelyEqual<int8_t>(
reinterpret_cast<int8_t const *>(ptr_A),
reinterpret_cast<int8_t const *>(ptr_B),
capacity,
static_cast<int8_t>(epsilon),
static_cast<int8_t>(nonzero_floor));
case library::NumericTypeID::kS16:
return reference::device::BlockCompareRelativelyEqual<int16_t>(
reinterpret_cast<int16_t const *>(ptr_A),
reinterpret_cast<int16_t const *>(ptr_B),
capacity,
static_cast<int16_t>(epsilon),
static_cast<int16_t>(nonzero_floor));
case library::NumericTypeID::kS32:
return reference::device::BlockCompareRelativelyEqual<int32_t>(
reinterpret_cast<int32_t const *>(ptr_A),
reinterpret_cast<int32_t const *>(ptr_B),
capacity,
static_cast<int32_t>(epsilon),
static_cast<int32_t>(nonzero_floor));
case library::NumericTypeID::kS64:
return reference::device::BlockCompareRelativelyEqual<int64_t>(
reinterpret_cast<int64_t const *>(ptr_A),
reinterpret_cast<int64_t const *>(ptr_B),
capacity,
static_cast<int64_t>(epsilon),
static_cast<int64_t>(nonzero_floor));
case library::NumericTypeID::kB1:
return reference::device::BlockCompareRelativelyEqual<uint1b_t>(
reinterpret_cast<uint1b_t const *>(ptr_A),
reinterpret_cast<uint1b_t const *>(ptr_B),
capacity,
static_cast<uint1b_t>(epsilon),
static_cast<uint1b_t>(nonzero_floor));
case library::NumericTypeID::kU2:
return reference::device::BlockCompareRelativelyEqual<uint2b_t>(
reinterpret_cast<uint2b_t const *>(ptr_A),
reinterpret_cast<uint2b_t const *>(ptr_B),
capacity,
static_cast<uint2b_t>(epsilon),
static_cast<uint2b_t>(nonzero_floor));
case library::NumericTypeID::kU4:
return reference::device::BlockCompareRelativelyEqual<uint4b_t>(
reinterpret_cast<uint4b_t const *>(ptr_A),
reinterpret_cast<uint4b_t const *>(ptr_B),
capacity,
static_cast<uint4b_t>(epsilon),
static_cast<uint4b_t>(nonzero_floor));
case library::NumericTypeID::kU8:
return reference::device::BlockCompareRelativelyEqual<uint8_t>(
reinterpret_cast<uint8_t const *>(ptr_A),
reinterpret_cast<uint8_t const *>(ptr_B),
capacity,
static_cast<uint8_t>(epsilon),
static_cast<uint8_t>(nonzero_floor));
case library::NumericTypeID::kU16:
return reference::device::BlockCompareRelativelyEqual<uint16_t>(
reinterpret_cast<uint16_t const *>(ptr_A),
reinterpret_cast<uint16_t const *>(ptr_B),
capacity,
static_cast<uint16_t>(epsilon),
static_cast<uint16_t>(nonzero_floor));
case library::NumericTypeID::kU32:
return reference::device::BlockCompareRelativelyEqual<uint32_t>(
reinterpret_cast<uint32_t const *>(ptr_A),
reinterpret_cast<uint32_t const *>(ptr_B),
capacity,
static_cast<uint32_t>(epsilon),
static_cast<uint32_t>(nonzero_floor));
case library::NumericTypeID::kU64:
return reference::device::BlockCompareRelativelyEqual<uint64_t>(
reinterpret_cast<uint64_t const *>(ptr_A),
reinterpret_cast<uint64_t const *>(ptr_B),
capacity,
static_cast<uint64_t>(epsilon),
static_cast<uint64_t>(nonzero_floor));
// No relatively equal comparison for complex numbers.
//
// As a simplification, we can require bitwise equality. This avoids false positives.
// (i.e. "pass" really means passing. "Fail" may not actually mean failure given appropriate epsilon.)
//
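// (Editor's note: a relative test would first need a norm for the error term, e.g.
// cuCabs(a - b) <= epsilon * max(cuCabs(b), nonzero_floor); exact equality sidesteps
// that choice at the cost of stricter verification.)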
case library::NumericTypeID::kCF16:
return reference::device::BlockCompareEqual<cutlass::complex<half_t> >(
reinterpret_cast<complex<half_t> const *>(ptr_A),
reinterpret_cast<complex<half_t> const *>(ptr_B),
capacity);
case library::NumericTypeID::kCF32:
return reference::device::BlockCompareEqual<cutlass::complex<float> >(
reinterpret_cast<complex<float> const *>(ptr_A),
reinterpret_cast<complex<float> const *>(ptr_B),
capacity);
case library::NumericTypeID::kCF64:
return reference::device::BlockCompareEqual<cutlass::complex<double> >(
reinterpret_cast<complex<double> const *>(ptr_A),
reinterpret_cast<complex<double> const *>(ptr_B),
capacity);
default:
{
throw std::runtime_error(std::string("Unsupported numeric type: ") + to_string(numeric_type));
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Permits copying dynamic vectors into static-length vectors
template <typename TensorCoord, int Rank>
struct vector_to_coord {
vector_to_coord(TensorCoord &coord, std::vector<int> const &vec) {
coord[Rank - 1] = vec.at(Rank - 1);
if (Rank > 1) {
vector_to_coord<TensorCoord, Rank - 1>(coord, vec);
}
}
vector_to_coord(TensorCoord &coord, std::vector<int64_t> const &vec) {
coord[Rank - 1] = (int)vec.at(Rank - 1);
if (Rank > 1) {
vector_to_coord<TensorCoord, Rank - 1>(coord, vec);
}
}
};
/// Permits copying dynamic vectors into static-length vectors
template <typename TensorCoord>
struct vector_to_coord<TensorCoord, 1> {
vector_to_coord(TensorCoord &coord, std::vector<int> const &vec) {
coord[0] = vec.at(0);
}
vector_to_coord(TensorCoord &coord, std::vector<int64_t> const &vec) {
coord[0] = (int)vec.at(0);
}
};
/// Permits copying dynamic vectors into static-length vectors
template <typename TensorCoord>
struct vector_to_coord<TensorCoord, 0> {
vector_to_coord(TensorCoord &coord, std::vector<int> const &vec) {
}
};
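// Editor's sketch (not part of the original source): vector_to_coord<Coord<3>, 3>(coord,
// std::vector<int>{2, 4, 8}) unrolls at compile time to coord[2] = 8; coord[1] = 4;
// coord[0] = 2; the Rank == 1 specialization terminates the recursion, and the Rank == 0
// specialization covers layouts whose stride is empty.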
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Element, typename Layout>
static void write_tensor_csv_static_tensor_view(
std::ostream &out,
DeviceAllocation &allocation) {
Coord<Layout::kRank> extent;
Coord<Layout::kStrideRank, typename Layout::Stride::Index> stride;
if (allocation.extent().size() != Layout::kRank) {
throw std::runtime_error("Allocation extent has invalid rank");
}
if (allocation.stride().size() != Layout::kStrideRank) {
throw std::runtime_error("Allocation stride has invalid rank");
}
vector_to_coord<Coord<Layout::kRank>, Layout::kRank>(extent, allocation.extent());
vector_to_coord<Coord<Layout::kStrideRank, typename Layout::Stride::Index>,
Layout::kStrideRank>(stride, allocation.stride());
Layout layout(stride);
HostTensor<Element, Layout> host_tensor(extent, layout, false);
if (host_tensor.capacity() != allocation.batch_stride()) {
throw std::runtime_error("Unexpected mismatch: host tensor capacity does not equal allocation batch stride.");
}
host_tensor.copy_in_device_to_host(
static_cast<Element const *>(allocation.data()),
allocation.batch_stride());
TensorViewWrite(out, host_tensor.host_view());
out << "\n\n";
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T>
static void write_tensor_csv_static_type(
std::ostream &out,
DeviceAllocation &allocation) {
switch (allocation.layout()) {
case library::LayoutTypeID::kRowMajor:
write_tensor_csv_static_tensor_view<T, layout::RowMajor>(out, allocation);
break;
case library::LayoutTypeID::kColumnMajor:
write_tensor_csv_static_tensor_view<T, layout::ColumnMajor>(out, allocation);
break;
case library::LayoutTypeID::kRowMajorInterleavedK2:
write_tensor_csv_static_tensor_view<T, layout::RowMajorInterleaved<2>>(out, allocation);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK2:
write_tensor_csv_static_tensor_view<T, layout::ColumnMajorInterleaved<2>>(out, allocation);
break;
case library::LayoutTypeID::kRowMajorInterleavedK4:
write_tensor_csv_static_tensor_view<T, layout::RowMajorInterleaved<4>>(out, allocation);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK4:
write_tensor_csv_static_tensor_view<T, layout::ColumnMajorInterleaved<4>>(out, allocation);
break;
case library::LayoutTypeID::kRowMajorInterleavedK16:
write_tensor_csv_static_tensor_view<T, layout::RowMajorInterleaved<16>>(out, allocation);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK16:
write_tensor_csv_static_tensor_view<T, layout::ColumnMajorInterleaved<16>>(out, allocation);
break;
case library::LayoutTypeID::kRowMajorInterleavedK32:
write_tensor_csv_static_tensor_view<T, layout::RowMajorInterleaved<32>>(out, allocation);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK32:
write_tensor_csv_static_tensor_view<T, layout::ColumnMajorInterleaved<32>>(out, allocation);
break;
case library::LayoutTypeID::kRowMajorInterleavedK64:
write_tensor_csv_static_tensor_view<T, layout::RowMajorInterleaved<64>>(out, allocation);
break;
case library::LayoutTypeID::kColumnMajorInterleavedK64:
write_tensor_csv_static_tensor_view<T, layout::ColumnMajorInterleaved<64>>(out, allocation);
break;
case library::LayoutTypeID::kTensorNHWC:
write_tensor_csv_static_tensor_view<T, layout::TensorNHWC>(out, allocation);
break;
case library::LayoutTypeID::kTensorNDHWC:
write_tensor_csv_static_tensor_view<T, layout::TensorNDHWC>(out, allocation);
break;
case library::LayoutTypeID::kTensorNC32HW32:
write_tensor_csv_static_tensor_view<T, layout::TensorNCxHWx<32>>(out, allocation);
break;
case library::LayoutTypeID::kTensorNC64HW64:
write_tensor_csv_static_tensor_view<T, layout::TensorNCxHWx<64>>(out, allocation);
break;
case library::LayoutTypeID::kTensorC32RSK32:
write_tensor_csv_static_tensor_view<T, layout::TensorCxRSKx<32>>(out, allocation);
break;
case library::LayoutTypeID::kTensorC64RSK64:
write_tensor_csv_static_tensor_view<T, layout::TensorCxRSKx<64>>(out, allocation);
break;
default:
throw std::runtime_error("Unhandled layout");
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Writes a tensor to csv
void DeviceAllocation::write_tensor_csv(
std::ostream &out) {
switch (this->type()) {
case library::NumericTypeID::kFE4M3:
write_tensor_csv_static_type<float_e4m3_t>(out, *this);
break;
case library::NumericTypeID::kFE5M2:
write_tensor_csv_static_type<float_e5m2_t>(out, *this);
break;
case library::NumericTypeID::kF16:
write_tensor_csv_static_type<half_t>(out, *this);
break;
case library::NumericTypeID::kBF16:
write_tensor_csv_static_type<bfloat16_t>(out, *this);
break;
case library::NumericTypeID::kTF32:
write_tensor_csv_static_type<tfloat32_t>(out, *this);
break;
case library::NumericTypeID::kF32:
write_tensor_csv_static_type<float>(out, *this);
break;
case library::NumericTypeID::kF64:
write_tensor_csv_static_type<double>(out, *this);
break;
case library::NumericTypeID::kS2:
write_tensor_csv_static_type<int2b_t>(out, *this);
break;
case library::NumericTypeID::kS4:
write_tensor_csv_static_type<int4b_t>(out, *this);
break;
case library::NumericTypeID::kS8:
write_tensor_csv_static_type<int8_t>(out, *this);
break;
case library::NumericTypeID::kS16:
write_tensor_csv_static_type<int16_t>(out, *this);
break;
case library::NumericTypeID::kS32:
write_tensor_csv_static_type<int32_t>(out, *this);
break;
case library::NumericTypeID::kS64:
write_tensor_csv_static_type<int64_t>(out, *this);
break;
case library::NumericTypeID::kB1:
write_tensor_csv_static_type<uint1b_t>(out, *this);
break;
case library::NumericTypeID::kU2:
write_tensor_csv_static_type<uint2b_t>(out, *this);
break;
case library::NumericTypeID::kU4:
write_tensor_csv_static_type<uint4b_t>(out, *this);
break;
case library::NumericTypeID::kU8:
write_tensor_csv_static_type<uint8_t>(out, *this);
break;
case library::NumericTypeID::kU16:
write_tensor_csv_static_type<uint16_t>(out, *this);
break;
case library::NumericTypeID::kU32:
write_tensor_csv_static_type<uint32_t>(out, *this);
break;
case library::NumericTypeID::kU64:
write_tensor_csv_static_type<uint64_t>(out, *this);
break;
case library::NumericTypeID::kCF16:
write_tensor_csv_static_type<cutlass::complex<half_t> >(out, *this);
break;
case library::NumericTypeID::kCF32:
write_tensor_csv_static_type<cutlass::complex<float> >(out, *this);
break;
case library::NumericTypeID::kCF64:
write_tensor_csv_static_type<cutlass::complex<double> >(out, *this);
break;
default:
throw std::runtime_error("Unsupported numeric type");
}
}
template <typename Element, typename Layout>
static void tensor_fill_tensor_view(DeviceAllocation &allocation, Element val = Element()) {
Coord<Layout::kRank> extent;
Coord<Layout::kStrideRank, typename Layout::LongIndex> stride;
if (allocation.extent().size() != Layout::kRank) {
throw std::runtime_error("Allocation extent has invalid rank");
}
if (allocation.stride().size() != Layout::kStrideRank) {
throw std::runtime_error("Allocation stride has invalid rank");
}
vector_to_coord<Coord<Layout::kRank>, Layout::kRank>(extent, allocation.extent());
vector_to_coord<Coord<Layout::kStrideRank, typename Layout::LongIndex>,
Layout::kStrideRank>(stride, allocation.stride());
TensorView<Element, Layout> view(
static_cast<Element *>(allocation.data()),
Layout(stride),
extent
);
cutlass::reference::device::TensorFill<Element, Layout>(
view,
val
);
}
template <typename Element>
static void tensor_fill(DeviceAllocation &allocation, Element val = Element()) {
switch (allocation.layout()) {
case library::LayoutTypeID::kRowMajor:
tensor_fill_tensor_view<Element, layout::RowMajor>(allocation, val);
break;
case library::LayoutTypeID::kColumnMajor:
tensor_fill_tensor_view<Element, layout::ColumnMajor>(allocation, val);
break;
case library::LayoutTypeID::kTensorNHWC:
tensor_fill_tensor_view<Element, layout::TensorNHWC>(allocation, val);
break;
case library::LayoutTypeID::kTensorNDHWC:
tensor_fill_tensor_view<Element, layout::TensorNDHWC>(allocation, val);
break;
case library::LayoutTypeID::kTensorNC32HW32:
tensor_fill_tensor_view<Element, layout::TensorNCxHWx<32>>(allocation, val);
break;
case library::LayoutTypeID::kTensorNC64HW64:
tensor_fill_tensor_view<Element, layout::TensorNCxHWx<64>>(allocation, val);
break;
case library::LayoutTypeID::kTensorC32RSK32:
tensor_fill_tensor_view<Element, layout::TensorCxRSKx<32>>(allocation, val);
break;
case library::LayoutTypeID::kTensorC64RSK64:
tensor_fill_tensor_view<Element, layout::TensorCxRSKx<64>>(allocation, val);
break;
default:
throw std::runtime_error("Unsupported layout");
break;
}
}
/// Fills a tensor uniformly with a value (most frequently used to clear the tensor)
void DeviceAllocation::fill(double val = 0.0) {
switch (this->type()) {
case library::NumericTypeID::kFE4M3:
tensor_fill<float_e4m3_t>(*this, static_cast<float_e4m3_t>(val));
break;
case library::NumericTypeID::kFE5M2:
tensor_fill<float_e5m2_t>(*this, static_cast<float_e5m2_t>(val));
break;
case library::NumericTypeID::kF16:
tensor_fill<half_t>(*this, static_cast<half_t>(val));
break;
case library::NumericTypeID::kBF16:
tensor_fill<bfloat16_t>(*this, static_cast<bfloat16_t>(val));
break;
case library::NumericTypeID::kTF32:
tensor_fill<tfloat32_t>(*this, static_cast<tfloat32_t>(val));
break;
case library::NumericTypeID::kF32:
tensor_fill<float>(*this, static_cast<float>(val));
break;
case library::NumericTypeID::kF64:
tensor_fill<double>(*this, static_cast<double>(val));
break;
case library::NumericTypeID::kS2:
tensor_fill<int2b_t>(*this, static_cast<int2b_t>(val));
break;
case library::NumericTypeID::kS4:
tensor_fill<int4b_t>(*this, static_cast<int4b_t>(val));
break;
case library::NumericTypeID::kS8:
tensor_fill<int8_t>(*this, static_cast<int8_t>(val));
break;
case library::NumericTypeID::kS16:
tensor_fill<int16_t>(*this, static_cast<int16_t>(val));
break;
case library::NumericTypeID::kS32:
tensor_fill<int32_t>(*this, static_cast<int32_t>(val));
break;
case library::NumericTypeID::kS64:
tensor_fill<int64_t>(*this, static_cast<int64_t>(val));
break;
case library::NumericTypeID::kB1:
tensor_fill<uint1b_t>(*this, static_cast<uint1b_t>(val));
break;
case library::NumericTypeID::kU2:
tensor_fill<uint2b_t>(*this, static_cast<uint2b_t>(val));
break;
case library::NumericTypeID::kU4:
tensor_fill<uint4b_t>(*this, static_cast<uint4b_t>(val));
break;
case library::NumericTypeID::kU8:
tensor_fill<uint8_t>(*this, static_cast<uint8_t>(val));
break;
case library::NumericTypeID::kU16:
tensor_fill<uint16_t>(*this, static_cast<uint16_t>(val));
break;
case library::NumericTypeID::kU32:
tensor_fill<uint32_t>(*this, static_cast<uint32_t>(val));
break;
case library::NumericTypeID::kU64:
tensor_fill<uint64_t>(*this, static_cast<uint64_t>(val));
break;
case library::NumericTypeID::kCF16:
tensor_fill<cutlass::complex<half_t> >(*this, from_real<half_t>(val));
break;
case library::NumericTypeID::kCF32:
tensor_fill<cutlass::complex<float> >(*this, from_real<float>(val));
break;
case library::NumericTypeID::kCF64:
tensor_fill<cutlass::complex<double> >(*this, from_real<double>(val));
break;
default:
throw std::runtime_error("Unsupported numeric type");
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
|
fd2ba55c47b08766166b33f7e5cce753aea4c904.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "MatrixVectorMult.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "CudaUtil.h"
#include <math.h>
#include <stdio.h>
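// Each thread computes one output element: output[row] is the dot product of matrix row
// `row` with `vec`.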
__global__ void matrixVectorMultKernel(float* fltMatrix, float* vec, float* output, int rows, int columns){
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < rows){
float sum = 0.0f;
for (int col = 0; col < columns; ++col) {
sum += fltMatrix[row * columns + col] * vec[col];
}
output[row] = sum;
}
}
void MatrixVectorMultHost(const float* fltMatA, const float* vecB, float *output, int rows, int columns){
float *devFltMat, *devVecB, *devOutput;
int matEls = rows * columns;
int vecEls = columns;
int outPutEls = rows;
int matSize = matEls * sizeof(float);
int vecSize = vecEls * sizeof(float);
int outputSize = outPutEls * sizeof(float);
//Allocate memory on GPU
CUDA_CHECK(hipMalloc(&devFltMat, matSize));
CUDA_CHECK(hipMalloc(&devVecB, vecSize));
CUDA_CHECK(hipMalloc(&devOutput, outputSize));
//Copy memory to GPU
CUDA_CHECK(hipMemcpy(devFltMat,fltMatA,matSize,hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpy(devVecB,vecB,vecSize,hipMemcpyHostToDevice));
//configure, launch and synchronize kernel
dim3 blockDim(256, 1, 1);
dim3 gridDim(ceil(rows/256.0f), 1, 1);
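// ceil(rows / 256.0f) blocks of 256 threads cover every row (e.g. rows = 1000 -> 4 blocks);
// the `row < rows` guard in the kernel masks the surplus threads of the last block.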
hipLaunchKernelGGL(( matrixVectorMultKernel), dim3(gridDim),dim3(blockDim), 0, 0, devFltMat,devVecB,devOutput,rows,columns);
CUDA_CHECK(hipDeviceSynchronize());
//copy memory back to host
CUDA_CHECK(hipMemcpy(output, devOutput, outputSize, hipMemcpyDeviceToHost));
//free device memory
CUDA_CHECK(hipFree(devFltMat));
CUDA_CHECK(hipFree(devVecB));
CUDA_CHECK(hipFree(devOutput));
}
|
fd2ba55c47b08766166b33f7e5cce753aea4c904.cu
|
#include "MatrixVectorMult.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include "CudaUtil.h"
#include <math.h>
#include <stdio.h>
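// Each thread computes one output element: output[row] is the dot product of matrix row
// `row` with `vec`.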
__global__ void matrixVectorMultKernel(float* fltMatrix, float* vec, float* output, int rows, int columns){
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < rows){
float sum = 0.0f;
for (int col = 0; col < columns; ++col) {
sum += fltMatrix[row * columns + col] * vec[col];
}
output[row] = sum;
}
}
void MatrixVectorMultHost(const float* fltMatA, const float* vecB, float *output, int rows, int columns){
float *devFltMat, *devVecB, *devOutput;
int matEls = rows * columns;
int vecEls = columns;
int outPutEls = rows;
int matSize = matEls * sizeof(float);
int vecSize = vecEls * sizeof(float);
int outputSize = outPutEls * sizeof(float);
//Allocate memory on GPU
CUDA_CHECK(cudaMalloc(&devFltMat, matSize));
CUDA_CHECK(cudaMalloc(&devVecB, vecSize));
CUDA_CHECK(cudaMalloc(&devOutput, outputSize));
//Copy memory to GPU
CUDA_CHECK(cudaMemcpy(devFltMat,fltMatA,matSize,cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(devVecB,vecB,vecSize,cudaMemcpyHostToDevice));
//configure, launch and synchronize kernel
dim3 blockDim(256, 1, 1);
dim3 gridDim(ceil(rows/256.0f), 1, 1);
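// ceil(rows / 256.0f) blocks of 256 threads cover every row (e.g. rows = 1000 -> 4 blocks);
// the `row < rows` guard in the kernel masks the surplus threads of the last block.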
matrixVectorMultKernel<<<gridDim,blockDim>>>(devFltMat,devVecB,devOutput,rows,columns);
CUDA_CHECK(cudaDeviceSynchronize());
//copy memory back to host
CUDA_CHECK(cudaMemcpy(output, devOutput, outputSize, cudaMemcpyDeviceToHost));
//free device memory
CUDA_CHECK(cudaFree(devFltMat));
CUDA_CHECK(cudaFree(devVecB));
CUDA_CHECK(cudaFree(devOutput));
}
|
dad188c331c8e6b4bd84aa4133b1d4ab71ef844a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel_hip.cuh"
////////////////////////////////////////////////////////////
/// These functions come from
/// https://stackoverflow.com/questions/18455414/how-to-do-power-of-complex-number-in-cublas
using rtype = double;
using ctype = hipDoubleComplex;
#define rpart(x) (cuCreal(x))
#define ipart(x) (cuCimag(x))
#define cmplx(x,y) (make_cuDoubleComplex(x,y))
__host__ __device__ rtype carg(const ctype& z) { return (rtype)atan2(ipart(z), rpart(z)); } // polar angle
__host__ __device__ rtype cabs(const ctype& z) { return (rtype)cuCabs(z); }
__host__ __device__ ctype cp2c(const rtype d, const rtype a) { return cmplx(d * cos(a), d * sin(a)); }
__host__ __device__ ctype cpow(const ctype& z, const int& n) { return cmplx((pow(cabs(z), n) * cos(n * carg(z))), (pow(cabs(z), n) * sin(n * carg(z)))); }
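// cpow implements De Moivre's formula: z^n = |z|^n * (cos(n*arg(z)) + i*sin(n*arg(z))).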
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
/// Source: https://solarianprogrammer.com/2013/02/28/mandelbrot-set-cpp-11/
/// \brief Returns smoothed color based on the number of dwells to max dwells
///
/// \return struct containing the rgb values to be placed in the buffer
////////////////////////////////////////////////////////////
__device__ RGB GetSmoothedColor(int n);
__global__ void MandelbrotKernel(PixelBuffer p_buffer, MandelPlotArea* plot_area)
{
const int x = threadIdx.x + blockDim.x * blockIdx.x; //Position within image area
const int y = threadIdx.y + blockDim.y * blockIdx.y; //
//Image buffer must be reversed as it starts at the bottom not the top for iteration
int dwells; //number of dwells per pixel
RGB color;
if (x < plot_area->width && y < plot_area->height)
{
dwells = PixelDwell(x, y, plot_area);
color = GetSmoothedColor(dwells);
//Places pixel data in buffer
//Messy might refactor later
*(p_buffer + (x + y * plot_area->width) * 4 + 0) = color.r;
*(p_buffer + (x + y * plot_area->width) * 4 + 1) = color.g;
*(p_buffer + (x + y * plot_area->width) * 4 + 2) = color.b;
*(p_buffer + (x + y * plot_area->width) * 4 + 3) = 0xff;
}
}
int __device__ PixelDwell(int const x, int const y, MandelPlotArea* plot_area)
{
ctype c = cmplx(x, y),
z;
size_t iter = 0;
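// Map pixel (x, y) onto the complex window: x in [x_min, x_max], y running from y_top at
// the first row to y_bot at the last row.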
c = cmplx(
c.x / (double)plot_area->width * (plot_area->x_max - plot_area->x_min) + plot_area->x_min,
c.y / (double)plot_area->height * (plot_area->y_bot - plot_area->y_top) + plot_area->y_top
);
z = cmplx(c.x, c.y);
while (iter < K_MAX_DWELL && cuCabs(z) < 2.0)
{
z = cuCadd(cpow(z, 2), c);
++iter;
}
return iter;
}
void KernelCall(PixelBuffer buffer, MandelPlotArea* cu_plot, MandelPlotArea* h_plot)
{
dim3 bs(64, 4), grid(DivUp(h_plot->width, bs.x), DivUp(h_plot->height, bs.y));
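// DivUp is presumably ceiling division, so the grid covers the full image even when the
// dimensions are not multiples of the block size (e.g. width 1000, bs.x 64 -> 16 blocks).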
hipLaunchKernelGGL(( MandelbrotKernel) , dim3(grid), dim3(bs) , 0, 0, buffer, cu_plot);
}
__device__ RGB GetSmoothedColor(int n) {
// map n on the 0..1 interval
RGB rgb;
double t = (double)n / (double)K_MAX_DWELL;
// Use smooth polynomials for r, g, b
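// (Editor's note: these are scaled degree-4 Bernstein-basis polynomials in t; each
// channel peaks at a different t in [0, 1], giving a smooth gradient over dwell counts.)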
rgb.r = (int)(9 * (1 - t) * t * t * t * 255);
rgb.g = (int)(15 * (1 - t) * (1 - t) * t * t * 255);
rgb.b = (int)(8.5 * (1 - t) * (1 - t) * (1 - t) * t * 255);
return rgb;
}
|
dad188c331c8e6b4bd84aa4133b1d4ab71ef844a.cu
|
#include "kernel.cuh"
////////////////////////////////////////////////////////////
/// These functions come from
/// https://stackoverflow.com/questions/18455414/how-to-do-power-of-complex-number-in-cublas
using rtype = double;
using ctype = cuDoubleComplex;
#define rpart(x) (cuCreal(x))
#define ipart(x) (cuCimag(x))
#define cmplx(x,y) (make_cuDoubleComplex(x,y))
__host__ __device__ rtype carg(const ctype& z) { return (rtype)atan2(ipart(z), rpart(z)); } // polar angle
__host__ __device__ rtype cabs(const ctype& z) { return (rtype)cuCabs(z); }
__host__ __device__ ctype cp2c(const rtype d, const rtype a) { return cmplx(d * cos(a), d * sin(a)); }
__host__ __device__ ctype cpow(const ctype& z, const int& n) { return cmplx((pow(cabs(z), n) * cos(n * carg(z))), (pow(cabs(z), n) * sin(n * carg(z)))); }
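// cpow implements De Moivre's formula: z^n = |z|^n * (cos(n*arg(z)) + i*sin(n*arg(z))).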
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
/// Source: https://solarianprogrammer.com/2013/02/28/mandelbrot-set-cpp-11/
/// \brief Returns smoothed color based on the number of dwells to max dwells
///
/// \return struct containing the rgb values to be placed in the buffer
////////////////////////////////////////////////////////////
__device__ RGB GetSmoothedColor(int n);
__global__ void MandelbrotKernel(PixelBuffer p_buffer, MandelPlotArea* plot_area)
{
const int x = threadIdx.x + blockDim.x * blockIdx.x; //Position within image area
const int y = threadIdx.y + blockDim.y * blockIdx.y; //
//Image buffer must be reversed as it starts at the bottom not the top for iteration
int dwells; //number of dwells per pixel
RGB color;
if (x < plot_area->width && y < plot_area->height)
{
dwells = PixelDwell(x, y, plot_area);
color = GetSmoothedColor(dwells);
//Places pixel data in buffer
//Messy might refactor later
*(p_buffer + (x + y * plot_area->width) * 4 + 0) = color.r;
*(p_buffer + (x + y * plot_area->width) * 4 + 1) = color.g;
*(p_buffer + (x + y * plot_area->width) * 4 + 2) = color.b;
*(p_buffer + (x + y * plot_area->width) * 4 + 3) = 0xff;
}
}
int __device__ PixelDwell(int const x, int const y, MandelPlotArea* plot_area)
{
ctype c = cmplx(x, y),
z;
size_t iter = 0;
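// Map pixel (x, y) onto the complex window: x in [x_min, x_max], y running from y_top at
// the first row to y_bot at the last row.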
c = cmplx(
c.x / (double)plot_area->width * (plot_area->x_max - plot_area->x_min) + plot_area->x_min,
c.y / (double)plot_area->height * (plot_area->y_bot - plot_area->y_top) + plot_area->y_top
);
z = cmplx(c.x, c.y);
while (iter < K_MAX_DWELL && cuCabs(z) < 2.0)
{
z = cuCadd(cpow(z, 2), c);
++iter;
}
return iter;
}
void KernelCall(PixelBuffer buffer, MandelPlotArea* cu_plot, MandelPlotArea* h_plot)
{
dim3 bs(64, 4), grid(DivUp(h_plot->width, bs.x), DivUp(h_plot->height, bs.y));
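// DivUp is presumably ceiling division, so the grid covers the full image even when the
// dimensions are not multiples of the block size (e.g. width 1000, bs.x 64 -> 16 blocks).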
MandelbrotKernel <<<grid, bs >>> (buffer, cu_plot);
}
__device__ RGB GetSmoothedColor(int n) {
// map n on the 0..1 interval
RGB rgb;
double t = (double)n / (double)K_MAX_DWELL;
// Use smooth polynomials for r, g, b
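// (Editor's note: these are scaled degree-4 Bernstein-basis polynomials in t; each
// channel peaks at a different t in [0, 1], giving a smooth gradient over dwell counts.)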
rgb.r = (int)(9 * (1 - t) * t * t * t * 255);
rgb.g = (int)(15 * (1 - t) * (1 - t) * t * t * 255);
rgb.b = (int)(8.5 * (1 - t) * (1 - t) * (1 - t) * t * 255);
return rgb;
}
|
8f830da554c0c58fb5393527af5d5585052eb11f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
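// Note: `extern __shared__` means the shared-memory size must be supplied at launch, e.g.
// hipLaunchKernelGGL(add, grid, block, block.x * sizeof(int), 0, a, b, c);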
__global__ void add(int *a, int *b, int *c)
{
extern __shared__ int shared_mem[];
int * shmem=shared_mem;
shmem[threadIdx.x]=threadIdx.x;
a[threadIdx.x]=shmem[threadIdx.x];
b[threadIdx.x]=shmem[threadIdx.x];
c[threadIdx.x]=a[threadIdx.x]+b[threadIdx.x];
}
|
8f830da554c0c58fb5393527af5d5585052eb11f.cu
|
#include "includes.h"
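// Note: `extern __shared__` means the shared-memory size must be supplied as the third
// launch parameter, e.g. add<<<grid, block, block.x * sizeof(int)>>>(a, b, c);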
__global__ void add(int *a, int *b, int *c)
{
extern __shared__ int shared_mem[];
int * shmem=shared_mem;
shmem[threadIdx.x]=threadIdx.x;
a[threadIdx.x]=shmem[threadIdx.x];
b[threadIdx.x]=shmem[threadIdx.x];
c[threadIdx.x]=a[threadIdx.x]+b[threadIdx.x];
}
|
92a4b03fb9a2efc0c3331569b2c2f562f8895c62.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cugraph/experimental/detail/graph_utils.cuh>
#include <cugraph/experimental/graph.hpp>
#include <cugraph/partition_manager.hpp>
#include <cugraph/utilities/error.hpp>
#include <cugraph/utilities/host_scalar_comm.cuh>
#include <rmm/thrust_rmm_allocator.h>
#include <raft/device_atomics.cuh>
#include <raft/handle.hpp>
#include <rmm/device_uvector.hpp>
#include <thrust/adjacent_difference.h>
#include <thrust/binary_search.h>
#include <thrust/fill.h>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/tuple.h>
#include <algorithm>
#include <tuple>
namespace cugraph {
namespace experimental {
namespace {
// can't use lambda due to nvcc limitations (The enclosing parent function ("graph_view_t") for an
// extended __device__ lambda must allow its address to be taken)
template <typename vertex_t>
struct out_of_range_t {
vertex_t major_first{};
vertex_t major_last{};
vertex_t minor_first{};
vertex_t minor_last{};
__device__ bool operator()(thrust::tuple<vertex_t, vertex_t> t)
{
auto major = thrust::get<0>(t);
auto minor = thrust::get<1>(t);
return (major < major_first) || (major >= major_last) || (minor < minor_first) ||
(minor >= minor_last);
}
};
template <bool store_transposed, typename vertex_t, typename edge_t, typename weight_t>
std::
tuple<rmm::device_uvector<edge_t>, rmm::device_uvector<vertex_t>, rmm::device_uvector<weight_t>>
edgelist_to_compressed_sparse(edgelist_t<vertex_t, edge_t, weight_t> const &edgelist,
vertex_t major_first,
vertex_t major_last,
vertex_t minor_first,
vertex_t minor_last,
bool is_weighted,
hipStream_t stream)
{
rmm::device_uvector<edge_t> offsets((major_last - major_first) + 1, stream);
rmm::device_uvector<vertex_t> indices(edgelist.number_of_edges, stream);
rmm::device_uvector<weight_t> weights(is_weighted ? edgelist.number_of_edges : 0, stream);
thrust::fill(rmm::exec_policy(stream)->on(stream), offsets.begin(), offsets.end(), edge_t{0});
thrust::fill(rmm::exec_policy(stream)->on(stream), indices.begin(), indices.end(), vertex_t{0});
// FIXME: need to performance test this code with R-mat graphs having highly-skewed degree
// distribution. If there is a small number of vertices with very large degrees, atomicAdd can
// sequentialize execution. CUDA9+ & Kepler+ provide complier/architectural optimizations to
// mitigate this impact
// (https://developer.nvidia.com/blog/cuda-pro-tip-optimized-filtering-warp-aggregated-atomics/),
// and we need to check this thrust::for_each based approach delivers the expected performance.
// FIXME: also need to verify this approach is at least not significantly slower than the sorting
// based approach (this approach does not use extra memory, so better stick to this approach
// unless performance is significantly worse).
auto p_offsets = offsets.data();
auto p_indices = indices.data();
auto p_weights = is_weighted ? weights.data() : static_cast<weight_t *>(nullptr);
thrust::for_each(rmm::exec_policy(stream)->on(stream),
store_transposed ? edgelist.p_dst_vertices : edgelist.p_src_vertices,
store_transposed ? edgelist.p_dst_vertices + edgelist.number_of_edges
: edgelist.p_src_vertices + edgelist.number_of_edges,
[p_offsets, major_first] __device__(auto v) {
atomicAdd(p_offsets + (v - major_first), edge_t{1});
});
thrust::exclusive_scan(
rmm::exec_policy(stream)->on(stream), offsets.begin(), offsets.end(), offsets.begin());
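// e.g. per-major degree counts [2, 0, 3] (plus the trailing 0) scan to CSR offsets
// [0, 2, 2, 5], so offsets[i + 1] - offsets[i] recovers the degree of major i.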
if (is_weighted) {
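// Scatter pass: each edge atomically claims a slot within its major's [start, start + degree)
// range. The last slot of each range doubles as the per-row counter until the final edge
// overwrites it with a real column index, which is why the writes below do not race.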
auto edge_first = thrust::make_zip_iterator(thrust::make_tuple(
edgelist.p_src_vertices, edgelist.p_dst_vertices, edgelist.p_edge_weights));
thrust::for_each(rmm::exec_policy(stream)->on(stream),
edge_first,
edge_first + edgelist.number_of_edges,
[p_offsets, p_indices, p_weights, major_first] __device__(auto e) {
auto s = thrust::get<0>(e);
auto d = thrust::get<1>(e);
auto w = thrust::get<2>(e);
auto major = store_transposed ? d : s;
auto minor = store_transposed ? s : d;
auto start = p_offsets[major - major_first];
auto degree = p_offsets[(major - major_first) + 1] - start;
auto idx = atomicAdd(p_indices + (start + degree - 1),
vertex_t{1}); // use the last element as a counter
// FIXME: we can actually store minor - minor_first instead of minor to save
// memory if minor can be larger than 32 bit but minor - minor_first fits
// within 32 bit
p_indices[start + idx] =
minor; // overwrite the counter only if idx == degree - 1 (no race)
p_weights[start + idx] = w;
});
} else {
auto edge_first = thrust::make_zip_iterator(
thrust::make_tuple(edgelist.p_src_vertices, edgelist.p_dst_vertices));
thrust::for_each(rmm::exec_policy(stream)->on(stream),
edge_first,
edge_first + edgelist.number_of_edges,
[p_offsets, p_indices, p_weights, major_first] __device__(auto e) {
auto s = thrust::get<0>(e);
auto d = thrust::get<1>(e);
auto major = store_transposed ? d : s;
auto minor = store_transposed ? s : d;
auto start = p_offsets[major - major_first];
auto degree = p_offsets[(major - major_first) + 1] - start;
auto idx = atomicAdd(p_indices + (start + degree - 1),
vertex_t{1}); // use the last element as a counter
// FIXME: we can actually store minor - minor_first instead of minor to save
// memory if minor can be larger than 32 bit but minor - minor_first fits
// within 32 bit
p_indices[start + idx] =
minor; // overwrite the counter only if idx == degree - 1 (no race)
});
}
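// Worked example of the counter-in-the-last-slot trick above (illustrative): for a
// major vertex with start = 10 and degree = 3, p_indices[12] doubles as the
// counter. The three incident edges atomically fetch idx = 0, 1, 2; the writes to
// p_indices[10] and p_indices[11] never touch the counter, and the write that
// overwrites p_indices[12] (idx == degree - 1) can only come from the thread that
// performed the last fetch, so no thread still needs the counter afterwards.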
// FIXME: need to add an option to sort neighbor lists
return std::make_tuple(std::move(offsets), std::move(indices), std::move(weights));
}
} // namespace
template <typename vertex_t,
typename edge_t,
typename weight_t,
bool store_transposed,
bool multi_gpu>
graph_t<vertex_t, edge_t, weight_t, store_transposed, multi_gpu, std::enable_if_t<multi_gpu>>::
graph_t(raft::handle_t const &handle,
std::vector<edgelist_t<vertex_t, edge_t, weight_t>> const &edgelists,
partition_t<vertex_t> const &partition,
vertex_t number_of_vertices,
edge_t number_of_edges,
graph_properties_t properties,
bool sorted_by_global_degree_within_vertex_partition,
bool do_expensive_check)
: detail::graph_base_t<vertex_t, edge_t, weight_t>(
handle, number_of_vertices, number_of_edges, properties),
partition_(partition)
{
// cheap error checks
auto &comm = this->get_handle_ptr()->get_comms();
auto const comm_size = comm.get_size();
auto &row_comm =
this->get_handle_ptr()->get_subcomm(cugraph::partition_2d::key_naming_t().row_name());
auto const row_comm_rank = row_comm.get_rank();
auto const row_comm_size = row_comm.get_size();
auto &col_comm =
this->get_handle_ptr()->get_subcomm(cugraph::partition_2d::key_naming_t().col_name());
auto const col_comm_rank = col_comm.get_rank();
auto const col_comm_size = col_comm.get_size();
auto default_stream = this->get_handle_ptr()->get_stream();
CUGRAPH_EXPECTS(edgelists.size() > 0,
"Invalid input argument: edgelists.size() should be non-zero.");
CUGRAPH_EXPECTS(
std::any_of(edgelists.begin() + 1,
edgelists.end(),
[is_weighted = properties.is_weighted](auto edgelist) {
return ((edgelist.number_of_edges > 0) && (edgelist.p_src_vertices == nullptr)) ||
((edgelist.number_of_edges > 0) && (edgelist.p_dst_vertices == nullptr)) ||
(is_weighted && (edgelist.number_of_edges > 0) &&
(edgelist.p_edge_weights == nullptr)) ||
(!is_weighted && (edgelist.p_edge_weights != nullptr));
}) == false,
"Invalid input argument: edgelists[].p_src_vertices and edgelists[].p_dst_vertices should not "
"be nullptr if edgelists[].number_of_edges > 0 and edgelists[].p_edge_weights should be "
"nullptr if unweighted or should not be nullptr if weighted and edgelists[].number_of_edges > "
"0.");
CUGRAPH_EXPECTS(edgelists.size() == static_cast<size_t>(col_comm_size),
"Invalid input argument: errneous edgelists.size().");
// optional expensive checks (part 1/3)
if (do_expensive_check) {
edge_t number_of_local_edges_sum{};
for (size_t i = 0; i < edgelists.size(); ++i) {
vertex_t major_first{};
vertex_t major_last{};
vertex_t minor_first{};
vertex_t minor_last{};
std::tie(major_first, major_last) = partition.get_matrix_partition_major_range(i);
std::tie(minor_first, minor_last) = partition.get_matrix_partition_minor_range();
number_of_local_edges_sum += edgelists[i].number_of_edges;
auto edge_first = thrust::make_zip_iterator(thrust::make_tuple(
store_transposed ? edgelists[i].p_dst_vertices : edgelists[i].p_src_vertices,
store_transposed ? edgelists[i].p_src_vertices : edgelists[i].p_dst_vertices));
// better use thrust::any_of once https://github.com/thrust/thrust/issues/1016 is resolved
CUGRAPH_EXPECTS(thrust::count_if(rmm::exec_policy(default_stream)->on(default_stream),
edge_first,
edge_first + edgelists[i].number_of_edges,
out_of_range_t<vertex_t>{
major_first, major_last, minor_first, minor_last}) == 0,
"Invalid input argument: edgelists[] have out-of-range values.");
}
number_of_local_edges_sum =
host_scalar_allreduce(comm, number_of_local_edges_sum, default_stream);
CUGRAPH_EXPECTS(
number_of_local_edges_sum == this->get_number_of_edges(),
"Invalid input argument: the sum of local edge counts does not match with number_of_edges.");
CUGRAPH_EXPECTS(
partition.get_vertex_partition_last(comm_size - 1) == number_of_vertices,
"Invalid input argument: vertex partition should cover [0, number_of_vertices).");
}
// convert edge list (COO) to compressed sparse format (CSR or CSC)
adj_matrix_partition_offsets_.reserve(edgelists.size());
adj_matrix_partition_indices_.reserve(edgelists.size());
adj_matrix_partition_weights_.reserve(properties.is_weighted ? edgelists.size() : 0);
for (size_t i = 0; i < edgelists.size(); ++i) {
vertex_t major_first{};
vertex_t major_last{};
vertex_t minor_first{};
vertex_t minor_last{};
std::tie(major_first, major_last) = partition.get_matrix_partition_major_range(i);
std::tie(minor_first, minor_last) = partition.get_matrix_partition_minor_range();
rmm::device_uvector<edge_t> offsets(0, default_stream);
rmm::device_uvector<vertex_t> indices(0, default_stream);
rmm::device_uvector<weight_t> weights(0, default_stream);
std::tie(offsets, indices, weights) =
edgelist_to_compressed_sparse<store_transposed>(edgelists[i],
major_first,
major_last,
minor_first,
minor_last,
properties.is_weighted,
this->get_handle_ptr()->get_stream());
adj_matrix_partition_offsets_.push_back(std::move(offsets));
adj_matrix_partition_indices_.push_back(std::move(indices));
if (properties.is_weighted) { adj_matrix_partition_weights_.push_back(std::move(weights)); }
}
// update degree-based segment offsets (to be used for graph analytics kernel optimization)
if (sorted_by_global_degree_within_vertex_partition) {
auto degrees = detail::compute_major_degrees(
*(this->get_handle_ptr()), adj_matrix_partition_offsets_, partition_);
// optional expensive checks (part 2/3)
if (do_expensive_check) {
CUGRAPH_EXPECTS(thrust::is_sorted(rmm::exec_policy(default_stream)->on(default_stream),
degrees.begin(),
degrees.end(),
thrust::greater<edge_t>{}),
"Invalid input argument: sorted_by_global_degree_within_vertex_partition is "
"set to true, but degrees are not non-ascending.");
}
static_assert(detail::num_segments_per_vertex_partition == 3);
static_assert((detail::low_degree_threshold <= detail::mid_degree_threshold) &&
(detail::mid_degree_threshold <= std::numeric_limits<edge_t>::max()));
rmm::device_uvector<edge_t> d_thresholds(detail::num_segments_per_vertex_partition - 1,
default_stream);
std::vector<edge_t> h_thresholds = {
static_cast<edge_t>(detail::mid_degree_threshold * col_comm_size),
static_cast<edge_t>(detail::low_degree_threshold * col_comm_size)};
raft::update_device(
d_thresholds.data(), h_thresholds.data(), h_thresholds.size(), default_stream);
rmm::device_uvector<vertex_t> segment_offsets(detail::num_segments_per_vertex_partition + 1,
default_stream);
// temporaries are necessary because the &&-overload of device_uvector is deleted
// Note that we must sync `default_stream` before these temporaries go out of scope to
// avoid use after free. (The syncs are at the end of this function)
auto zero_vertex = vertex_t{0};
auto vertex_count = static_cast<vertex_t>(degrees.size());
segment_offsets.set_element_async(0, zero_vertex, default_stream);
segment_offsets.set_element_async(
detail::num_segments_per_vertex_partition, vertex_count, default_stream);
thrust::upper_bound(rmm::exec_policy(default_stream)->on(default_stream),
degrees.begin(),
degrees.end(),
d_thresholds.begin(),
d_thresholds.end(),
segment_offsets.begin() + 1,
thrust::greater<edge_t>{});
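// Worked example (illustrative): with degrees already in non-ascending order, say
// degrees = [9, 7, 4, 4, 2, 1] and d_thresholds = [5, 3] (the scaled mid and low
// thresholds), thrust::upper_bound with thrust::greater<> returns the first
// positions whose degree falls below each threshold, here [2, 4], so
// segment_offsets becomes [0, 2, 4, 6]: vertices [0, 2) form the high-degree
// segment, [2, 4) the mid-degree segment, and [4, 6) the low-degree segment.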
rmm::device_uvector<vertex_t> aggregate_segment_offsets(col_comm_size * segment_offsets.size(),
default_stream);
col_comm.allgather(segment_offsets.data(),
aggregate_segment_offsets.data(),
segment_offsets.size(),
default_stream);
adj_matrix_partition_segment_offsets_.resize(aggregate_segment_offsets.size());
raft::update_host(adj_matrix_partition_segment_offsets_.data(),
aggregate_segment_offsets.data(),
aggregate_segment_offsets.size(),
default_stream);
auto status = col_comm.sync_stream(
default_stream); // this is necessary as degrees, d_thresholds, and segment_offsets will
// become out-of-scope once control flow exits this block and
// adj_matrix_partition_segment_offsets_ can be used right after return.
CUGRAPH_EXPECTS(status == raft::comms::status_t::SUCCESS, "sync_stream() failure.");
}
// optional expensive checks (part 3/3)
if (do_expensive_check) {
// FIXME: check for symmetry may better be implemented with transpose().
if (this->is_symmetric()) {}
// FIXME: check for duplicate edges may better be implemented after deciding whether to sort
// neighbor list or not.
if (!this->is_multigraph()) {}
}
}
template <typename vertex_t,
typename edge_t,
typename weight_t,
bool store_transposed,
bool multi_gpu>
graph_t<vertex_t, edge_t, weight_t, store_transposed, multi_gpu, std::enable_if_t<!multi_gpu>>::
graph_t(raft::handle_t const &handle,
edgelist_t<vertex_t, edge_t, weight_t> const &edgelist,
vertex_t number_of_vertices,
graph_properties_t properties,
bool sorted_by_degree,
bool do_expensive_check)
: detail::graph_base_t<vertex_t, edge_t, weight_t>(
handle, number_of_vertices, edgelist.number_of_edges, properties),
offsets_(rmm::device_uvector<edge_t>(0, handle.get_stream())),
indices_(rmm::device_uvector<vertex_t>(0, handle.get_stream())),
weights_(rmm::device_uvector<weight_t>(0, handle.get_stream()))
{
// cheap error checks
auto default_stream = this->get_handle_ptr()->get_stream();
CUGRAPH_EXPECTS(
((edgelist.number_of_edges == 0) || (edgelist.p_src_vertices != nullptr)) &&
((edgelist.number_of_edges == 0) || (edgelist.p_dst_vertices != nullptr)) &&
((properties.is_weighted &&
((edgelist.number_of_edges == 0) || (edgelist.p_edge_weights != nullptr))) ||
(!properties.is_weighted && (edgelist.p_edge_weights == nullptr))),
"Invalid input argument: edgelist.p_src_vertices and edgelist.p_dst_vertices should "
"not be nullptr if edgelist.number_of_edges > 0 and edgelist.p_edge_weights should be nullptr "
"if unweighted or should not be nullptr if weighted and edgelist.number_of_edges > 0.");
// optional expensive checks (part 1/3)
if (do_expensive_check) {
auto edge_first = thrust::make_zip_iterator(
thrust::make_tuple(store_transposed ? edgelist.p_dst_vertices : edgelist.p_src_vertices,
store_transposed ? edgelist.p_src_vertices : edgelist.p_dst_vertices));
// better use thrust::any_of once https://github.com/thrust/thrust/issues/1016 is resolved
CUGRAPH_EXPECTS(thrust::count_if(
rmm::exec_policy(default_stream)->on(default_stream),
edge_first,
edge_first + edgelist.number_of_edges,
out_of_range_t<vertex_t>{
0, this->get_number_of_vertices(), 0, this->get_number_of_vertices()}) == 0,
"Invalid input argument: edgelist have out-of-range values.");
// FIXME: check for symmetry may better be implemented with transpose().
if (this->is_symmetric()) {}
// FIXME: check for duplicate edges may better be implemented after deciding whether to sort
// neighbor list or not.
if (!this->is_multigraph()) {}
}
// convert edge list (COO) to compressed sparse format (CSR or CSC)
std::tie(offsets_, indices_, weights_) =
edgelist_to_compressed_sparse<store_transposed>(edgelist,
vertex_t{0},
this->get_number_of_vertices(),
vertex_t{0},
this->get_number_of_vertices(),
properties.is_weighted,
this->get_handle_ptr()->get_stream());
// update degree-based segment offsets (to be used for graph analytics kernel optimization)
if (sorted_by_degree) {
auto degree_first = thrust::make_transform_iterator(
thrust::make_counting_iterator(vertex_t{0}),
detail::degree_from_offsets_t<vertex_t, edge_t>{offsets_.data()});
// optional expensive checks (part 2/3)
if (do_expensive_check) {
CUGRAPH_EXPECTS(
thrust::is_sorted(rmm::exec_policy(default_stream)->on(default_stream),
degree_first,
degree_first + this->get_number_of_vertices(),
thrust::greater<edge_t>{}),
"Invalid input argument: sorted_by_degree is set to true, but degrees are not "
"non-ascending.");
}
static_assert(detail::num_segments_per_vertex_partition == 3);
static_assert((detail::low_degree_threshold <= detail::mid_degree_threshold) &&
(detail::mid_degree_threshold <= std::numeric_limits<edge_t>::max()));
rmm::device_uvector<edge_t> d_thresholds(detail::num_segments_per_vertex_partition - 1,
default_stream);
std::vector<edge_t> h_thresholds = {static_cast<edge_t>(detail::mid_degree_threshold),
static_cast<edge_t>(detail::low_degree_threshold)};
raft::update_device(
d_thresholds.data(), h_thresholds.data(), h_thresholds.size(), default_stream);
rmm::device_uvector<vertex_t> segment_offsets(detail::num_segments_per_vertex_partition + 1,
default_stream);
// temporaries are necessary because the &&-overload of device_uvector is deleted
// Note that we must sync `default_stream` before these temporaries go out of scope to
// avoid use after free. (The syncs are at the end of this function)
auto zero_vertex = vertex_t{0};
auto vertex_count = static_cast<vertex_t>(this->get_number_of_vertices());
segment_offsets.set_element_async(0, zero_vertex, default_stream);
segment_offsets.set_element_async(
detail::num_segments_per_vertex_partition, vertex_count, default_stream);
thrust::upper_bound(rmm::exec_policy(default_stream)->on(default_stream),
degree_first,
degree_first + this->get_number_of_vertices(),
d_thresholds.begin(),
d_thresholds.end(),
segment_offsets.begin() + 1,
thrust::greater<edge_t>{});
segment_offsets_.resize(segment_offsets.size());
raft::update_host(
segment_offsets_.data(), segment_offsets.data(), segment_offsets.size(), default_stream);
CUDA_TRY(hipStreamSynchronize(
default_stream)); // this is necessary as segment_offsets_ can be used right after return.
}
// optional expensive checks (part 3/3)
if (do_expensive_check) {
// FIXME: check for symmetry may better be implemented with transpose().
if (this->is_symmetric()) {}
// FIXME: check for duplicate edges may better be implemented after deciding whether to sort
// neighbor list or not.
if (!this->is_multigraph()) {}
}
}
// explicit instantiation
template class graph_t<int32_t, int32_t, float, true, true>;
template class graph_t<int32_t, int32_t, float, false, true>;
template class graph_t<int32_t, int32_t, double, true, true>;
template class graph_t<int32_t, int32_t, double, false, true>;
template class graph_t<int32_t, int64_t, float, true, true>;
template class graph_t<int32_t, int64_t, float, false, true>;
template class graph_t<int32_t, int64_t, double, true, true>;
template class graph_t<int32_t, int64_t, double, false, true>;
template class graph_t<int64_t, int64_t, float, true, true>;
template class graph_t<int64_t, int64_t, float, false, true>;
template class graph_t<int64_t, int64_t, double, true, true>;
template class graph_t<int64_t, int64_t, double, false, true>;
//
template class graph_t<int32_t, int32_t, float, true, false>;
template class graph_t<int32_t, int32_t, float, false, false>;
template class graph_t<int32_t, int32_t, double, true, false>;
template class graph_t<int32_t, int32_t, double, false, false>;
template class graph_t<int32_t, int64_t, float, true, false>;
template class graph_t<int32_t, int64_t, float, false, false>;
template class graph_t<int32_t, int64_t, double, true, false>;
template class graph_t<int32_t, int64_t, double, false, false>;
template class graph_t<int64_t, int64_t, float, true, false>;
template class graph_t<int64_t, int64_t, float, false, false>;
template class graph_t<int64_t, int64_t, double, true, false>;
template class graph_t<int64_t, int64_t, double, false, false>;
} // namespace experimental
} // namespace cugraph
#include <cugraph/experimental/eidir_graph.hpp>
| 92a4b03fb9a2efc0c3331569b2c2f562f8895c62.cu |
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cugraph/experimental/detail/graph_utils.cuh>
#include <cugraph/experimental/graph.hpp>
#include <cugraph/partition_manager.hpp>
#include <cugraph/utilities/error.hpp>
#include <cugraph/utilities/host_scalar_comm.cuh>
#include <rmm/thrust_rmm_allocator.h>
#include <raft/device_atomics.cuh>
#include <raft/handle.hpp>
#include <rmm/device_uvector.hpp>
#include <thrust/adjacent_difference.h>
#include <thrust/binary_search.h>
#include <thrust/fill.h>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/tuple.h>
#include <thrust/count.h>
#include <thrust/functional.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/scan.h>
#include <thrust/sort.h>
#include <algorithm>
#include <limits>
#include <tuple>
#include <vector>
namespace cugraph {
namespace experimental {
namespace {
// can't use lambda due to nvcc limitations (The enclosing parent function ("graph_view_t") for an
// extended __device__ lambda must allow its address to be taken)
template <typename vertex_t>
struct out_of_range_t {
vertex_t major_first{};
vertex_t major_last{};
vertex_t minor_first{};
vertex_t minor_last{};
__device__ bool operator()(thrust::tuple<vertex_t, vertex_t> t)
{
auto major = thrust::get<0>(t);
auto minor = thrust::get<1>(t);
return (major < major_first) || (major >= major_last) || (minor < minor_first) ||
(minor >= minor_last);
}
};
template <bool store_transposed, typename vertex_t, typename edge_t, typename weight_t>
std::
tuple<rmm::device_uvector<edge_t>, rmm::device_uvector<vertex_t>, rmm::device_uvector<weight_t>>
edgelist_to_compressed_sparse(edgelist_t<vertex_t, edge_t, weight_t> const &edgelist,
vertex_t major_first,
vertex_t major_last,
vertex_t minor_first,
vertex_t minor_last,
bool is_weighted,
cudaStream_t stream)
{
rmm::device_uvector<edge_t> offsets((major_last - major_first) + 1, stream);
rmm::device_uvector<vertex_t> indices(edgelist.number_of_edges, stream);
rmm::device_uvector<weight_t> weights(is_weighted ? edgelist.number_of_edges : 0, stream);
thrust::fill(rmm::exec_policy(stream)->on(stream), offsets.begin(), offsets.end(), edge_t{0});
thrust::fill(rmm::exec_policy(stream)->on(stream), indices.begin(), indices.end(), vertex_t{0});
// FIXME: need to performance test this code with R-mat graphs having highly-skewed degree
// distribution. If there is a small number of vertices with very large degrees, atomicAdd can
// sequentialize execution. CUDA9+ & Kepler+ provide compiler/architectural optimizations to
// mitigate this impact
// (https://developer.nvidia.com/blog/cuda-pro-tip-optimized-filtering-warp-aggregated-atomics/),
// and we need to check that this thrust::for_each based approach delivers the expected performance.
// FIXME: also need to verify this approach is at least not significantly slower than the sorting
// based approach (this approach does not use extra memory, so better stick to this approach
// unless performance is significantly worse).
auto p_offsets = offsets.data();
auto p_indices = indices.data();
auto p_weights = is_weighted ? weights.data() : static_cast<weight_t *>(nullptr);
thrust::for_each(rmm::exec_policy(stream)->on(stream),
store_transposed ? edgelist.p_dst_vertices : edgelist.p_src_vertices,
store_transposed ? edgelist.p_dst_vertices + edgelist.number_of_edges
: edgelist.p_src_vertices + edgelist.number_of_edges,
[p_offsets, major_first] __device__(auto v) {
atomicAdd(p_offsets + (v - major_first), edge_t{1});
});
thrust::exclusive_scan(
rmm::exec_policy(stream)->on(stream), offsets.begin(), offsets.end(), offsets.begin());
if (is_weighted) {
auto edge_first = thrust::make_zip_iterator(thrust::make_tuple(
edgelist.p_src_vertices, edgelist.p_dst_vertices, edgelist.p_edge_weights));
thrust::for_each(rmm::exec_policy(stream)->on(stream),
edge_first,
edge_first + edgelist.number_of_edges,
[p_offsets, p_indices, p_weights, major_first] __device__(auto e) {
auto s = thrust::get<0>(e);
auto d = thrust::get<1>(e);
auto w = thrust::get<2>(e);
auto major = store_transposed ? d : s;
auto minor = store_transposed ? s : d;
auto start = p_offsets[major - major_first];
auto degree = p_offsets[(major - major_first) + 1] - start;
auto idx = atomicAdd(p_indices + (start + degree - 1),
vertex_t{1}); // use the last element as a counter
// FIXME: we can actually store minor - minor_first instead of minor to save
// memory if minor can be larger than 32 bit but minor - minor_first fits
// within 32 bit
p_indices[start + idx] =
minor; // overwrite the counter only if idx == degree - 1 (no race)
p_weights[start + idx] = w;
});
} else {
auto edge_first = thrust::make_zip_iterator(
thrust::make_tuple(edgelist.p_src_vertices, edgelist.p_dst_vertices));
thrust::for_each(rmm::exec_policy(stream)->on(stream),
edge_first,
edge_first + edgelist.number_of_edges,
[p_offsets, p_indices, p_weights, major_first] __device__(auto e) {
auto s = thrust::get<0>(e);
auto d = thrust::get<1>(e);
auto major = store_transposed ? d : s;
auto minor = store_transposed ? s : d;
auto start = p_offsets[major - major_first];
auto degree = p_offsets[(major - major_first) + 1] - start;
auto idx = atomicAdd(p_indices + (start + degree - 1),
vertex_t{1}); // use the last element as a counter
// FIXME: we can actually store minor - minor_first instead of minor to save
// memory if minor can be larger than 32 bit but minor - minor_first fits
// within 32 bit
p_indices[start + idx] =
minor; // overwrite the counter only if idx == degree - 1 (no race)
});
}
// FIXME: need to add an option to sort neighbor lists
return std::make_tuple(std::move(offsets), std::move(indices), std::move(weights));
}
} // namespace
template <typename vertex_t,
typename edge_t,
typename weight_t,
bool store_transposed,
bool multi_gpu>
graph_t<vertex_t, edge_t, weight_t, store_transposed, multi_gpu, std::enable_if_t<multi_gpu>>::
graph_t(raft::handle_t const &handle,
std::vector<edgelist_t<vertex_t, edge_t, weight_t>> const &edgelists,
partition_t<vertex_t> const &partition,
vertex_t number_of_vertices,
edge_t number_of_edges,
graph_properties_t properties,
bool sorted_by_global_degree_within_vertex_partition,
bool do_expensive_check)
: detail::graph_base_t<vertex_t, edge_t, weight_t>(
handle, number_of_vertices, number_of_edges, properties),
partition_(partition)
{
// cheap error checks
auto &comm = this->get_handle_ptr()->get_comms();
auto const comm_size = comm.get_size();
auto &row_comm =
this->get_handle_ptr()->get_subcomm(cugraph::partition_2d::key_naming_t().row_name());
auto const row_comm_rank = row_comm.get_rank();
auto const row_comm_size = row_comm.get_size();
auto &col_comm =
this->get_handle_ptr()->get_subcomm(cugraph::partition_2d::key_naming_t().col_name());
auto const col_comm_rank = col_comm.get_rank();
auto const col_comm_size = col_comm.get_size();
auto default_stream = this->get_handle_ptr()->get_stream();
CUGRAPH_EXPECTS(edgelists.size() > 0,
"Invalid input argument: edgelists.size() should be non-zero.");
CUGRAPH_EXPECTS(
std::any_of(edgelists.begin() + 1,
edgelists.end(),
[is_weighted = properties.is_weighted](auto edgelist) {
return ((edgelist.number_of_edges > 0) && (edgelist.p_src_vertices == nullptr)) ||
((edgelist.number_of_edges > 0) && (edgelist.p_dst_vertices == nullptr)) ||
(is_weighted && (edgelist.number_of_edges > 0) &&
(edgelist.p_edge_weights == nullptr)) ||
(!is_weighted && (edgelist.p_edge_weights != nullptr));
}) == false,
"Invalid input argument: edgelists[].p_src_vertices and edgelists[].p_dst_vertices should not "
"be nullptr if edgelists[].number_of_edges > 0 and edgelists[].p_edge_weights should be "
"nullptr if unweighted or should not be nullptr if weighted and edgelists[].number_of_edges > "
"0.");
CUGRAPH_EXPECTS(edgelists.size() == static_cast<size_t>(col_comm_size),
"Invalid input argument: errneous edgelists.size().");
// optional expensive checks (part 1/3)
if (do_expensive_check) {
edge_t number_of_local_edges_sum{};
for (size_t i = 0; i < edgelists.size(); ++i) {
vertex_t major_first{};
vertex_t major_last{};
vertex_t minor_first{};
vertex_t minor_last{};
std::tie(major_first, major_last) = partition.get_matrix_partition_major_range(i);
std::tie(minor_first, minor_last) = partition.get_matrix_partition_minor_range();
number_of_local_edges_sum += edgelists[i].number_of_edges;
auto edge_first = thrust::make_zip_iterator(thrust::make_tuple(
store_transposed ? edgelists[i].p_dst_vertices : edgelists[i].p_src_vertices,
store_transposed ? edgelists[i].p_src_vertices : edgelists[i].p_dst_vertices));
// better use thrust::any_of once https://github.com/thrust/thrust/issues/1016 is resolved
CUGRAPH_EXPECTS(thrust::count_if(rmm::exec_policy(default_stream)->on(default_stream),
edge_first,
edge_first + edgelists[i].number_of_edges,
out_of_range_t<vertex_t>{
major_first, major_last, minor_first, minor_last}) == 0,
"Invalid input argument: edgelists[] have out-of-range values.");
}
number_of_local_edges_sum =
host_scalar_allreduce(comm, number_of_local_edges_sum, default_stream);
CUGRAPH_EXPECTS(
number_of_local_edges_sum == this->get_number_of_edges(),
"Invalid input argument: the sum of local edge counts does not match with number_of_edges.");
CUGRAPH_EXPECTS(
partition.get_vertex_partition_last(comm_size - 1) == number_of_vertices,
"Invalid input argument: vertex partition should cover [0, number_of_vertices).");
}
// convert edge list (COO) to compressed sparse format (CSR or CSC)
adj_matrix_partition_offsets_.reserve(edgelists.size());
adj_matrix_partition_indices_.reserve(edgelists.size());
adj_matrix_partition_weights_.reserve(properties.is_weighted ? edgelists.size() : 0);
for (size_t i = 0; i < edgelists.size(); ++i) {
vertex_t major_first{};
vertex_t major_last{};
vertex_t minor_first{};
vertex_t minor_last{};
std::tie(major_first, major_last) = partition.get_matrix_partition_major_range(i);
std::tie(minor_first, minor_last) = partition.get_matrix_partition_minor_range();
rmm::device_uvector<edge_t> offsets(0, default_stream);
rmm::device_uvector<vertex_t> indices(0, default_stream);
rmm::device_uvector<weight_t> weights(0, default_stream);
std::tie(offsets, indices, weights) =
edgelist_to_compressed_sparse<store_transposed>(edgelists[i],
major_first,
major_last,
minor_first,
minor_last,
properties.is_weighted,
this->get_handle_ptr()->get_stream());
adj_matrix_partition_offsets_.push_back(std::move(offsets));
adj_matrix_partition_indices_.push_back(std::move(indices));
if (properties.is_weighted) { adj_matrix_partition_weights_.push_back(std::move(weights)); }
}
// update degree-based segment offsets (to be used for graph analytics kernel optimization)
if (sorted_by_global_degree_within_vertex_partition) {
auto degrees = detail::compute_major_degrees(
*(this->get_handle_ptr()), adj_matrix_partition_offsets_, partition_);
// optional expensive checks (part 2/3)
if (do_expensive_check) {
CUGRAPH_EXPECTS(thrust::is_sorted(rmm::exec_policy(default_stream)->on(default_stream),
degrees.begin(),
degrees.end(),
thrust::greater<edge_t>{}),
"Invalid input argument: sorted_by_global_degree_within_vertex_partition is "
"set to true, but degrees are not non-ascending.");
}
static_assert(detail::num_segments_per_vertex_partition == 3);
static_assert((detail::low_degree_threshold <= detail::mid_degree_threshold) &&
(detail::mid_degree_threshold <= std::numeric_limits<edge_t>::max()));
rmm::device_uvector<edge_t> d_thresholds(detail::num_segments_per_vertex_partition - 1,
default_stream);
std::vector<edge_t> h_thresholds = {
static_cast<edge_t>(detail::mid_degree_threshold * col_comm_size),
static_cast<edge_t>(detail::low_degree_threshold * col_comm_size)};
raft::update_device(
d_thresholds.data(), h_thresholds.data(), h_thresholds.size(), default_stream);
rmm::device_uvector<vertex_t> segment_offsets(detail::num_segments_per_vertex_partition + 1,
default_stream);
// temporaries are necessary because the &&-overload of device_uvector is deleted
// Note that we must sync `default_stream` before these temporaries go out of scope to
// avoid use after free. (The syncs are at the end of this function)
auto zero_vertex = vertex_t{0};
auto vertex_count = static_cast<vertex_t>(degrees.size());
segment_offsets.set_element_async(0, zero_vertex, default_stream);
segment_offsets.set_element_async(
detail::num_segments_per_vertex_partition, vertex_count, default_stream);
thrust::upper_bound(rmm::exec_policy(default_stream)->on(default_stream),
degrees.begin(),
degrees.end(),
d_thresholds.begin(),
d_thresholds.end(),
segment_offsets.begin() + 1,
thrust::greater<edge_t>{});
rmm::device_uvector<vertex_t> aggregate_segment_offsets(col_comm_size * segment_offsets.size(),
default_stream);
col_comm.allgather(segment_offsets.data(),
aggregate_segment_offsets.data(),
segment_offsets.size(),
default_stream);
adj_matrix_partition_segment_offsets_.resize(aggregate_segment_offsets.size());
raft::update_host(adj_matrix_partition_segment_offsets_.data(),
aggregate_segment_offsets.data(),
aggregate_segment_offsets.size(),
default_stream);
auto status = col_comm.sync_stream(
default_stream); // this is necessary as degrees, d_thresholds, and segment_offsets will
// become out-of-scope once control flow exits this block and
// adj_matrix_partition_segment_offsets_ can be used right after return.
CUGRAPH_EXPECTS(status == raft::comms::status_t::SUCCESS, "sync_stream() failure.");
}
// optional expensive checks (part 3/3)
if (do_expensive_check) {
// FIXME: check for symmetry may better be implemented with transpose().
if (this->is_symmetric()) {}
// FIXME: check for duplicate edges may better be implemented after deciding whether to sort
// neighbor list or not.
if (!this->is_multigraph()) {}
}
}
template <typename vertex_t,
typename edge_t,
typename weight_t,
bool store_transposed,
bool multi_gpu>
graph_t<vertex_t, edge_t, weight_t, store_transposed, multi_gpu, std::enable_if_t<!multi_gpu>>::
graph_t(raft::handle_t const &handle,
edgelist_t<vertex_t, edge_t, weight_t> const &edgelist,
vertex_t number_of_vertices,
graph_properties_t properties,
bool sorted_by_degree,
bool do_expensive_check)
: detail::graph_base_t<vertex_t, edge_t, weight_t>(
handle, number_of_vertices, edgelist.number_of_edges, properties),
offsets_(rmm::device_uvector<edge_t>(0, handle.get_stream())),
indices_(rmm::device_uvector<vertex_t>(0, handle.get_stream())),
weights_(rmm::device_uvector<weight_t>(0, handle.get_stream()))
{
// cheap error checks
auto default_stream = this->get_handle_ptr()->get_stream();
CUGRAPH_EXPECTS(
((edgelist.number_of_edges == 0) || (edgelist.p_src_vertices != nullptr)) &&
((edgelist.number_of_edges == 0) || (edgelist.p_dst_vertices != nullptr)) &&
((properties.is_weighted &&
((edgelist.number_of_edges == 0) || (edgelist.p_edge_weights != nullptr))) ||
(!properties.is_weighted && (edgelist.p_edge_weights == nullptr))),
"Invalid input argument: edgelist.p_src_vertices and edgelist.p_dst_vertices should "
"not be nullptr if edgelist.number_of_edges > 0 and edgelist.p_edge_weights should be nullptr "
"if unweighted or should not be nullptr if weighted and edgelist.number_of_edges > 0.");
// optional expensive checks (part 1/3)
if (do_expensive_check) {
auto edge_first = thrust::make_zip_iterator(
thrust::make_tuple(store_transposed ? edgelist.p_dst_vertices : edgelist.p_src_vertices,
store_transposed ? edgelist.p_src_vertices : edgelist.p_dst_vertices));
// better use thrust::any_of once https://github.com/thrust/thrust/issues/1016 is resolved
CUGRAPH_EXPECTS(thrust::count_if(
rmm::exec_policy(default_stream)->on(default_stream),
edge_first,
edge_first + edgelist.number_of_edges,
out_of_range_t<vertex_t>{
0, this->get_number_of_vertices(), 0, this->get_number_of_vertices()}) == 0,
"Invalid input argument: edgelist have out-of-range values.");
// FIXME: check for symmetry may better be implemented with transpose().
if (this->is_symmetric()) {}
// FIXME: check for duplicate edges may better be implemented after deciding whether to sort
// neighbor list or not.
if (!this->is_multigraph()) {}
}
// convert edge list (COO) to compressed sparse format (CSR or CSC)
std::tie(offsets_, indices_, weights_) =
edgelist_to_compressed_sparse<store_transposed>(edgelist,
vertex_t{0},
this->get_number_of_vertices(),
vertex_t{0},
this->get_number_of_vertices(),
properties.is_weighted,
this->get_handle_ptr()->get_stream());
// update degree-based segment offsets (to be used for graph analytics kernel optimization)
if (sorted_by_degree) {
auto degree_first = thrust::make_transform_iterator(
thrust::make_counting_iterator(vertex_t{0}),
detail::degree_from_offsets_t<vertex_t, edge_t>{offsets_.data()});
// optional expensive checks (part 2/3)
if (do_expensive_check) {
CUGRAPH_EXPECTS(
thrust::is_sorted(rmm::exec_policy(default_stream)->on(default_stream),
degree_first,
degree_first + this->get_number_of_vertices(),
thrust::greater<edge_t>{}),
"Invalid input argument: sorted_by_degree is set to true, but degrees are not "
"non-ascending.");
}
static_assert(detail::num_segments_per_vertex_partition == 3);
static_assert((detail::low_degree_threshold <= detail::mid_degree_threshold) &&
(detail::mid_degree_threshold <= std::numeric_limits<edge_t>::max()));
rmm::device_uvector<edge_t> d_thresholds(detail::num_segments_per_vertex_partition - 1,
default_stream);
std::vector<edge_t> h_thresholds = {static_cast<edge_t>(detail::mid_degree_threshold),
static_cast<edge_t>(detail::low_degree_threshold)};
raft::update_device(
d_thresholds.data(), h_thresholds.data(), h_thresholds.size(), default_stream);
rmm::device_uvector<vertex_t> segment_offsets(detail::num_segments_per_vertex_partition + 1,
default_stream);
// temporaries are necessary because the &&-overload of device_uvector is deleted
// Note that we must sync `default_stream` before these temporaries go out of scope to
// avoid use after free. (The syncs are at the end of this function)
auto zero_vertex = vertex_t{0};
auto vertex_count = static_cast<vertex_t>(this->get_number_of_vertices());
segment_offsets.set_element_async(0, zero_vertex, default_stream);
segment_offsets.set_element_async(
detail::num_segments_per_vertex_partition, vertex_count, default_stream);
thrust::upper_bound(rmm::exec_policy(default_stream)->on(default_stream),
degree_first,
degree_first + this->get_number_of_vertices(),
d_thresholds.begin(),
d_thresholds.end(),
segment_offsets.begin() + 1,
thrust::greater<edge_t>{});
segment_offsets_.resize(segment_offsets.size());
raft::update_host(
segment_offsets_.data(), segment_offsets.data(), segment_offsets.size(), default_stream);
CUDA_TRY(cudaStreamSynchronize(
default_stream)); // this is necessary as segment_offsets_ can be used right after return.
}
// optional expensive checks (part 3/3)
if (do_expensive_check) {
// FIXME: check for symmetry may better be implemented with transpose().
if (this->is_symmetric()) {}
// FIXME: check for duplicate edges may better be implemented after deciding whether to sort
// neighbor list or not.
if (!this->is_multigraph()) {}
}
}
// explicit instantiation
template class graph_t<int32_t, int32_t, float, true, true>;
template class graph_t<int32_t, int32_t, float, false, true>;
template class graph_t<int32_t, int32_t, double, true, true>;
template class graph_t<int32_t, int32_t, double, false, true>;
template class graph_t<int32_t, int64_t, float, true, true>;
template class graph_t<int32_t, int64_t, float, false, true>;
template class graph_t<int32_t, int64_t, double, true, true>;
template class graph_t<int32_t, int64_t, double, false, true>;
template class graph_t<int64_t, int64_t, float, true, true>;
template class graph_t<int64_t, int64_t, float, false, true>;
template class graph_t<int64_t, int64_t, double, true, true>;
template class graph_t<int64_t, int64_t, double, false, true>;
//
template class graph_t<int32_t, int32_t, float, true, false>;
template class graph_t<int32_t, int32_t, float, false, false>;
template class graph_t<int32_t, int32_t, double, true, false>;
template class graph_t<int32_t, int32_t, double, false, false>;
template class graph_t<int32_t, int64_t, float, true, false>;
template class graph_t<int32_t, int64_t, float, false, false>;
template class graph_t<int32_t, int64_t, double, true, false>;
template class graph_t<int32_t, int64_t, double, false, false>;
template class graph_t<int64_t, int64_t, float, true, false>;
template class graph_t<int64_t, int64_t, float, false, false>;
template class graph_t<int64_t, int64_t, double, true, false>;
template class graph_t<int64_t, int64_t, double, false, false>;
} // namespace experimental
} // namespace cugraph
#include <cugraph/experimental/eidir_graph.hpp>
|
0fab17e9d787473b5553bd5705bfbd0acbabae07.hip |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "LinearSys.cuh"
#include <stdio.h>
#include <assert.h>
__device__ __constant__ LinearSys_SysParams const_sys_params;
LinearSys_SysParams const_sys_params_cpu;
bool LinearSys_commitSysParams(const LinearSys_SysParams & sysparams) {
hipError_t res = hipMemcpyToSymbol(
HIP_SYMBOL(const_sys_params), // HIP expects HIP_SYMBOL() when naming a __constant__ symbol
&sysparams,
sizeof(LinearSys_SysParams),
0,
hipMemcpyHostToDevice
);
const_sys_params_cpu = sysparams;
return res == hipSuccess;
}
template <typename T>
__global__ void ___generateDomain(
const CUDA_Volume binaryMask,
double value_zero,
double value_one,
CUDA_Volume output
) {
VOLUME_VOX_GUARD(output.res);
//Read mask
uchar c = read<uchar>(binaryMask.surf, vox);
//Write value
write<T>(output.surf, vox, (c > 0) ? T(value_one) : T(value_zero));
}
void LinearSys_GenerateDomain(
const CUDA_Volume & binaryMask,
double value_zero,
double value_one,
CUDA_Volume & output
) {
BLOCKS3D(8, output.res);
if (binaryMask.type != TYPE_UCHAR) {
exit(1);
}
if (output.type == TYPE_DOUBLE) {
___generateDomain<double> <<< numBlocks, block >>> (
binaryMask,
value_zero,
value_one,
output
);
}
else if(output.type == TYPE_FLOAT) {
___generateDomain<float> <<< numBlocks, block >>> (
binaryMask,
value_zero,
value_one,
output
);
}
else {
exit(2);
}
}
// Lin.sys at top level
template <typename T>
__device__ void getSystemTopKernel (
const CUDA_Volume & domain,
const uint3 & vox,
CUDA_Stencil_7 * out,
T * f,
T * x
) {
T Di = read<T>(domain.surf, vox);
T Dneg[3] = {
(read<T>(domain.surf, clampedVox(domain.res, vox, X_NEG)) + Di) * T(0.5),
(read<T>(domain.surf, clampedVox(domain.res, vox, Y_NEG)) + Di) * T(0.5),
(read<T>(domain.surf, clampedVox(domain.res, vox, Z_NEG)) + Di) * T(0.5)
};
T Dpos[3] = {
(read<T>(domain.surf, clampedVox(domain.res, vox, X_POS)) + Di) * T(0.5),
(read<T>(domain.surf, clampedVox(domain.res, vox, Y_POS)) + Di) * T(0.5),
(read<T>(domain.surf, clampedVox(domain.res, vox, Z_POS)) + Di) * T(0.5)
};
T coeffs[7];
bool useInMatrix[7];
coeffs[DIR_NONE] = T(0);
useInMatrix[DIR_NONE] = true;
for (uint j = 0; j < DIR_NONE; j++) {
const uint k = _getDirIndex(Dir(j));
const int sgn = _getDirSgn(Dir(j));
const T Dface = (sgn == -1) ? Dneg[k] : Dpos[k];
T cellDist[3] = { T(const_sys_params.cellDim[0]) , T(const_sys_params.cellDim[1]), T(const_sys_params.cellDim[2]) };
useInMatrix[j] = true;
if ((_at<uint>(vox, k) == 0 && sgn == -1) ||
(_at<uint>(vox, k) == _at<uint>(domain.res, k) - 1 && sgn == 1)
) {
cellDist[k] = const_sys_params.cellDim[k] * T(0.5);
useInMatrix[j] = false;
}
coeffs[j] = (Dface * const_sys_params.faceArea[k]) / cellDist[k];
//Subtract from diagonal
if (useInMatrix[j] || k == const_sys_params.dirPrimary)
coeffs[DIR_NONE] -= coeffs[j];
}
/*
Calculate right hand side
*/
const uint primaryRes = ((uint*)&domain.res)[const_sys_params.dirPrimary];
T rhs = T(0);
if (_at<uint>(vox, const_sys_params.dirPrimary) == 0) {
Dir dir = _getDir(const_sys_params.dirPrimary, -1);
rhs -= coeffs[dir] * const_sys_params.concetrationBegin;
}
else if (_at<uint>(vox, const_sys_params.dirPrimary) == primaryRes - 1) {
Dir dir = _getDir(const_sys_params.dirPrimary, 1);
rhs -= coeffs[dir] * const_sys_params.concetrationEnd;
}
*f = rhs;
/*
Initial guess
*/
uint primaryVox = _at<uint>(vox, const_sys_params.dirPrimary);
if (_getDirSgn(const_sys_params.dir) == 1)
*x = 1.0f - (primaryVox / T(primaryRes + 1));
else
*x = (primaryVox / T(primaryRes + 1));
#pragma unroll
for (uint j = 0; j < DIR_NONE; j++) {
if (!useInMatrix[j])
coeffs[j] = T(0);
}
#pragma unroll
for (uint i = 0; i < 7; i++) {
out->v[i] = coeffs[i];
}
}
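// Discretization note (illustrative summary, not from the original file): the
// kernel above assembles one row of a 7-point finite-volume stencil for a steady
// diffusion problem. Each face contributes an off-diagonal coefficient
//   coeff = D_face * faceArea / cellDist,  with  D_face = (D_i + D_neighbor) / 2,
// the diagonal accumulates minus the sum of the face coefficients, and Dirichlet
// concentrations at the two ends of the primary direction are folded into the
// right-hand side (rhs -= coeff * boundary_value), with cellDist halved there
// because the boundary value sits on the face rather than at a cell center.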
template<typename T>
void __global__ ___systemTopKernel(
CUDA_Volume domain,
CUDA_Stencil_7 * A,
CUDA_Volume f
) {
VOLUME_VOX_GUARD(domain.res);
size_t i = _linearIndex(domain.res, vox);
T fval = 0.0;
T xval = 0.0;
getSystemTopKernel<T>(domain, vox, &A[i], &fval, &xval);
write<T>(f.surf, vox, fval);
}
template<typename T>
void __global__ ___systemTopKernelWithGuess(
CUDA_Volume domain,
CUDA_Stencil_7 * A,
CUDA_Volume f,
CUDA_Volume x
) {
VOLUME_VOX_GUARD(domain.res);
size_t i = _linearIndex(domain.res, vox);
T fval = 0.0;
T xval = 0.0;
getSystemTopKernel<T>(domain, vox, &A[i], &fval, &xval);
write<T>(f.surf, vox, fval);
write<T>(x.surf, vox, xval);
}
void LinearSys_GenerateSystem(
const CUDA_Volume & domain,
CUDA_Stencil_7 * A0,
CUDA_Volume & f,
CUDA_Volume * x
) {
assert(domain.type == f.type);
if (x) {
assert(domain.type == x->type);
}
assert(domain.type == TYPE_DOUBLE || domain.type == TYPE_FLOAT);
BLOCKS3D(2, domain.res);
if (x == nullptr) {
if (domain.type == TYPE_DOUBLE)
___systemTopKernel<double> <<< numBlocks, block >>> (domain, A0, f);
else
___systemTopKernel<float> <<< numBlocks, block >>> (domain, A0, f);
}
else {
if (domain.type == TYPE_DOUBLE)
___systemTopKernelWithGuess<double> <<< numBlocks, block >>> (domain, A0, f, *x);
else
___systemTopKernelWithGuess<float> <<< numBlocks, block >>> (domain, A0, f, *x);
}
}
template <typename T>
__global__ void ___invertDiagKernel(
const CUDA_Stencil_7 * A,
CUDA_Volume ainvert
) {
VOLUME_VOX_GUARD(ainvert.res);
const size_t I = _linearIndex(ainvert.res, vox);
const T iadiag = T(1.0) / A[I].v[DIR_NONE];
write<T>(ainvert.surf, vox, iadiag);
}
void LinearSys_InvertSystemDiagTo(
const CUDA_Stencil_7 * A,
CUDA_Volume & ainvert
) {
assert(ainvert.type == TYPE_DOUBLE || ainvert.type == TYPE_FLOAT);
BLOCKS3D(4, ainvert.res);
if (ainvert.type == TYPE_DOUBLE) {
___invertDiagKernel<double> <<< numBlocks, block >>> (A, ainvert);
}
else {
___invertDiagKernel<float> <<< numBlocks, block >>> (A, ainvert);
}
}
__global__ void __residualKernel(
const CUDA_Stencil_7 * A0,
const CUDA_Volume x,
const CUDA_Volume f,
CUDA_Volume r
) {
VOLUME_IVOX_GUARD(x.res);
const size_t I = _linearIndex(x.res, ivox);
const double Axval = convolve3D_SystemTop(ivox, A0[I], x);
const double fval = read<double>(f.surf, ivox);
const double rval = fval - Axval;
write<double>(r.surf, ivox, rval);
}
void LinearSys_Residual(
const CUDA_Stencil_7 * A0,
const CUDA_Volume & x,
const CUDA_Volume & f,
CUDA_Volume & r
) {
BLOCKS3D(8, x.res);
__residualKernel <<< numBlocks, block >>> (A0, x, f, r);
}
__global__ void ___matrixVectorProductKernel(
const CUDA_Stencil_7 * A,
const CUDA_Volume x,
CUDA_Volume b
) {
VOLUME_IVOX_GUARD(x.res);
const size_t I = _linearIndex(x.res, ivox);
const double Axval = convolve3D_SystemTop(ivox, A[I], x);
write<double>(b.surf, ivox, Axval);
}
template<int blockSize, int apronSize>
double __device__ convolve3D_SystemTop_Shared(
const int3 & relVox, //in block relative voxel
const CUDA_Stencil_7 & k,
double * x
) {
double sum = 0.0;
//Index in apron
const int index = _linearIndex(make_int3(apronSize), relVox + make_int3(1));
const int3 stride = make_int3(1, apronSize, apronSize*apronSize);
sum += k.v[X_POS] * x[index + stride.x];
sum += k.v[X_NEG] * x[index - stride.x];
sum += k.v[Y_POS] * x[index + stride.y];
sum += k.v[Y_NEG] * x[index - stride.y];
sum += k.v[Z_POS] * x[index + stride.z];
sum += k.v[Z_NEG] * x[index - stride.z];
sum += k.v[DIR_NONE] * x[index];
return sum;
}
template <int blockSize>
__global__ void ___matrixVectorProductKernelShared(
const CUDA_Stencil_7 * A,
const CUDA_Volume x,
CUDA_Volume b
) {
const int apronSize = blockSize + 2;
const int totalBlockSize = blockSize*blockSize*blockSize;
const int totalApronSize = apronSize * apronSize * apronSize;
const int perThread = (totalApronSize + totalBlockSize - 1) / totalBlockSize;
__shared__ double _s[totalApronSize];
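// Sizing example (illustrative): with blockSize = 8 the apron is 10^3 = 1000
// doubles (~8 KB of shared memory per block), totalBlockSize = 512 threads, and
// perThread = ceil(1000 / 512) = 2 cooperative loads per thread in the loop below.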
VOLUME_BLOCK_IVOX; //defines blockIvox
//Position of apron
const int3 apronIvox = blockIvox - make_int3(1);
//Load apron to shared memory
int tid = _linearIndex(blockDim, threadIdx);
/*size_t blockIndex = _linearIndex(gridDim, blockIdx);
if (blockIndex == 1) {
printf("tid: %d, <%d, %d)\n", tid, min(tid*perThread, totalApronSize), min((tid+1)*perThread, totalApronSize));
}*/
// int3 relativeLoadVoxel = apronIvox +
for (int i = 0; i < perThread; i++) {
const int targetIndex = tid + i*totalBlockSize;
//const int targetIndex = tid*perThread + i;
if (targetIndex >= totalApronSize)
break;
const int3 targetPos = apronIvox + posFromLinear(make_int3(apronSize), targetIndex);
if (_isValidPos(x.res, targetPos))
_s[targetIndex] = read<double>(x.surf, targetPos);
else
_s[targetIndex] = 0.0;
}
__syncthreads();
//VOLUME_IVOX_GUARD(x.res);
//write<double>(b.surf, ivox, 1.0);
VOLUME_IVOX_GUARD(x.res);
const size_t I = _linearIndex(x.res, ivox);
CUDA_Stencil_7 a = A[I];
const double Axval = convolve3D_SystemTop_Shared<blockSize, apronSize>(make_int3(threadIdx.x, threadIdx.y, threadIdx.z), a, _s);
//const double Axval = convolve3D_SystemTop(ivox, A[I], x);
write<double>(b.surf, ivox, Axval);
}
void LinearSys_MatrixVectorProduct(
const CUDA_Stencil_7 * A,
const CUDA_Volume & x,
CUDA_Volume & b
) {
const bool optimized = true;
if (!optimized) {
BLOCKS3D(8, x.res);
___matrixVectorProductKernel <<< numBlocks, block >>> (A, x, b);
}
else {
const int blockSize = 8;
BLOCKS3D(blockSize, x.res);
___matrixVectorProductKernelShared<blockSize> <<< numBlocks, block >>> (A, x, b);
}
}
| 0fab17e9d787473b5553bd5705bfbd0acbabae07.cu |
#include "LinearSys.cuh"
#include <stdio.h>
#include <assert.h>
__device__ __constant__ LinearSys_SysParams const_sys_params;
LinearSys_SysParams const_sys_params_cpu;
bool LinearSys_commitSysParams(const LinearSys_SysParams & sysparams) {
cudaError_t res = cudaMemcpyToSymbol(
const_sys_params,
&sysparams,
sizeof(LinearSys_SysParams),
0,
cudaMemcpyHostToDevice
);
const_sys_params_cpu = sysparams;
return res == cudaSuccess;
}
template <typename T>
__global__ void ___generateDomain(
const CUDA_Volume binaryMask,
double value_zero,
double value_one,
CUDA_Volume output
) {
VOLUME_VOX_GUARD(output.res);
//Read mask
uchar c = read<uchar>(binaryMask.surf, vox);
//Write value
write<T>(output.surf, vox, (c > 0) ? T(value_one) : T(value_zero));
}
void LinearSys_GenerateDomain(
const CUDA_Volume & binaryMask,
double value_zero,
double value_one,
CUDA_Volume & output
) {
BLOCKS3D(8, output.res);
if (binaryMask.type != TYPE_UCHAR) {
exit(1);
}
if (output.type == TYPE_DOUBLE) {
___generateDomain<double> <<< numBlocks, block >>> (
binaryMask,
value_zero,
value_one,
output
);
}
else if(output.type == TYPE_FLOAT) {
___generateDomain<float> <<< numBlocks, block >>> (
binaryMask,
value_zero,
value_one,
output
);
}
else {
exit(2);
}
}
// Lin.sys at top level
template <typename T>
__device__ void getSystemTopKernel (
const CUDA_Volume & domain,
const uint3 & vox,
CUDA_Stencil_7 * out,
T * f,
T * x
) {
T Di = read<T>(domain.surf, vox);
T Dneg[3] = {
(read<T>(domain.surf, clampedVox(domain.res, vox, X_NEG)) + Di) * T(0.5),
(read<T>(domain.surf, clampedVox(domain.res, vox, Y_NEG)) + Di) * T(0.5),
(read<T>(domain.surf, clampedVox(domain.res, vox, Z_NEG)) + Di) * T(0.5)
};
T Dpos[3] = {
(read<T>(domain.surf, clampedVox(domain.res, vox, X_POS)) + Di) * T(0.5),
(read<T>(domain.surf, clampedVox(domain.res, vox, Y_POS)) + Di) * T(0.5),
(read<T>(domain.surf, clampedVox(domain.res, vox, Z_POS)) + Di) * T(0.5)
};
T coeffs[7];
bool useInMatrix[7];
coeffs[DIR_NONE] = T(0);
useInMatrix[DIR_NONE] = true;
for (uint j = 0; j < DIR_NONE; j++) {
const uint k = _getDirIndex(Dir(j));
const int sgn = _getDirSgn(Dir(j));
const T Dface = (sgn == -1) ? Dneg[k] : Dpos[k];
T cellDist[3] = { T(const_sys_params.cellDim[0]) , T(const_sys_params.cellDim[1]), T(const_sys_params.cellDim[2]) };
useInMatrix[j] = true;
if ((_at<uint>(vox, k) == 0 && sgn == -1) ||
(_at<uint>(vox, k) == _at<uint>(domain.res, k) - 1 && sgn == 1)
) {
cellDist[k] = const_sys_params.cellDim[k] * T(0.5);
useInMatrix[j] = false;
}
coeffs[j] = (Dface * const_sys_params.faceArea[k]) / cellDist[k];
//Subtract from diagonal
if (useInMatrix[j] || k == const_sys_params.dirPrimary)
coeffs[DIR_NONE] -= coeffs[j];
}
/*
Calculate right hand side
*/
const uint primaryRes = ((uint*)&domain.res)[const_sys_params.dirPrimary];
T rhs = T(0);
if (_at<uint>(vox, const_sys_params.dirPrimary) == 0) {
Dir dir = _getDir(const_sys_params.dirPrimary, -1);
rhs -= coeffs[dir] * const_sys_params.concetrationBegin;
}
else if (_at<uint>(vox, const_sys_params.dirPrimary) == primaryRes - 1) {
Dir dir = _getDir(const_sys_params.dirPrimary, 1);
rhs -= coeffs[dir] * const_sys_params.concetrationEnd;
}
*f = rhs;
/*
Initial guess
*/
uint primaryVox = _at<uint>(vox, const_sys_params.dirPrimary);
if (_getDirSgn(const_sys_params.dir) == 1)
*x = 1.0f - (primaryVox / T(primaryRes + 1));
else
*x = (primaryVox / T(primaryRes + 1));
#pragma unroll
for (uint j = 0; j < DIR_NONE; j++) {
if (!useInMatrix[j])
coeffs[j] = T(0);
}
#pragma unroll
for (uint i = 0; i < 7; i++) {
out->v[i] = coeffs[i];
}
}
template<typename T>
void __global__ ___systemTopKernel(
CUDA_Volume domain,
CUDA_Stencil_7 * A,
CUDA_Volume f
) {
VOLUME_VOX_GUARD(domain.res);
size_t i = _linearIndex(domain.res, vox);
T fval = 0.0;
T xval = 0.0;
getSystemTopKernel<T>(domain, vox, &A[i], &fval, &xval);
write<T>(f.surf, vox, fval);
}
template<typename T>
void __global__ ___systemTopKernelWithGuess(
CUDA_Volume domain,
CUDA_Stencil_7 * A,
CUDA_Volume f,
CUDA_Volume x
) {
VOLUME_VOX_GUARD(domain.res);
size_t i = _linearIndex(domain.res, vox);
T fval = 0.0;
T xval = 0.0;
getSystemTopKernel<T>(domain, vox, &A[i], &fval, &xval);
write<T>(f.surf, vox, fval);
write<T>(x.surf, vox, xval);
}
void LinearSys_GenerateSystem(
const CUDA_Volume & domain,
CUDA_Stencil_7 * A0,
CUDA_Volume & f,
CUDA_Volume * x
) {
assert(domain.type == f.type);
if (x) {
assert(domain.type == x->type);
}
assert(domain.type == TYPE_DOUBLE || domain.type == TYPE_FLOAT);
BLOCKS3D(2, domain.res);
if (x == nullptr) {
if (domain.type == TYPE_DOUBLE)
___systemTopKernel<double> <<< numBlocks, block >>> (domain, A0, f);
else
___systemTopKernel<float> <<< numBlocks, block >>> (domain, A0, f);
}
else {
if (domain.type == TYPE_DOUBLE)
___systemTopKernelWithGuess<double> <<< numBlocks, block >>> (domain, A0, f, *x);
else
___systemTopKernelWithGuess<float> <<< numBlocks, block >>> (domain, A0, f, *x);
}
}
template <typename T>
__global__ void ___invertDiagKernel(
const CUDA_Stencil_7 * A,
CUDA_Volume ainvert
) {
VOLUME_VOX_GUARD(ainvert.res);
const size_t I = _linearIndex(ainvert.res, vox);
const T iadiag = T(1.0) / A[I].v[DIR_NONE];
write<T>(ainvert.surf, vox, iadiag);
}
void LinearSys_InvertSystemDiagTo(
const CUDA_Stencil_7 * A,
CUDA_Volume & ainvert
) {
assert(ainvert.type == TYPE_DOUBLE || ainvert.type == TYPE_FLOAT);
BLOCKS3D(4, ainvert.res);
if (ainvert.type == TYPE_DOUBLE) {
___invertDiagKernel<double> <<< numBlocks, block >>> (A, ainvert);
}
else {
___invertDiagKernel<float> <<< numBlocks, block >>> (A, ainvert);
}
}
__global__ void __residualKernel(
const CUDA_Stencil_7 * A0,
const CUDA_Volume x,
const CUDA_Volume f,
CUDA_Volume r
) {
VOLUME_IVOX_GUARD(x.res);
const size_t I = _linearIndex(x.res, ivox);
const double Axval = convolve3D_SystemTop(ivox, A0[I], x);
const double fval = read<double>(f.surf, ivox);
const double rval = fval - Axval;
write<double>(r.surf, ivox, rval);
}
void LinearSys_Residual(
const CUDA_Stencil_7 * A0,
const CUDA_Volume & x,
const CUDA_Volume & f,
CUDA_Volume & r
) {
BLOCKS3D(8, x.res);
__residualKernel <<< numBlocks, block >>> (A0, x, f, r);
}
__global__ void ___matrixVectorProductKernel(
const CUDA_Stencil_7 * A,
const CUDA_Volume x,
CUDA_Volume b
) {
VOLUME_IVOX_GUARD(x.res);
const size_t I = _linearIndex(x.res, ivox);
const double Axval = convolve3D_SystemTop(ivox, A[I], x);
write<double>(b.surf, ivox, Axval);
}
template<int blockSize, int apronSize>
double __device__ convolve3D_SystemTop_Shared(
const int3 & relVox, //in block relative voxel
const CUDA_Stencil_7 & k,
double * x
) {
double sum = 0.0;
//Index in apron
const int index = _linearIndex(make_int3(apronSize), relVox + make_int3(1));
const int3 stride = make_int3(1, apronSize, apronSize*apronSize);
sum += k.v[X_POS] * x[index + stride.x];
sum += k.v[X_NEG] * x[index - stride.x];
sum += k.v[Y_POS] * x[index + stride.y];
sum += k.v[Y_NEG] * x[index - stride.y];
sum += k.v[Z_POS] * x[index + stride.z];
sum += k.v[Z_NEG] * x[index - stride.z];
sum += k.v[DIR_NONE] * x[index];
return sum;
}
template <int blockSize>
__global__ void ___matrixVectorProductKernelShared(
const CUDA_Stencil_7 * A,
const CUDA_Volume x,
CUDA_Volume b
) {
const int apronSize = blockSize + 2;
const int totalBlockSize = blockSize*blockSize*blockSize;
const int totalApronSize = apronSize * apronSize * apronSize;
const int perThread = (totalApronSize + totalBlockSize - 1) / totalBlockSize;
__shared__ double _s[totalApronSize];
VOLUME_BLOCK_IVOX; //defines blockIvox
//Position of apron
const int3 apronIvox = blockIvox - make_int3(1);
//Load apron to shared memory
int tid = _linearIndex(blockDim, threadIdx);
/*size_t blockIndex = _linearIndex(gridDim, blockIdx);
if (blockIndex == 1) {
printf("tid: %d, <%d, %d)\n", tid, min(tid*perThread, totalApronSize), min((tid+1)*perThread, totalApronSize));
}*/
// int3 relativeLoadVoxel = apronIvox +
for (int i = 0; i < perThread; i++) {
const int targetIndex = tid + i*totalBlockSize;
//const int targetIndex = tid*perThread + i;
if (targetIndex >= totalApronSize)
break;
const int3 targetPos = apronIvox + posFromLinear(make_int3(apronSize), targetIndex);
if (_isValidPos(x.res, targetPos))
_s[targetIndex] = read<double>(x.surf, targetPos);
else
_s[targetIndex] = 0.0;
}
__syncthreads();
//VOLUME_IVOX_GUARD(x.res);
//write<double>(b.surf, ivox, 1.0);
VOLUME_IVOX_GUARD(x.res);
const size_t I = _linearIndex(x.res, ivox);
CUDA_Stencil_7 a = A[I];
const double Axval = convolve3D_SystemTop_Shared<blockSize, apronSize>(make_int3(threadIdx.x, threadIdx.y, threadIdx.z), a, _s);
//const double Axval = convolve3D_SystemTop(ivox, A[I], x);
write<double>(b.surf, ivox, Axval);
}
void LinearSys_MatrixVectorProduct(
const CUDA_Stencil_7 * A,
const CUDA_Volume & x,
CUDA_Volume & b
) {
const bool optimized = true;
if (!optimized) {
BLOCKS3D(8, x.res);
___matrixVectorProductKernel <<< numBlocks, block >>> (A, x, b);
}
else {
const int blockSize = 8;
BLOCKS3D(blockSize, x.res);
___matrixVectorProductKernelShared<blockSize> <<< numBlocks, block >>> (A, x, b);
}
}
|
aa62b73530800788eb6157517a2722d8a23b2de0.hip |
// !!! This is a file automatically generated by hipify!!!
/*
*
* timeScan.cu
*
* Microbenchmark to time scan performance.
*
* Build with: nvcc -I ../../chLib <options> timeScan.cu
* Requires: No minimum SM requirement.
*
* Copyright (c) 2011-2012, Archaea Software, LLC.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <stdlib.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <chTimer.h>
#include <chAssert.h>
#include <chError.h>
#include "scanWarp.cuh"
#include "scanBlock.cuh"
#include "scanZeroPad.cuh"
#define min(a,b) ((a)<(b)?(a):(b))
int *g_hostIn, *g_hostOut;
#include "scanFan.cuh"
#include "scanReduceThenScan.cuh"
#include "scanReduceThenScan_0.cuh"
#include "scan2Level.cuh"
#include "ScanThrust.cuh"
void
RandomArray( int *out, size_t N, int modulus )
{
for ( size_t i = 0; i < N; i++ ) {
out[i] = rand() % modulus;
}
}
template<class T>
double
TimeScan( void (*pfnScanGPU)(T *, const T *, size_t, int),
size_t N,
int numThreads,
int cIterations )
{
chTimerTimestamp start, stop;
hipError_t status;
double ret = 0.0;
int *inGPU = 0;
int *outGPU = 0;
int *inCPU = (int *) malloc( N*sizeof(T) );
int *outCPU = (int *) malloc( N*sizeof(T) );
if ( 0==inCPU || 0==outCPU )
goto Error;
cuda(Malloc( &inGPU, N*sizeof(T) ) );
cuda(Malloc( &outGPU, N*sizeof(T) ) );
RandomArray( inCPU, N, N );
cuda(Memcpy( inGPU, inCPU, N*sizeof(T), hipMemcpyHostToDevice ) );
chTimerGetTime( &start );
for ( int i = 0; i < cIterations; i++ ) {
pfnScanGPU( outGPU, inGPU, N, numThreads );
}
if ( hipSuccess != hipDeviceSynchronize() )
goto Error;
chTimerGetTime( &stop );
// ints per second
ret = (double) cIterations*N / chTimerElapsedTime( &start, &stop );
Error:
hipFree( outGPU );
hipFree( inGPU );
free( inCPU );
free( outCPU );
return ret;
}
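/*
 * Worked example of the return value (illustrative numbers, not measured
 * here): scanning N = 64*1048576 ints for cIterations = 10 in 0.5 s gives
 * 10 * 67108864 / 0.5 = 1.34e9 ints/s, which main() below reports as
 * roughly 1280 Mints/s after dividing by 1048576.
 */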
int
main( int argc, char *argv[] )
{
int maxThreads;
hipSetDevice( 0 );
hipSetDeviceFlags( hipDeviceMapHost );
{
hipDeviceProp_t prop;
hipGetDeviceProperties( &prop, 0 );
maxThreads = prop.maxThreadsPerBlock;
}
printf( "ScanThrust (64M): %.2f Mints/s\n", TimeScan<int>(ScanThrust<int>, 64*1048576, 128, 10)/1048576 );
printf( "scanFan (64M, 128 threads/block): %.2f Mints/s\n", TimeScan<int>(scanFan<int>, 64*1048576, 128, 10)/1048576 );
printf( "scanFan (64M, 256 threads/block): %.2f Mints/s\n", TimeScan<int>(scanFan<int>, 64*1048576, 256, 10)/1048576 );
printf( "scanFan (64M, 512 threads/block): %.2f Mints/s\n", TimeScan<int>(scanFan<int>, 64*1048576, 512, 10)/1048576 );
if ( maxThreads >= 1024 )
printf( "scanFan (64M, 1024 threads/block): %.2f Mints/s\n", TimeScan<int>(scanFan<int>, 64*1048576, 1024, 10)/1048576 );
printf( "scanReduceThenScan (64M, 128 threads/block): %.2f Mints/s\n", TimeScan<int>(scanReduceThenScan<int>, 64*1048576, 128, 10)/1048576 );
printf( "scanReduceThenScan (64M, 256 threads/block): %.2f Mints/s\n", TimeScan<int>(scanReduceThenScan<int>, 64*1048576, 256, 10)/1048576 );
printf( "scanReduceThenScan (64M, 512 threads/block): %.2f Mints/s\n", TimeScan<int>(scanReduceThenScan<int>, 64*1048576, 512, 10)/1048576 );
if ( maxThreads >= 1024 )
printf( "scanReduceThenScan (64M, 1024 threads/block): %.2f Mints/s\n", TimeScan<int>(scanReduceThenScan<int>, 64*1048576, 1024, 10)/1048576 );
printf( "scanReduceThenScan_0 (64M, 128 threads/block): %.2f Mints/s\n", TimeScan<int>(scanReduceThenScan_0<int>, 64*1048576, 128, 10)/1048576 );
printf( "scanReduceThenScan_0 (64M, 256 threads/block): %.2f Mints/s\n", TimeScan<int>(scanReduceThenScan_0<int>, 64*1048576, 256, 10)/1048576 );
printf( "scanReduceThenScan_0 (64M, 512 threads/block): %.2f Mints/s\n", TimeScan<int>(scanReduceThenScan_0<int>, 64*1048576, 512, 10)/1048576 );
if ( maxThreads >= 1024 )
printf( "scanReduceThenScan_0 (64M, 1024 threads/block): %.2f Mints/s\n", TimeScan<int>(scanReduceThenScan_0<int>, 64*1048576, 1024, 10)/1048576 );
printf( "scan2Level_0 (64M, 128 threads/block): %.2f Mints/s\n", TimeScan<int>(scan2Level<int,true>, 64*1048576, 128, 10)/1048576 );
printf( "scan2Level_0 (64M, 256 threads/block): %.2f Mints/s\n", TimeScan<int>(scan2Level<int,true>, 64*1048576, 256, 10)/1048576 );
printf( "scan2Level_0 (64M, 512 threads/block): %.2f Mints/s\n", TimeScan<int>(scan2Level<int,true>, 64*1048576, 512, 10)/1048576 );
if ( maxThreads >= 1024 )
printf( "scan2Level_0 (64M, 1024 threads/block): %.2f Mints/s\n", TimeScan<int>(scan2Level<int,true>, 64*1048576, 1024, 10)/1048576 );
}
| aa62b73530800788eb6157517a2722d8a23b2de0.cu | /*
*
* timeScan.cu
*
* Microbenchmark to time scan performance.
*
* Build with: nvcc -I ../../chLib <options> timeScan.cu
* Requires: No minimum SM requirement.
*
* Copyright (c) 2011-2012, Archaea Software, LLC.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <stdlib.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <chTimer.h>
#include <chAssert.h>
#include <chError.h>
#include "scanWarp.cuh"
#include "scanBlock.cuh"
#include "scanZeroPad.cuh"
#define min(a,b) ((a)<(b)?(a):(b))
int *g_hostIn, *g_hostOut;
#include "scanFan.cuh"
#include "scanReduceThenScan.cuh"
#include "scanReduceThenScan_0.cuh"
#include "scan2Level.cuh"
#include "ScanThrust.cuh"
void
RandomArray( int *out, size_t N, int modulus )
{
for ( size_t i = 0; i < N; i++ ) {
out[i] = rand() % modulus;
}
}
template<class T>
double
TimeScan( void (*pfnScanGPU)(T *, const T *, size_t, int),
size_t N,
int numThreads,
int cIterations )
{
chTimerTimestamp start, stop;
cudaError_t status;
double ret = 0.0;
int *inGPU = 0;
int *outGPU = 0;
int *inCPU = (int *) malloc( N*sizeof(T) );
int *outCPU = (int *) malloc( N*sizeof(T) );
if ( 0==inCPU || 0==outCPU )
goto Error;
cuda(Malloc( &inGPU, N*sizeof(T) ) );
cuda(Malloc( &outGPU, N*sizeof(T) ) );
RandomArray( inCPU, N, N );
cuda(Memcpy( inGPU, inCPU, N*sizeof(T), cudaMemcpyHostToDevice ) );
chTimerGetTime( &start );
for ( int i = 0; i < cIterations; i++ ) {
pfnScanGPU( outGPU, inGPU, N, numThreads );
}
if ( cudaSuccess != cudaDeviceSynchronize() )
goto Error;
chTimerGetTime( &stop );
// ints per second
ret = (double) cIterations*N / chTimerElapsedTime( &start, &stop );
Error:
cudaFree( outGPU );
cudaFree( inGPU );
free( inCPU );
free( outCPU );
return ret;
}
int
main( int argc, char *argv[] )
{
int maxThreads;
cudaSetDevice( 0 );
cudaSetDeviceFlags( cudaDeviceMapHost );
{
cudaDeviceProp prop;
cudaGetDeviceProperties( &prop, 0 );
maxThreads = prop.maxThreadsPerBlock;
}
printf( "ScanThrust (64M): %.2f Mints/s\n", TimeScan<int>(ScanThrust<int>, 64*1048576, 128, 10)/1048576 );
printf( "scanFan (64M, 128 threads/block): %.2f Mints/s\n", TimeScan<int>(scanFan<int>, 64*1048576, 128, 10)/1048576 );
printf( "scanFan (64M, 256 threads/block): %.2f Mints/s\n", TimeScan<int>(scanFan<int>, 64*1048576, 256, 10)/1048576 );
printf( "scanFan (64M, 512 threads/block): %.2f Mints/s\n", TimeScan<int>(scanFan<int>, 64*1048576, 512, 10)/1048576 );
if ( maxThreads >= 1024 )
printf( "scanFan (64M, 1024 threads/block): %.2f Mints/s\n", TimeScan<int>(scanFan<int>, 64*1048576, 1024, 10)/1048576 );
printf( "scanReduceThenScan (64M, 128 threads/block): %.2f Mints/s\n", TimeScan<int>(scanReduceThenScan<int>, 64*1048576, 128, 10)/1048576 );
printf( "scanReduceThenScan (64M, 256 threads/block): %.2f Mints/s\n", TimeScan<int>(scanReduceThenScan<int>, 64*1048576, 256, 10)/1048576 );
printf( "scanReduceThenScan (64M, 512 threads/block): %.2f Mints/s\n", TimeScan<int>(scanReduceThenScan<int>, 64*1048576, 512, 10)/1048576 );
if ( maxThreads >= 1024 )
printf( "scanReduceThenScan (64M, 1024 threads/block): %.2f Mints/s\n", TimeScan<int>(scanReduceThenScan<int>, 64*1048576, 1024, 10)/1048576 );
printf( "scanReduceThenScan_0 (64M, 128 threads/block): %.2f Mints/s\n", TimeScan<int>(scanReduceThenScan_0<int>, 64*1048576, 128, 10)/1048576 );
printf( "scanReduceThenScan_0 (64M, 256 threads/block): %.2f Mints/s\n", TimeScan<int>(scanReduceThenScan_0<int>, 64*1048576, 256, 10)/1048576 );
printf( "scanReduceThenScan_0 (64M, 512 threads/block): %.2f Mints/s\n", TimeScan<int>(scanReduceThenScan_0<int>, 64*1048576, 512, 10)/1048576 );
if ( maxThreads >= 1024 )
printf( "scanReduceThenScan_0 (64M, 1024 threads/block): %.2f Mints/s\n", TimeScan<int>(scanReduceThenScan_0<int>, 64*1048576, 1024, 10)/1048576 );
printf( "scan2Level_0 (64M, 128 threads/block): %.2f Mints/s\n", TimeScan<int>(scan2Level<int,true>, 64*1048576, 128, 10)/1048576 );
printf( "scan2Level_0 (64M, 256 threads/block): %.2f Mints/s\n", TimeScan<int>(scan2Level<int,true>, 64*1048576, 256, 10)/1048576 );
printf( "scan2Level_0 (64M, 512 threads/block): %.2f Mints/s\n", TimeScan<int>(scan2Level<int,true>, 64*1048576, 512, 10)/1048576 );
if ( maxThreads >= 1024 )
printf( "scan2Level_0 (64M, 1024 threads/block): %.2f Mints/s\n", TimeScan<int>(scan2Level<int,true>, 64*1048576, 1024, 10)/1048576 );
}
|
e110575c04b76c774de62520dab86f6bed88786a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@generated from zswapblk.cu normal z -> d, Sat Nov 15 19:53:59 2014
*/
#include "common_magma.h"
#define BLOCK_SIZE 64
/*********************************************************/
/*
* Blocked version: swap several pairs of lines
*/
typedef struct {
double *A;
double *B;
int n, ldda, lddb, npivots;
short ipiv[BLOCK_SIZE];
} magmagpu_dswapblk_params_t;
__global__ void magmagpu_dswapblkrm( magmagpu_dswapblk_params_t params )
{
unsigned int y = threadIdx.x + blockDim.x*blockIdx.x;
if( y < params.n )
{
double *A = params.A + y - params.ldda;
double *B = params.B + y;
for( int i = 0; i < params.npivots; i++ )
{
A += params.ldda;
if ( params.ipiv[i] == -1 )
continue;
double tmp1 = *A;
double *tmp2 = B + params.ipiv[i]*params.lddb;
*A = *tmp2;
*tmp2 = tmp1;
}
}
}
__global__ void magmagpu_dswapblkcm( magmagpu_dswapblk_params_t params )
{
unsigned int y = threadIdx.x + blockDim.x*blockIdx.x;
unsigned int offset1 = y*params.ldda;
unsigned int offset2 = y*params.lddb;
if( y < params.n )
{
double *A = params.A + offset1 - 1;
double *B = params.B + offset2;
for( int i = 0; i < params.npivots; i++ )
{
A++;
if ( params.ipiv[i] == -1 )
continue;
double tmp1 = *A;
double *tmp2 = B + params.ipiv[i];
*A = *tmp2;
*tmp2 = tmp1;
}
}
__syncthreads();
}
/**
@ingroup magma_dblas2
********************************************************************/
extern "C" void
magmablas_dswapblk_q(
magma_order_t order, magma_int_t n,
magmaDouble_ptr dA, magma_int_t ldda,
magmaDouble_ptr dB, magma_int_t lddb,
magma_int_t i1, magma_int_t i2,
const magma_int_t *ipiv, magma_int_t inci, magma_int_t offset,
magma_queue_t queue )
{
magma_int_t blocksize = 64;
dim3 blocks( (n+blocksize-1) / blocksize, 1, 1);
magma_int_t k, im;
/* Quick return */
if ( n == 0 )
return;
if ( order == MagmaColMajor ) {
for( k=(i1-1); k<i2; k+=BLOCK_SIZE )
{
magma_int_t sb = min(BLOCK_SIZE, i2-k);
magmagpu_dswapblk_params_t params = { dA+k, dB, n, ldda, lddb, sb };
for( magma_int_t j = 0; j < sb; j++ )
{
im = ipiv[(k+j)*inci] - 1;
if ( (k+j) == im )
params.ipiv[j] = -1;
else
params.ipiv[j] = im - offset;
}
hipLaunchKernelGGL(( magmagpu_dswapblkcm), dim3(blocks), dim3(blocksize), 0, queue , params );
}
}
else {
for( k=(i1-1); k<i2; k+=BLOCK_SIZE )
{
magma_int_t sb = min(BLOCK_SIZE, i2-k);
magmagpu_dswapblk_params_t params = { dA+k*ldda, dB, n, ldda, lddb, sb };
for( magma_int_t j = 0; j < sb; j++ )
{
im = ipiv[(k+j)*inci] - 1;
if ( (k+j) == im )
params.ipiv[j] = -1;
else
params.ipiv[j] = im - offset;
}
hipLaunchKernelGGL(( magmagpu_dswapblkrm), dim3(blocks), dim3(blocksize), 0, queue , params );
}
}
}
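/*
 * Worked example of the pivot packing above (illustrative values):
 * with i1 = 1, offset = 0 and 1-based ipiv = {2, 2, 3}, the first block
 * is handed params.ipiv = {1, -1, -1}: row 0 swaps with row 1
 * (im = 2 - 1 = 1), while rows 1 and 2 already sit on their pivots
 * (k + j == im), so the kernels skip them via the -1 sentinel.
 */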
/**
@see magmablas_dswapblk_q
@ingroup magma_dblas2
********************************************************************/
extern "C" void
magmablas_dswapblk(
magma_order_t order, magma_int_t n,
magmaDouble_ptr dA, magma_int_t ldda,
magmaDouble_ptr dB, magma_int_t lddb,
magma_int_t i1, magma_int_t i2,
const magma_int_t *ipiv, magma_int_t inci, magma_int_t offset )
{
magmablas_dswapblk_q(
order, n, dA, ldda, dB, lddb, i1, i2, ipiv, inci, offset, magma_stream );
}
| e110575c04b76c774de62520dab86f6bed88786a.cu | /*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@generated from zswapblk.cu normal z -> d, Sat Nov 15 19:53:59 2014
*/
#include "common_magma.h"
#define BLOCK_SIZE 64
/*********************************************************/
/*
* Blocked version: swap several pairs of lines
*/
typedef struct {
double *A;
double *B;
int n, ldda, lddb, npivots;
short ipiv[BLOCK_SIZE];
} magmagpu_dswapblk_params_t;
__global__ void magmagpu_dswapblkrm( magmagpu_dswapblk_params_t params )
{
unsigned int y = threadIdx.x + blockDim.x*blockIdx.x;
if( y < params.n )
{
double *A = params.A + y - params.ldda;
double *B = params.B + y;
for( int i = 0; i < params.npivots; i++ )
{
A += params.ldda;
if ( params.ipiv[i] == -1 )
continue;
double tmp1 = *A;
double *tmp2 = B + params.ipiv[i]*params.lddb;
*A = *tmp2;
*tmp2 = tmp1;
}
}
}
__global__ void magmagpu_dswapblkcm( magmagpu_dswapblk_params_t params )
{
unsigned int y = threadIdx.x + blockDim.x*blockIdx.x;
unsigned int offset1 = y*params.ldda;
unsigned int offset2 = y*params.lddb;
if( y < params.n )
{
double *A = params.A + offset1 - 1;
double *B = params.B + offset2;
for( int i = 0; i < params.npivots; i++ )
{
A++;
if ( params.ipiv[i] == -1 )
continue;
double tmp1 = *A;
double *tmp2 = B + params.ipiv[i];
*A = *tmp2;
*tmp2 = tmp1;
}
}
__syncthreads();
}
/**
@ingroup magma_dblas2
********************************************************************/
extern "C" void
magmablas_dswapblk_q(
magma_order_t order, magma_int_t n,
magmaDouble_ptr dA, magma_int_t ldda,
magmaDouble_ptr dB, magma_int_t lddb,
magma_int_t i1, magma_int_t i2,
const magma_int_t *ipiv, magma_int_t inci, magma_int_t offset,
magma_queue_t queue )
{
magma_int_t blocksize = 64;
dim3 blocks( (n+blocksize-1) / blocksize, 1, 1);
magma_int_t k, im;
/* Quick return */
if ( n == 0 )
return;
if ( order == MagmaColMajor ) {
for( k=(i1-1); k<i2; k+=BLOCK_SIZE )
{
magma_int_t sb = min(BLOCK_SIZE, i2-k);
magmagpu_dswapblk_params_t params = { dA+k, dB, n, ldda, lddb, sb };
for( magma_int_t j = 0; j < sb; j++ )
{
im = ipiv[(k+j)*inci] - 1;
if ( (k+j) == im )
params.ipiv[j] = -1;
else
params.ipiv[j] = im - offset;
}
magmagpu_dswapblkcm<<< blocks, blocksize, 0, queue >>>( params );
}
}
else {
for( k=(i1-1); k<i2; k+=BLOCK_SIZE )
{
magma_int_t sb = min(BLOCK_SIZE, i2-k);
magmagpu_dswapblk_params_t params = { dA+k*ldda, dB, n, ldda, lddb, sb };
for( magma_int_t j = 0; j < sb; j++ )
{
im = ipiv[(k+j)*inci] - 1;
if ( (k+j) == im )
params.ipiv[j] = -1;
else
params.ipiv[j] = im - offset;
}
magmagpu_dswapblkrm<<< blocks, blocksize, 0, queue >>>( params );
}
}
}
/**
@see magmablas_dswapblk_q
@ingroup magma_dblas2
********************************************************************/
extern "C" void
magmablas_dswapblk(
magma_order_t order, magma_int_t n,
magmaDouble_ptr dA, magma_int_t ldda,
magmaDouble_ptr dB, magma_int_t lddb,
magma_int_t i1, magma_int_t i2,
const magma_int_t *ipiv, magma_int_t inci, magma_int_t offset )
{
magmablas_dswapblk_q(
order, n, dA, ldda, dB, lddb, i1, i2, ipiv, inci, offset, magma_stream );
}
|
b861f68e1585b8c37a67167a74d3bce9e445181a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void generate__K__(float *K_gpu, int *shown_gpu, float *feat_gpu, float *K_noise_gpu,
int shown_size, int feature_size)
{
// Get co-ordinates
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
	if (z >= feature_size || y >= shown_size || x >= shown_size) return;
atomicAdd(&K_gpu[y * shown_size + x], fdividef(fabs(feat_gpu[shown_gpu[x] * feature_size + z] -
feat_gpu[shown_gpu[y] * feature_size + z]), feature_size));
if(x == y) {
K_gpu[y * shown_size + x] = K_noise_gpu[x];
}
}
__global__ void generate__K_x__(float *K_x_gpu, int *shown_gpu, int *predict_gpu, float *feat_gpu,
int shown_size, int prediction_size, int feature_size)
{
// Get co-ordinates
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
	if (z >= feature_size || y >= prediction_size || x >= shown_size) return;
atomicAdd(&K_x_gpu[y * shown_size + x], fdividef(fabsf(feat_gpu[shown_gpu[x] * feature_size + z] - feat_gpu[predict_gpu[y] * feature_size + z]), feature_size));
}
__global__ void generate__diag_K_xx__(float *diag_K_xx_gpu, float *K_xx_noise_gpu)
// Completely useless at the moment
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
diag_K_xx_gpu[x] = K_xx_noise_gpu[x];
}
__global__ void matMulDiag(float *A, float *B, float *C, int numRows, int numCols)
// Slow, fix if the speed of this operation becomes an issue
{
	float result = 0.0;
	int x = blockIdx.x * blockDim.x + threadIdx.x;
	if (x >= numRows) return;
	for (int i = 0; i < numCols; ++i) {
		result += A[x * numCols + i] * B[x * numCols + i];
	}
	C[x] = result;
}
__global__ void generate__variance__(float *variance_gpu, float *diag_K_xx_gpu, float *diag_K_xKK_x_T_gpu, int length)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
	if (x >= length) return;
variance_gpu[x] = fabsf(diag_K_xx_gpu[x] - diag_K_xKK_x_T_gpu[x]);
}
__global__ void generate__UCB__(float *ucb_gpu, float *mean_gpu, float *variance_gpu)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
ucb_gpu[x] = mean_gpu[x] + variance_gpu[x];
}
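/* Illustrative host-side launcher (an assumption -- the launch code is not
   part of this file): tiles the shown x shown x feature volume with a 3-D
   grid. The 8x8x8 block shape is a guess to be tuned per GPU. */
static void launch_generate__K__(float *K_gpu, int *shown_gpu, float *feat_gpu,
                                 float *K_noise_gpu, int shown_size, int feature_size)
{
	dim3 block(8, 8, 8);
	dim3 grid((shown_size + 7) / 8, (shown_size + 7) / 8, (feature_size + 7) / 8);
	hipLaunchKernelGGL(generate__K__, grid, block, 0, 0,
	                   K_gpu, shown_gpu, feat_gpu, K_noise_gpu, shown_size, feature_size);
}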
| b861f68e1585b8c37a67167a74d3bce9e445181a.cu | __global__ void generate__K__(float *K_gpu, int *shown_gpu, float *feat_gpu, float *K_noise_gpu,
int shown_size, int feature_size)
{
// Get co-ordinates
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
	if (z >= feature_size || y >= shown_size || x >= shown_size) return;
atomicAdd(&K_gpu[y * shown_size + x], fdividef(fabs(feat_gpu[shown_gpu[x] * feature_size + z] -
feat_gpu[shown_gpu[y] * feature_size + z]), feature_size));
if(x == y) {
K_gpu[y * shown_size + x] = K_noise_gpu[x];
}
}
__global__ void generate__K_x__(float *K_x_gpu, int *shown_gpu, int *predict_gpu, float *feat_gpu,
int shown_size, int prediction_size, int feature_size)
{
// Get co-ordinates
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
	if (z >= feature_size || y >= prediction_size || x >= shown_size) return;
atomicAdd(&K_x_gpu[y * shown_size + x], fdividef(fabsf(feat_gpu[shown_gpu[x] * feature_size + z] - feat_gpu[predict_gpu[y] * feature_size + z]), feature_size));
}
__global__ void generate__diag_K_xx__(float *diag_K_xx_gpu, float *K_xx_noise_gpu)
// Completely useless at the moment
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
diag_K_xx_gpu[x] = K_xx_noise_gpu[x];
}
__global__ void matMulDiag(float *A, float *B, float *C, int numRows, int numCols)
// Slow, fix if the speed of this operation becomes an issue
{
	float result = 0.0;
	int x = blockIdx.x * blockDim.x + threadIdx.x;
	if (x >= numRows) return;
	for (int i = 0; i < numCols; ++i) {
		result += A[x * numCols + i] * B[x * numCols + i];
	}
	C[x] = result;
}
__global__ void generate__variance__(float *variance_gpu, float *diag_K_xx_gpu, float *diag_K_xKK_x_T_gpu, int length)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
	if (x >= length) return;
variance_gpu[x] = fabsf(diag_K_xx_gpu[x] - diag_K_xKK_x_T_gpu[x]);
}
__global__ void generate__UCB__(float *ucb_gpu, float *mean_gpu, float *variance_gpu)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
ucb_gpu[x] = mean_gpu[x] + variance_gpu[x];
}
|
34cd8b3b6bf04fa5f318674a61ad7dbd0c27d58b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "test.hip"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
float *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
float *c = NULL;
hipMalloc(&c, XSIZE*YSIZE);
int N = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
test), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c,N);
hipDeviceSynchronize();
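		// Warm-up: a handful of untimed launches so the timed loop below
		// excludes one-time startup overhead (context setup, caching).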
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
test), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c,N);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
test), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c,N);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 34cd8b3b6bf04fa5f318674a61ad7dbd0c27d58b.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "test.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
float *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
float *c = NULL;
cudaMalloc(&c, XSIZE*YSIZE);
int N = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
test<<<gridBlock,threadBlock>>>(a,b,c,N);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
test<<<gridBlock,threadBlock>>>(a,b,c,N);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
test<<<gridBlock,threadBlock>>>(a,b,c,N);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
5ccbf429a6aea415545c7e60711af46021abf383.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@precisions normal z -> s d c
@author Mark Gates
*/
#include "magma_internal.h"
#define NB 64
/*
Matrix is m x m, and is divided into block rows, each NB x m.
Each block has NB threads.
Each thread copies one row, iterating across all columns below diagonal.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
*/
__global__ void
zsymmetrize_lower( int m, magmaDoubleComplex *dA, int ldda )
{
// dA iterates across row i and dAT iterates down column i.
int i = blockIdx.x*NB + threadIdx.x;
magmaDoubleComplex *dAT = dA;
if ( i < m ) {
dA += i;
dAT += i*ldda;
magmaDoubleComplex *dAend = dA + i*ldda; // end at diagonal dA(i,i)
while( dA < dAend ) {
*dAT = MAGMA_Z_CONJ(*dA); // upper := lower
dA += ldda;
dAT += 1;
}
*dA = MAGMA_Z_MAKE( MAGMA_Z_REAL(*dA), 0 ); // make diagonal real
}
}
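/*
 * Worked example of the pointer walk above (illustrative): for row i = 2,
 * dA starts at &dA(2,0) and dAT at &dA(0,2); the loop copies
 * conj(dA(2,0)) -> dA(0,2) and conj(dA(2,1)) -> dA(1,2), stops at the
 * diagonal dA(2,2), and finally zeroes that element's imaginary part.
 */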
// only difference with _lower version is direction dA=dAT instead of dAT=dA.
__global__ void
zsymmetrize_upper( int m, magmaDoubleComplex *dA, int ldda )
{
// dA iterates across row i and dAT iterates down column i.
int i = blockIdx.x*NB + threadIdx.x;
magmaDoubleComplex *dAT = dA;
if ( i < m ) {
dA += i;
dAT += i*ldda;
magmaDoubleComplex *dAend = dA + i*ldda; // end at diagonal dA(i,i)
while( dA < dAend ) {
*dA = MAGMA_Z_CONJ(*dAT); // lower := upper
dA += ldda;
dAT += 1;
}
*dA = MAGMA_Z_MAKE( MAGMA_Z_REAL(*dA), 0 ); // make diagonal real
}
}
/***************************************************************************//**
Purpose
-------
ZSYMMETRIZE copies lower triangle to upper triangle, or vice-versa,
to make dA a general representation of a symmetric matrix.
In Complex, it sets the diagonal to be Real.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA that is valid on input.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in,out]
dA COMPLEX_16 array, dimension (LDDA,N)
The m by m matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_symmetrize
*******************************************************************************/
extern "C" void
magmablas_zsymmetrize(
magma_uplo_t uplo, magma_int_t m,
magmaDoubleComplex_ptr dA, magma_int_t ldda,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper )
info = -1;
else if ( m < 0 )
info = -2;
else if ( ldda < max(1,m) )
info = -4;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 )
return;
dim3 threads( NB );
dim3 grid( magma_ceildiv( m, NB ) );
if ( uplo == MagmaUpper ) {
hipLaunchKernelGGL(( zsymmetrize_upper), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, dA, ldda );
}
else {
hipLaunchKernelGGL(( zsymmetrize_lower), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, dA, ldda );
}
}
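/*
 * Usage sketch (illustrative; allocation and queue creation elided):
 *
 *     // rebuild the full Hermitian matrix from its valid lower triangle
 *     magmablas_zsymmetrize( MagmaLower, m, dA, ldda, queue );
 *
 * afterwards the upper triangle mirrors the conjugate of the lower
 * triangle and the diagonal is purely real.
 */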
| 5ccbf429a6aea415545c7e60711af46021abf383.cu | /*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@precisions normal z -> s d c
@author Mark Gates
*/
#include "magma_internal.h"
#define NB 64
/*
Matrix is m x m, and is divided into block rows, each NB x m.
Each block has NB threads.
Each thread copies one row, iterating across all columns below diagonal.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
*/
__global__ void
zsymmetrize_lower( int m, magmaDoubleComplex *dA, int ldda )
{
// dA iterates across row i and dAT iterates down column i.
int i = blockIdx.x*NB + threadIdx.x;
magmaDoubleComplex *dAT = dA;
if ( i < m ) {
dA += i;
dAT += i*ldda;
magmaDoubleComplex *dAend = dA + i*ldda; // end at diagonal dA(i,i)
while( dA < dAend ) {
*dAT = MAGMA_Z_CONJ(*dA); // upper := lower
dA += ldda;
dAT += 1;
}
*dA = MAGMA_Z_MAKE( MAGMA_Z_REAL(*dA), 0 ); // make diagonal real
}
}
// only difference with _lower version is direction dA=dAT instead of dAT=dA.
__global__ void
zsymmetrize_upper( int m, magmaDoubleComplex *dA, int ldda )
{
// dA iterates across row i and dAT iterates down column i.
int i = blockIdx.x*NB + threadIdx.x;
magmaDoubleComplex *dAT = dA;
if ( i < m ) {
dA += i;
dAT += i*ldda;
magmaDoubleComplex *dAend = dA + i*ldda; // end at diagonal dA(i,i)
while( dA < dAend ) {
*dA = MAGMA_Z_CONJ(*dAT); // lower := upper
dA += ldda;
dAT += 1;
}
*dA = MAGMA_Z_MAKE( MAGMA_Z_REAL(*dA), 0 ); // make diagonal real
}
}
/***************************************************************************//**
Purpose
-------
ZSYMMETRIZE copies lower triangle to upper triangle, or vice-versa,
to make dA a general representation of a symmetric matrix.
In Complex, it sets the diagonal to be Real.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA that is valid on input.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in,out]
dA COMPLEX_16 array, dimension (LDDA,N)
The m by m matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_symmetrize
*******************************************************************************/
extern "C" void
magmablas_zsymmetrize(
magma_uplo_t uplo, magma_int_t m,
magmaDoubleComplex_ptr dA, magma_int_t ldda,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper )
info = -1;
else if ( m < 0 )
info = -2;
else if ( ldda < max(1,m) )
info = -4;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 )
return;
dim3 threads( NB );
dim3 grid( magma_ceildiv( m, NB ) );
if ( uplo == MagmaUpper ) {
zsymmetrize_upper<<< grid, threads, 0, queue->cuda_stream() >>>( m, dA, ldda );
}
else {
zsymmetrize_lower<<< grid, threads, 0, queue->cuda_stream() >>>( m, dA, ldda );
}
}
|
1b93b257afff227bc3f43efc3530a4b9b6a81cc3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#define N 1000 // number of threads per block
#define T 1 // number of blocks
__global__ void add(int a, int b, int *c) {
*c = a + b;
}
int main(int argc, char **argv) {
int a, b, c;
int *dev_c;
a = 3;
b = 4;
hipMalloc((void**)&dev_c, sizeof(int));
	add<<<1, 1>>>(a, b, dev_c);
hipMemcpy(&c, dev_c, sizeof(int),
hipMemcpyDeviceToHost);
printf("%d + %d is %d\n", a, b, c);
hipFree(dev_c);
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
| 1b93b257afff227bc3f43efc3530a4b9b6a81cc3.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#define N 1000 // number of threads per block
#define T 1 // number of blocks
__global__ void add(int a, int b, int *c) {
*c = a + b;
}
int main(int argc, char **argv) {
int a, b, c;
int *dev_c;
a = 3;
b = 4;
cudaMalloc((void**)&dev_c, sizeof(int));
	add<<<1, 1>>>(a, b, dev_c);
cudaMemcpy(&c, dev_c, sizeof(int),
cudaMemcpyDeviceToHost);
printf("%d + %d is %d\n", a, b, c);
cudaFree(dev_c);
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
|
bfd56ac98af3c4d6ef5638a1dcc8b91b410044be.hip | // !!! This is a file automatically generated by hipify!!!
#include <thrust/copy.h>
#include <thrust/fill.h>
#include <thrust/sequence.h>
#include <stdio.h>
#include "bitVector.cpp"
#include <omp.h>
#include <string.h>
#include <math.h>
#include <bitset>
#include <climits>
#include <vector>
#include <ctype.h>
//#include "../common/common.h"
#include<sys/time.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
using namespace std;
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, hipGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
class Sequence{
public:
char * name;
char * seq;
long size;
void readSequence(const char * file);
};
void Sequence::readSequence(const char * file)
{
FILE *f1 = fopen(file, "r" );
fpos_t position;
char s1[100];
fgets(s1,100,f1);
name = (char *)malloc(strlen(s1));
memset(name, 0, strlen(s1));
for( int i = 0; i < strlen(s1); i++)
{
name[i] = s1[i+1];
}
fgetpos (f1, &position);
int x = ftell(f1);
fsetpos (f1, &position);
fseek(f1,SEEK_SET , SEEK_END);
int y = ftell(f1);
size = y-x;
seq = new char[size];
fsetpos(f1, &position);
fread( seq, size, 1, f1 );
size = strlen(seq);
for( int i=0 ; i < strlen(seq); ++i )
seq[i] = toupper( seq[i] ) ;
fclose( f1 );
}
__global__ void preprocessing( char * d_characterSet, char * d_vec, char * d_query, int len, int charLength )
{
int i;
int j = 0;
int len1 = len + 1;
for( i = len-1; i >= 0; i--)
{
if( d_characterSet[threadIdx.x] == d_query[i] )
{
d_vec[threadIdx.x*len1 + j ] = '1';
}
else
{
d_vec[threadIdx.x*len1 + j ] = '0';
}
j++;
}
d_vec[threadIdx.x*len1+j] = '\0';
}
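/* Worked example of the bit-vector layout built above (illustrative): for
   query "ACGT" (len = 4), thread 0 ('A') scans i = 3..0, so column j counts
   from the END of the query and row 'A' comes out "0001" -- the '1' in the
   last slot marks the match at query[0]. Likewise row 'G' is "0100" and
   row 'T' is "1000". */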
char* lookup(char dev_seq, char* char_set, int len)
{
char * str_ptr;
str_ptr = (char*)malloc(sizeof(char)*len);
switch(dev_seq){
case 'A':{
for (int i = 0;i<len;i++)
str_ptr[i] = char_set[0*len+i];
break;
}
case 'G':{
for (int i = 0;i<len;i++)
str_ptr[i] = char_set[1*len+i];
break;
}
case 'C':{
for (int i = 0;i<len;i++)
str_ptr[i] = char_set[2*len+i];
break;
}
case 'T':{
for (int i = 0;i<len;i++)
str_ptr[i] = char_set[3*len+i];
break;
}
default:
memset(str_ptr, '0', sizeof(char)*len);
}
return str_ptr;
}
char* binary_add( char opd1[], char opd2[], int len)
{
int len1 = len + 1;
	char* result; // To store the sum bits
result = (char*)malloc(sizeof(char)*len1);
memset(result, '0', sizeof(char)*len1);
bool carry = false; // Initialize carry
std::string sum = "";
int charToIntOne;
int charToIntTwo;
for (int i = len-1; i >= 0 ; i--)
{
charToIntOne = opd1[i] - '0';
charToIntTwo = opd2[i] - '0';
if (carry == true && (charToIntOne + charToIntTwo) >= 1)
{
sum += "0";
}
else if (carry == true && (charToIntOne + charToIntTwo) == 0)
{
sum += "1";
carry = false;
}
else
{
if ((charToIntOne + charToIntTwo) > 1)
{
sum += "0";
carry = true;
}
else if ((charToIntOne + charToIntTwo) == 1)
{
sum += "1";
carry = false;
}
else
{
sum += "0";
carry = false;
}
}
}
int k;
// if overflow, then add a leading 1
if (carry == true){
sum += "1";
k = sum.size()-2;
}
else{
k = sum.size()- 1;
}
char res[len1];
//cout<<sum.size()<<" "<<sum<<endl;
for (int j = 0; j < len; j++)
{
res[j] = sum.at(k);
k--;
}
	res[len] = '\0';
//memset(result,res[len2],sizeof(char)*len2);
strcpy(result,res);
//cout<<result<<endl;
return result;
}
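/* Worked example (illustrative): binary_add("0110", "0011", 4) ripples the
   carry right-to-left and returns "1001" (6 + 3 = 9). On overflow the extra
   leading bit is dropped, so the result keeps the input width. */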
char* leftShift( char opd[], int len)
{
char * res;
res = (char*) malloc((len)*sizeof(char));
memset(res,'0',len);
for (int i = 0; i < len; i++)
{
opd[i] = opd[i+1];
if( i == len - 1)
opd[i] = '0';
}
strcpy(res,opd);
return res;
}
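/* Worked example (illustrative): leftShift("0101", 4) returns "1010" --
   each char moves one slot toward index 0 (the input string is also shifted
   in place) and the vacated last slot is zero-filled, i.e. a one-bit left
   shift that discards the old most-significant character. */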
__global__ void ORoperation( char * dev_opd1, char * dev_opd2, char * dev_res, int len)
{
dev_res[threadIdx.x] = dev_opd1[threadIdx.x] | dev_opd2[threadIdx.x];
// printf("%c OR %c = %c\n",dev_opd1[threadIdx.x],dev_opd2[threadIdx.x],dev_res[threadIdx.x]);
}
__global__ void ANDoperation( char* dev_opd1, char* dev_opd2, char* dev_AND, int len)
{
// printf("id : %d \n", threadIdx.x);
dev_AND[threadIdx.x] = dev_opd1[threadIdx.x] & dev_opd2[threadIdx.x];
// printf("%c AND %c = %c\n",dev_opd1[threadIdx.x],dev_opd2[threadIdx.x],dev_AND[threadIdx.x]);
}
__global__ void XORoperation( char* dev_opd1, char* dev_opd2, char* dev_XOR, int len)
{
int a = dev_opd1[threadIdx.x] - '0';
int b = dev_opd2[threadIdx.x] - '0';
int res = a ^ b;
dev_XOR[threadIdx.x] = res + '0';
}
__global__ void NOToperation( char* dev_opd, char* dev_NOT, int len)
{
if( dev_opd[threadIdx.x] == '0' )
dev_NOT[threadIdx.x] = '1';
else
dev_NOT[threadIdx.x] = '0';
}
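/* Worked example for the one-thread-per-character kernels above
   (illustrative): launched with one block of len threads on opd1 = "0101"
   and opd2 = "0011", OR yields "0111", AND "0001", XOR "0110", and
   NOT("0101") gives "1010". OR/AND operate directly on the chars because
   '0' and '1' differ only in the low bit. */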
int main()
{
//computing Time
struct timeval t1, t2;
gettimeofday(&t1, 0);
int mismatch = 800;
//Reading the sequences from FASTA files
Sequence database;
const char * file = "1000_sequence.txt";
database.readSequence(file);
cout<<"Database Details"<<endl;
cout<<"name : "<<database.name<<endl;
cout<<"size : "<<database.size<<endl;
//cout<<"sequence"<<endl<<database.seq;
Sequence query[2];
const char * file1 = "pattern.txt";
query[0].readSequence(file1);
cout<<"Query 0 Details"<<endl;
cout<<"name : "<<query[0].name<<endl;
cout<<"size : "<<query[0].size<<endl;
//cout<<"sequence"<<endl<<query[0].seq;
/*const char * file2 = "query1.txt";
query[1].readSequence(file2);
cout<<"Query 1 Details"<<endl;
cout<<"name : "<<query[1].name<<endl;
cout<<"size : "<<query[1].size<<endl;
cout<<"sequence"<<endl<<query[1].seq;*/
char characterSet[] = "AGCT";
for( int i = 0; i < 1; i++)
{ //int i = 0;
char * d_characterSet;
char * d_query;
int num_cores = strlen(characterSet);
int len = query[i].size - 1;
int len1 = query[i].size;
char h_vec[num_cores][len1];
char * d_vec;
hipMalloc((char**)&d_vec, num_cores*len1*sizeof(char));
hipMalloc((char**)&d_query, len* sizeof(char));
hipMalloc((char**)&d_characterSet, strlen(characterSet)*sizeof(char));
hipMemcpy( d_query, query[i].seq, len, hipMemcpyHostToDevice);
hipMemcpy( d_characterSet, characterSet, strlen(characterSet), hipMemcpyHostToDevice );
dim3 block(num_cores);
hipLaunchKernelGGL(( preprocessing) , dim3(1), dim3(num_cores) , 0, 0, d_characterSet, d_vec, d_query, len, strlen(characterSet));
//hipDeviceSynchronize();
//cudaCheckErrors("error");
hipMemcpy( h_vec, d_vec, num_cores*len1, hipMemcpyDeviceToHost );
/*for ( int j = 0;j<num_cores;j++)
{
for(int k = 0;k<len1;k++)
{
cout<<h_vec[j][k];
}
cout<<endl;
}*/
char VN[len1];
memset(VN,'0',sizeof(char)*len);
char VP[len1];
memset(VP,'1',sizeof(char)*len);
char X[len];
char D0[len];
memset(D0,'0',sizeof(char)*len);
char HN[len];
memset(HN,'0',sizeof(char)*len);
char HP[len];
memset(HP,'0',sizeof(char)*len);
char temp[len];
memset(temp,'0',sizeof(char)*len);
int score = len;
// begin = clock();
//cout<<"VP: "<<VP<<endl;
//cout<<"VN: "<<VN<<endl;
cout<<"The max score: "<<score<<endl;
for( int k = 0; k < database.size - 1; k++ )
{
char* ptr;
ptr = lookup( database.seq[k], (char*)h_vec, len1 );
char Deq[len1];
strncpy(Deq, ptr, (len1)*sizeof(char));
//cout<<"******************Reading Text["<<k<<"]*****"<<database.seq[k]<<"****************************************"<<endl;
//cout<<"Deq: "<<Deq<<endl;
//OR operation on device
char* dev_opd1;
char* dev_opd2;
char* dev_OR;
hipMalloc((char**)&dev_opd1,len1*sizeof(char));
hipMalloc((char**)&dev_opd2, len1*sizeof(char));
hipMalloc((char**)&dev_OR, len*sizeof(char));
hipMemcpy( dev_opd1, Deq, len1, hipMemcpyHostToDevice );
hipMemcpy( dev_opd2, VN, len1, hipMemcpyHostToDevice );
dim3 block1(len);
hipLaunchKernelGGL(( ORoperation) , dim3(1), dim3(len) , 0, 0, dev_opd1, dev_opd2, dev_OR, len);
//hipDeviceSynchronize();
cudaCheckErrors("error");
hipMemcpy(X, dev_OR, len, hipMemcpyDeviceToHost );
/*cout<<"X is ";
for( int x=0;x<len;x++)
cout<<X[x];
cout<<endl;*/
char* dev_AND;
char host_AND[len];
hipMalloc((char**)&dev_AND, len*sizeof(char));
hipMemcpy( dev_opd2, VP, len, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( ANDoperation) , dim3(1),dim3(len), 0, 0, dev_OR, dev_opd2, dev_AND, len);
hipMemcpy(host_AND, dev_AND, len, hipMemcpyDeviceToHost);
/*cout<<"AND of X and VP: ";
for( int x=0;x<len;x++)
cout<<host_AND[x];
cout<<endl;*/
char* sum_ptr;
sum_ptr = binary_add( host_AND, VP, len);
char host_SUM[len1];
strncpy(host_SUM, sum_ptr,(len1)*sizeof(char));
//cout<<"Sum of host_AND & VP : "<<host_SUM<<endl;
char host_XOR[len];
char* dev_XOR;
//memset(host_XOR, '0', sizeof(char) * len);
hipMalloc((char**)&dev_XOR, len*sizeof(char));
//hipMalloc((char**)&dev_opd1, len*sizeof(char));
//hipMemcpy( dev_XOR, host_XOR, len, hipMemcpyHostToDevice);
hipMemcpy( dev_opd1, host_SUM, len1, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( XORoperation) , dim3(1),dim3(len), 0, 0, dev_opd1, dev_opd2, dev_XOR, len);
//hipDeviceSynchronize();
cudaCheckErrors("error");
hipMemcpy(host_XOR, dev_XOR, len, hipMemcpyDeviceToHost);
/*cout<<" XOR: ";
for( int x=0;x<len;x++)
cout<<host_XOR[x];
cout<<endl;*/
hipMemcpy( dev_opd2, X, len, hipMemcpyHostToDevice );
//hipMemcpy( dev_OR, D0, len, hipMemcpyHostToDevice );
			hipLaunchKernelGGL(( ORoperation) , dim3(1),dim3(len) , 0, 0, dev_XOR, dev_opd2, dev_OR, len);
hipMemcpy( D0, dev_OR, len, hipMemcpyDeviceToHost);
/*cout<<" D0: ";
for( int x=0;x<len;x++)
cout<<D0[x];
cout<<endl;*/
hipMalloc((char**)&dev_opd1, len*sizeof(char));
hipMemcpy( dev_opd1, VP, len, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( ANDoperation) , dim3(1),dim3(len), 0, 0, dev_opd1, dev_OR, dev_AND, len);
//hipDeviceSynchronize();
cudaCheckErrors("error");
hipMemcpy( HN, dev_AND, len, hipMemcpyDeviceToHost);
/*cout<<" HN: ";
for( int x=0;x<len;x++)
cout<<HN[x];
cout<<endl;*/
hipMemcpy( dev_opd2, D0, len, hipMemcpyHostToDevice );
hipLaunchKernelGGL(( ORoperation) , dim3(1),dim3(len), 0, 0, dev_opd1, dev_opd2, dev_OR, len);
//hipDeviceSynchronize();
cudaCheckErrors("error");
hipMemcpy(temp, dev_OR, len, hipMemcpyDeviceToHost);
/*cout<<"OR of D0 and VP: ";
for( int x=0;x<len;x++)
cout<<temp[x];
cout<<endl;*/
char * dev_NOT;
hipMalloc((char**)&dev_NOT, len*sizeof(char));
hipLaunchKernelGGL(( NOToperation) , dim3(1),dim3(len), 0, 0, dev_OR, dev_NOT, len);
//hipDeviceSynchronize();
cudaCheckErrors("error");
hipMemcpy(temp, dev_NOT, len, hipMemcpyDeviceToHost);
/*cout<<" not: ";
for( int x=0;x<len;x++)
cout<<temp[x];
cout<<endl;*/
hipMemcpy(dev_opd1, VN, len, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( ORoperation) , dim3(1),dim3(len), 0, 0, dev_opd1, dev_NOT, dev_OR, len);
//hipDeviceSynchronize();
cudaCheckErrors("error");
hipMemcpy(HP, dev_OR, len, hipMemcpyDeviceToHost);
/*cout<<" HP: ";
for( int x=0;x<len;x++)
cout<<HP[x];
cout<<endl;*/
//score check
char h_arr[len];
int ind;
for( ind = 0; ind < len; ind++ )
{
if(ind == 0){
h_arr[ind] = '1';
}
else{
h_arr[ind] = '0';
}
}
char tmp1[len];
char tmp2[len];
/*cout<<"should be 100: ";
for( int x=0;x<len;x++)
cout<<h_arr[x];
cout<<endl;*/
hipMemcpy(dev_opd1, h_arr, len, hipMemcpyHostToDevice);
hipMemcpy(dev_opd2, HP, len, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( ANDoperation) , dim3(1),dim3(len), 0, 0, dev_opd1, dev_opd2, dev_AND, len);
hipMemcpy(tmp1, dev_AND, len, hipMemcpyDeviceToHost);
//hipDeviceSynchronize();
cudaCheckErrors("error");
/*cout<<"HP AND 100: ";
for( int x=0;x<len;x++)
cout<<tmp1[x];
cout<<endl;*/
hipMemcpy(dev_opd2, HN, len, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( ANDoperation) , dim3(1),dim3(len), 0, 0, dev_opd1, dev_opd2, dev_AND, len);
//hipDeviceSynchronize();
cudaCheckErrors("error");
hipMemcpy(tmp2, dev_AND, len, hipMemcpyDeviceToHost);
/*cout<<"HN AND 100: ";
for( int x=0;x<len;x++)
cout<<tmp2[x];
cout<<endl;*/
bool res1 = false;
bool res2 = false;
for( int y = 0; y < len; y++ )
{
if(tmp1[y] != '0')
{
res1 = true;
break;
}
}
for( int i = 0; i < len; i++ )
{
if(tmp2[i] != '0')
{
res2 = true;
break;
}
}
if( res1 == true)
score = score + 1;
else if( res2 == true)
score = score - 1;
//cout<<"score: "<<score<<endl;
if( score <= mismatch){
//cout<<"appox. match at position: "<<k<<" score: "<<score<<" text character " <<database.seq[k]<<endl;
}
//VN and VP for next column
char * shft;
shft = leftShift( HP, len);
strncpy(X,shft,len*sizeof(char));
/*cout<<"new X: ";
for( int x=0;x<len;x++)
cout<<X[x];
cout<<endl;*/
hipMemcpy(dev_opd1, X, len, hipMemcpyHostToDevice);
hipMemcpy(dev_opd2, D0, len, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( ANDoperation) , dim3(1),dim3(len), 0, 0, dev_opd1, dev_opd2, dev_AND, len);
//hipDeviceSynchronize();
cudaCheckErrors("error");
hipMemcpy(VN, dev_AND, len, hipMemcpyDeviceToHost);
/*cout<<"new VN: ";
for( int x=0;x<len;x++)
cout<<VN[x];
cout<<endl;*/
hipLaunchKernelGGL(( ORoperation) , dim3(1),dim3(len), 0, 0, dev_opd1, dev_opd2, dev_OR, len);
//hipDeviceSynchronize();
cudaCheckErrors("error");
hipMemcpy(temp, dev_OR, len, hipMemcpyDeviceToHost);
/*cout<<"OR of X & D0: ";
for( int x=0;x<len;x++)
cout<<temp[x];
cout<<endl;*/
hipLaunchKernelGGL(( NOToperation) , dim3(1),dim3(len), 0, 0, dev_OR, dev_NOT, len);
//hipDeviceSynchronize();
cudaCheckErrors("error");
hipMemcpy(temp, dev_NOT, len, hipMemcpyDeviceToHost);
/*cout<<" not: ";
for( int x=0;x<len;x++)
cout<<temp[x];
cout<<endl;*/
shft = leftShift( HN, len);
			char shftHN[len];
			strncpy(shftHN,shft,len*sizeof(char));
			//cout<<"shift left of HN: "<<shftHN<<endl;
			hipMemcpy(dev_opd1, shftHN, len, hipMemcpyHostToDevice);
hipMemcpy(dev_opd2, temp, len, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( ORoperation) , dim3(1),dim3(len), 0, 0, dev_opd1, dev_opd2, dev_OR, len);
//hipDeviceSynchronize();
cudaCheckErrors("error");
hipMemcpy(VP, dev_OR, len, hipMemcpyDeviceToHost);
/*cout<<"new VP: ";
for( int x=0;x<len;x++)
cout<<VP[x];
cout<<endl;*/
}
}
gettimeofday(&t2, 0);
double time = (1000000.0*(t2.tv_sec-t1.tv_sec) + t2.tv_usec-t1.tv_usec)/1000000.0;
printf("Time to generate: %3.1f ms \n", time);
// end = clock();
// time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
// printf("Time to generate: %3.1f \n", time_spent);
return 0;
}
| bfd56ac98af3c4d6ef5638a1dcc8b91b410044be.cu | #include <thrust/copy.h>
#include <thrust/fill.h>
#include <thrust/sequence.h>
#include <stdio.h>
#include "bitVector.cpp"
#include <omp.h>
#include <string.h>
#include <math.h>
#include <bitset>
#include <climits>
#include <vector>
#include <ctype.h>
//#include "../common/common.h"
#include<sys/time.h>
#include <cuda_runtime.h>
#include <iostream>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
using namespace std;
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, cudaGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
class Sequence{
public:
char * name;
char * seq;
long size;
void readSequence(const char * file);
};
void Sequence::readSequence(const char * file)
{
FILE *f1 = fopen(file, "r" );
fpos_t position;
char s1[100];
fgets(s1,100,f1);
name = (char *)malloc(strlen(s1));
memset(name, 0, strlen(s1));
for( int i = 0; i < strlen(s1); i++)
{
name[i] = s1[i+1];
}
fgetpos (f1, &position);
int x = ftell(f1);
fsetpos (f1, &position);
fseek(f1,SEEK_SET , SEEK_END);
int y = ftell(f1);
size = y-x;
seq = new char[size];
fsetpos(f1, &position);
fread( seq, size, 1, f1 );
size = strlen(seq);
for( int i=0 ; i < strlen(seq); ++i )
seq[i] = toupper( seq[i] ) ;
fclose( f1 );
}
__global__ void preprocessing( char * d_characterSet, char * d_vec, char * d_query, int len, int charLength )
{
int i;
int j = 0;
int len1 = len + 1;
for( i = len-1; i >= 0; i--)
{
if( d_characterSet[threadIdx.x] == d_query[i] )
{
d_vec[threadIdx.x*len1 + j ] = '1';
}
else
{
d_vec[threadIdx.x*len1 + j ] = '0';
}
j++;
}
d_vec[threadIdx.x*len1+j] = '\0';
}
char* lookup(char dev_seq, char* char_set, int len)
{
char * str_ptr;
str_ptr = (char*)malloc(sizeof(char)*len);
switch(dev_seq){
case 'A':{
for (int i = 0;i<len;i++)
str_ptr[i] = char_set[0*len+i];
break;
}
case 'G':{
for (int i = 0;i<len;i++)
str_ptr[i] = char_set[1*len+i];
break;
}
case 'C':{
for (int i = 0;i<len;i++)
str_ptr[i] = char_set[2*len+i];
break;
}
case 'T':{
for (int i = 0;i<len;i++)
str_ptr[i] = char_set[3*len+i];
break;
}
default:
memset(str_ptr, '0', sizeof(char)*len);
}
return str_ptr;
}
char* binary_add( char opd1[], char opd2[], int len)
{
int len1 = len + 1;
	char* result; // To store the sum bits
result = (char*)malloc(sizeof(char)*len1);
memset(result, '0', sizeof(char)*len1);
bool carry = false; // Initialize carry
std::string sum = "";
int charToIntOne;
int charToIntTwo;
for (int i = len-1; i >= 0 ; i--)
{
charToIntOne = opd1[i] - '0';
charToIntTwo = opd2[i] - '0';
if (carry == true && (charToIntOne + charToIntTwo) >= 1)
{
sum += "0";
}
else if (carry == true && (charToIntOne + charToIntTwo) == 0)
{
sum += "1";
carry = false;
}
else
{
if ((charToIntOne + charToIntTwo) > 1)
{
sum += "0";
carry = true;
}
else if ((charToIntOne + charToIntTwo) == 1)
{
sum += "1";
carry = false;
}
else
{
sum += "0";
carry = false;
}
}
}
int k;
// if overflow, then add a leading 1
if (carry == true){
sum += "1";
k = sum.size()-2;
}
else{
k = sum.size()- 1;
}
char res[len1];
//cout<<sum.size()<<" "<<sum<<endl;
for (int j = 0; j < len; j++)
{
res[j] = sum.at(k);
k--;
}
	res[len] = '\0';
//memset(result,res[len2],sizeof(char)*len2);
strcpy(result,res);
//cout<<result<<endl;
return result;
}
char* leftShift( char opd[], int len)
{
char * res;
res = (char*) malloc((len)*sizeof(char));
memset(res,'0',len);
for (int i = 0; i < len; i++)
{
opd[i] = opd[i+1];
if( i == len - 1)
opd[i] = '0';
}
strcpy(res,opd);
return res;
}
__global__ void ORoperation( char * dev_opd1, char * dev_opd2, char * dev_res, int len)
{
dev_res[threadIdx.x] = dev_opd1[threadIdx.x] | dev_opd2[threadIdx.x];
// printf("%c OR %c = %c\n",dev_opd1[threadIdx.x],dev_opd2[threadIdx.x],dev_res[threadIdx.x]);
}
__global__ void ANDoperation( char* dev_opd1, char* dev_opd2, char* dev_AND, int len)
{
// printf("id : %d \n", threadIdx.x);
dev_AND[threadIdx.x] = dev_opd1[threadIdx.x] & dev_opd2[threadIdx.x];
// printf("%c AND %c = %c\n",dev_opd1[threadIdx.x],dev_opd2[threadIdx.x],dev_AND[threadIdx.x]);
}
__global__ void XORoperation( char* dev_opd1, char* dev_opd2, char* dev_XOR, int len)
{
int a = dev_opd1[threadIdx.x] - '0';
int b = dev_opd2[threadIdx.x] - '0';
int res = a ^ b;
dev_XOR[threadIdx.x] = res + '0';
}
__global__ void NOToperation( char* dev_opd, char* dev_NOT, int len)
{
if( dev_opd[threadIdx.x] == '0' )
dev_NOT[threadIdx.x] = '1';
else
dev_NOT[threadIdx.x] = '0';
}
int main()
{
//computing Time
struct timeval t1, t2;
gettimeofday(&t1, 0);
int mismatch = 800;
//Reading the sequences from FASTA files
Sequence database;
const char * file = "1000_sequence.txt";
database.readSequence(file);
cout<<"Database Details"<<endl;
cout<<"name : "<<database.name<<endl;
cout<<"size : "<<database.size<<endl;
//cout<<"sequence"<<endl<<database.seq;
Sequence query[2];
const char * file1 = "pattern.txt";
query[0].readSequence(file1);
cout<<"Query 0 Details"<<endl;
cout<<"name : "<<query[0].name<<endl;
cout<<"size : "<<query[0].size<<endl;
//cout<<"sequence"<<endl<<query[0].seq;
/*const char * file2 = "query1.txt";
query[1].readSequence(file2);
cout<<"Query 1 Details"<<endl;
cout<<"name : "<<query[1].name<<endl;
cout<<"size : "<<query[1].size<<endl;
cout<<"sequence"<<endl<<query[1].seq;*/
char characterSet[] = "AGCT";
for( int i = 0; i < 1; i++)
{ //int i = 0;
char * d_characterSet;
char * d_query;
int num_cores = strlen(characterSet);
int len = query[i].size - 1;
int len1 = query[i].size;
char h_vec[num_cores][len1];
char * d_vec;
cudaMalloc((char**)&d_vec, num_cores*len1*sizeof(char));
cudaMalloc((char**)&d_query, len* sizeof(char));
cudaMalloc((char**)&d_characterSet, strlen(characterSet)*sizeof(char));
cudaMemcpy( d_query, query[i].seq, len, cudaMemcpyHostToDevice);
cudaMemcpy( d_characterSet, characterSet, strlen(characterSet), cudaMemcpyHostToDevice );
dim3 block(num_cores);
preprocessing <<< 1, num_cores >>> (d_characterSet, d_vec, d_query, len, strlen(characterSet));
//cudaDeviceSynchronize();
//cudaCheckErrors("error");
cudaMemcpy( h_vec, d_vec, num_cores*len1, cudaMemcpyDeviceToHost );
/*for ( int j = 0;j<num_cores;j++)
{
for(int k = 0;k<len1;k++)
{
cout<<h_vec[j][k];
}
cout<<endl;
}*/
char VN[len1];
memset(VN,'0',sizeof(char)*len);
char VP[len1];
memset(VP,'1',sizeof(char)*len);
char X[len];
char D0[len];
memset(D0,'0',sizeof(char)*len);
char HN[len];
memset(HN,'0',sizeof(char)*len);
char HP[len];
memset(HP,'0',sizeof(char)*len);
char temp[len];
memset(temp,'0',sizeof(char)*len);
int score = len;
// begin = clock();
//cout<<"VP: "<<VP<<endl;
//cout<<"VN: "<<VN<<endl;
cout<<"The max score: "<<score<<endl;
for( int k = 0; k < database.size - 1; k++ )
{
char* ptr;
ptr = lookup( database.seq[k], (char*)h_vec, len1 );
char Deq[len1];
strncpy(Deq, ptr, (len1)*sizeof(char));
//cout<<"******************Reading Text["<<k<<"]*****"<<database.seq[k]<<"****************************************"<<endl;
//cout<<"Deq: "<<Deq<<endl;
//OR operation on device
char* dev_opd1;
char* dev_opd2;
char* dev_OR;
cudaMalloc((char**)&dev_opd1,len1*sizeof(char));
cudaMalloc((char**)&dev_opd2, len1*sizeof(char));
cudaMalloc((char**)&dev_OR, len*sizeof(char));
cudaMemcpy( dev_opd1, Deq, len1, cudaMemcpyHostToDevice );
cudaMemcpy( dev_opd2, VN, len1, cudaMemcpyHostToDevice );
dim3 block1(len);
ORoperation <<< 1, len >>> ( dev_opd1, dev_opd2, dev_OR, len);
//cudaDeviceSynchronize();
cudaCheckErrors("error");
cudaMemcpy(X, dev_OR, len, cudaMemcpyDeviceToHost );
/*cout<<"X is ";
for( int x=0;x<len;x++)
cout<<X[x];
cout<<endl;*/
char* dev_AND;
char host_AND[len];
cudaMalloc((char**)&dev_AND, len*sizeof(char));
cudaMemcpy( dev_opd2, VP, len, cudaMemcpyHostToDevice);
ANDoperation <<<1,len>>> ( dev_OR, dev_opd2, dev_AND, len);
cudaMemcpy(host_AND, dev_AND, len, cudaMemcpyDeviceToHost);
/*cout<<"AND of X and VP: ";
for( int x=0;x<len;x++)
cout<<host_AND[x];
cout<<endl;*/
char* sum_ptr;
sum_ptr = binary_add( host_AND, VP, len);
char host_SUM[len1];
strncpy(host_SUM, sum_ptr,(len1)*sizeof(char));
//cout<<"Sum of host_AND & VP : "<<host_SUM<<endl;
char host_XOR[len];
char* dev_XOR;
//memset(host_XOR, '0', sizeof(char) * len);
cudaMalloc((char**)&dev_XOR, len*sizeof(char));
//cudaMalloc((char**)&dev_opd1, len*sizeof(char));
//cudaMemcpy( dev_XOR, host_XOR, len, cudaMemcpyHostToDevice);
cudaMemcpy( dev_opd1, host_SUM, len1, cudaMemcpyHostToDevice);
XORoperation <<<1,len>>> (dev_opd1, dev_opd2, dev_XOR, len);
//cudaDeviceSynchronize();
cudaCheckErrors("error");
cudaMemcpy(host_XOR, dev_XOR, len, cudaMemcpyDeviceToHost);
/*cout<<" XOR: ";
for( int x=0;x<len;x++)
cout<<host_XOR[x];
cout<<endl;*/
cudaMemcpy( dev_opd2, X, len, cudaMemcpyHostToDevice );
//cudaMemcpy( dev_OR, D0, len, cudaMemcpyHostToDevice );
			ORoperation <<< 1,len >>> ( dev_XOR, dev_opd2, dev_OR, len);
cudaMemcpy( D0, dev_OR, len, cudaMemcpyDeviceToHost);
/*cout<<" D0: ";
for( int x=0;x<len;x++)
cout<<D0[x];
cout<<endl;*/
cudaMalloc((char**)&dev_opd1, len*sizeof(char));
cudaMemcpy( dev_opd1, VP, len, cudaMemcpyHostToDevice);
ANDoperation <<<1,len>>> (dev_opd1, dev_OR, dev_AND, len);
//cudaDeviceSynchronize();
cudaCheckErrors("error");
cudaMemcpy( HN, dev_AND, len, cudaMemcpyDeviceToHost);
/*cout<<" HN: ";
for( int x=0;x<len;x++)
cout<<HN[x];
cout<<endl;*/
cudaMemcpy( dev_opd2, D0, len, cudaMemcpyHostToDevice );
ORoperation <<<1,len>>> (dev_opd1, dev_opd2, dev_OR, len);
//cudaDeviceSynchronize();
cudaCheckErrors("error");
cudaMemcpy(temp, dev_OR, len, cudaMemcpyDeviceToHost);
/*cout<<"OR of D0 and VP: ";
for( int x=0;x<len;x++)
cout<<temp[x];
cout<<endl;*/
char * dev_NOT;
cudaMalloc((char**)&dev_NOT, len*sizeof(char));
NOToperation <<<1,len>>> ( dev_OR, dev_NOT, len);
//cudaDeviceSynchronize();
cudaCheckErrors("error");
cudaMemcpy(temp, dev_NOT, len, cudaMemcpyDeviceToHost);
/*cout<<" not: ";
for( int x=0;x<len;x++)
cout<<temp[x];
cout<<endl;*/
cudaMemcpy(dev_opd1, VN, len, cudaMemcpyHostToDevice);
ORoperation <<<1,len>>> ( dev_opd1, dev_NOT, dev_OR, len);
//cudaDeviceSynchronize();
cudaCheckErrors("error");
cudaMemcpy(HP, dev_OR, len, cudaMemcpyDeviceToHost);
/*cout<<" HP: ";
for( int x=0;x<len;x++)
cout<<HP[x];
cout<<endl;*/
//score check
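// h_arr is the mask 100...0; ANDing it with HP / HN isolates the bit tracking
// the last pattern row, i.e. whether this column's score went up or down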
char h_arr[len];
int ind;
for( ind = 0; ind < len; ind++ )
{
if(ind == 0){
h_arr[ind] = '1';
}
else{
h_arr[ind] = '0';
}
}
char tmp1[len];
char tmp2[len];
/*cout<<"should be 100: ";
for( int x=0;x<len;x++)
cout<<h_arr[x];
cout<<endl;*/
cudaMemcpy(dev_opd1, h_arr, len, cudaMemcpyHostToDevice);
cudaMemcpy(dev_opd2, HP, len, cudaMemcpyHostToDevice);
ANDoperation <<<1,len>>> (dev_opd1, dev_opd2, dev_AND, len);
cudaMemcpy(tmp1, dev_AND, len, cudaMemcpyDeviceToHost);
//cudaDeviceSynchronize();
cudaCheckErrors("error");
/*cout<<"HP AND 100: ";
for( int x=0;x<len;x++)
cout<<tmp1[x];
cout<<endl;*/
cudaMemcpy(dev_opd2, HN, len, cudaMemcpyHostToDevice);
ANDoperation <<<1,len>>> (dev_opd1, dev_opd2, dev_AND, len);
//cudaDeviceSynchronize();
cudaCheckErrors("error");
cudaMemcpy(tmp2, dev_AND, len, cudaMemcpyDeviceToHost);
/*cout<<"HN AND 100: ";
for( int x=0;x<len;x++)
cout<<tmp2[x];
cout<<endl;*/
bool res1 = false;
bool res2 = false;
for( int y = 0; y < len; y++ )
{
if(tmp1[y] != '0')
{
res1 = true;
break;
}
}
for( int i = 0; i < len; i++ )
{
if(tmp2[i] != '0')
{
res2 = true;
break;
}
}
if( res1 == true)
score = score + 1;
else if( res2 == true)
score = score - 1;
//cout<<"score: "<<score<<endl;
if( score <= mismatch){
//cout<<"appox. match at position: "<<k<<" score: "<<score<<" text character " <<database.seq[k]<<endl;
}
//VN and VP for next column
char * shft;
shft = leftShift( HP, len);
strncpy(X,shft,len*sizeof(char));
/*cout<<"new X: ";
for( int x=0;x<len;x++)
cout<<X[x];
cout<<endl;*/
cudaMemcpy(dev_opd1, X, len, cudaMemcpyHostToDevice);
cudaMemcpy(dev_opd2, D0, len, cudaMemcpyHostToDevice);
ANDoperation <<<1,len>>> (dev_opd1, dev_opd2, dev_AND, len);
//cudaDeviceSynchronize();
cudaCheckErrors("error");
cudaMemcpy(VN, dev_AND, len, cudaMemcpyDeviceToHost);
/*cout<<"new VN: ";
for( int x=0;x<len;x++)
cout<<VN[x];
cout<<endl;*/
ORoperation <<<1,len>>> (dev_opd1, dev_opd2, dev_OR, len);
//cudaDeviceSynchronize();
cudaCheckErrors("error");
cudaMemcpy(temp, dev_OR, len, cudaMemcpyDeviceToHost);
/*cout<<"OR of X & D0: ";
for( int x=0;x<len;x++)
cout<<temp[x];
cout<<endl;*/
NOToperation <<<1,len>>> ( dev_OR, dev_NOT, len);
//cudaDeviceSynchronize();
cudaCheckErrors("error");
cudaMemcpy(temp, dev_NOT, len, cudaMemcpyDeviceToHost);
/*cout<<" not: ";
for( int x=0;x<len;x++)
cout<<temp[x];
cout<<endl;*/
shft = leftShift( HN, len);
char shftHN[len]; // this holds HN shifted left, not HP
strncpy(shftHN,shft,len*sizeof(char));
//cout<<"shift left of HN: "<<shftHN<<endl;
cudaMemcpy(dev_opd1, shftHN, len, cudaMemcpyHostToDevice);
cudaMemcpy(dev_opd2, temp, len, cudaMemcpyHostToDevice);
ORoperation <<<1,len>>> (dev_opd1, dev_opd2, dev_OR, len);
//cudaDeviceSynchronize();
cudaCheckErrors("error");
cudaMemcpy(VP, dev_OR, len, cudaMemcpyDeviceToHost);
/*cout<<"new VP: ";
for( int x=0;x<len;x++)
cout<<VP[x];
cout<<endl;*/
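// free the buffers cudaMalloc'd inside this iteration so the loop doesn't leak device memory
cudaFree(dev_opd1); cudaFree(dev_opd2); cudaFree(dev_OR);
cudaFree(dev_AND); cudaFree(dev_XOR); cudaFree(dev_NOT);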
}
}
gettimeofday(&t2, 0);
double time = (1000000.0*(t2.tv_sec-t1.tv_sec) + t2.tv_usec-t1.tv_usec)/1000000.0;
printf("Time to generate: %3.1f ms \n", time);
// end = clock();
// time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
// printf("Time to generate: %3.1f \n", time_spent);
return 0;
}
|
87e1e71b4d8c6f5d30af42fb1bae164c38f910a6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
*Derek Trom
*HW5 CSCI364
*/
#include <stdlib.h>
#include <iostream>
#include <math.h>
#include <iomanip>
#include <cstdio>
__device__ float add(float num){
float outnum = num + 1;
return outnum;
}
__global__
void func1(float *xd, float *yd, int n) {
int threadId = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
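// grid-stride loop: each thread handles indices threadId, threadId+stride, ...,
// so the whole array is covered for any grid/block configuration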
for (int i = threadId; i < n; i+= stride) {
yd[i] = add(xd[i]);
}
}
__global__
void createArrays(float *in, float *out, int n){
int threadId = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = threadId; i < n; i+=stride) {
in[i] = 1.0f;
out[i] = 0.0f;
}
}
int main(int argc, char **argv){
using namespace std;
if( argc< 3){
cerr<<"Usage: "<<argv[0]<<" <length of arrays> <num threads/block>"<<endl;
return 1;
}
int threads_per_block = atoi(argv[2]);
int sizeOfArray = atoi(argv[1]);
if (sizeOfArray < 1 or threads_per_block < 1){
cerr<<"Array length and block size must be > 0"<<endl;
return 1;
}
float *xd, *yd;
hipMallocManaged(&xd, sizeOfArray*sizeof(float));
hipMallocManaged(&yd, sizeOfArray*sizeof(float));
//---------PHASE ONE----------//
int numBlocks = (sizeOfArray + threads_per_block- 1) / threads_per_block;
hipLaunchKernelGGL(( createArrays), dim3(numBlocks), dim3(threads_per_block), 0, 0, xd, yd, sizeOfArray);
hipLaunchKernelGGL(( func1), dim3(numBlocks), dim3(threads_per_block), 0, 0, xd,yd,sizeOfArray);
hipDeviceSynchronize();
float maxError = 0.0f;
for (int i = 0; i < sizeOfArray; i++)
{
maxError = fmax(maxError, fabs(yd[i]-2.0f));
}
cout<<"Phase 1"<<endl;
cout<<endl<<"Array size: "<<sizeOfArray<<endl;
cout<<"Threads per block: "<<threads_per_block<<endl;
cout<<"Number of blocks: "<<numBlocks<<endl;
cout << "Max error: " << maxError << endl;
//--------Phase 2-------//
//Use half the number of threads per block for the second run but use
//the same kernel functions and block count
threads_per_block = threads_per_block/2;
hipLaunchKernelGGL(( createArrays), dim3(numBlocks), dim3(threads_per_block), 0, 0, xd, yd, sizeOfArray);
hipLaunchKernelGGL(( func1), dim3(numBlocks), dim3(threads_per_block), 0, 0, xd,yd,sizeOfArray);
hipDeviceSynchronize();
maxError = 0.0f; // reset so phase 2 reports its own max error rather than carrying phase 1's over
for (int i = 0; i < sizeOfArray; i++)
{
maxError = fmax(maxError, fabs(yd[i]-2.0f));
}
cout<<"Phase 2"<<endl;
cout<<endl<<"Array size: "<<sizeOfArray<<endl;
cout<<"Threads per block: "<<threads_per_block<<endl;
cout<<"Number of blocks: "<<numBlocks<<endl;
cout << "Max error: " << maxError << endl;
hipFree(xd);
hipFree(yd);
return 0;
}
| 87e1e71b4d8c6f5d30af42fb1bae164c38f910a6.cu | /*
*Derek Trom
*HW5 CSCI364
*/
#include <stdlib.h>
#include <iostream>
#include <math.h>
#include <iomanip>
#include <cstdio>
__device__ float add(float num){
float outnum = num + 1;
return outnum;
}
__global__
void func1(float *xd, float *yd, int n) {
int threadId = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = threadId; i < n; i+= stride) {
yd[i] = add(xd[i]);
}
}
__global__
void createArrays(float *in, float *out, int n){
int threadId = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = threadId; i < n; i+=stride) {
in[i] = 1.0f;
out[i] = 0.0f;
}
}
int main(int argc, char **argv){
using namespace std;
if( argc< 3){
cerr<<"Usage: "<<argv[0]<<" <length of arrays> <num threads/block>"<<endl;
return 1;
}
int threads_per_block = atoi(argv[2]);
int sizeOfArray = atoi(argv[1]);
if (sizeOfArray < 1 or threads_per_block < 1){
cerr<<"Array length and block size must be > 0"<<endl;
return 1;
}
float *xd, *yd;
cudaMallocManaged(&xd, sizeOfArray*sizeof(float));
cudaMallocManaged(&yd, sizeOfArray*sizeof(float));
//---------PHASE ONE----------//
int numBlocks = (sizeOfArray + threads_per_block- 1) / threads_per_block;
createArrays<<<numBlocks, threads_per_block>>>(xd, yd, sizeOfArray);
func1<<<numBlocks, threads_per_block>>>(xd,yd,sizeOfArray);
cudaDeviceSynchronize();
float maxError = 0.0f;
for (int i = 0; i < sizeOfArray; i++)
{
maxError = fmax(maxError, fabs(yd[i]-2.0f));
}
cout<<"Phase 1"<<endl;
cout<<endl<<"Array size: "<<sizeOfArray<<endl;
cout<<"Threads per block: "<<threads_per_block<<endl;
cout<<"Number of blocks: "<<numBlocks<<endl;
cout << "Max error: " << maxError << endl;
//--------Phase 2-------//
//Use half the number of threads per block for the second run but use
//the same kernel functions and block count
threads_per_block = threads_per_block/2;
createArrays<<<numBlocks, threads_per_block>>>(xd, yd, sizeOfArray);
func1<<<numBlocks, threads_per_block>>>(xd,yd,sizeOfArray);
cudaDeviceSynchronize();
maxError = 0.0f; // reset so phase 2 reports its own max error rather than carrying phase 1's over
for (int i = 0; i < sizeOfArray; i++)
{
maxError = fmax(maxError, fabs(yd[i]-2.0f));
}
cout<<"Phase 2"<<endl;
cout<<endl<<"Array size: "<<sizeOfArray<<endl;
cout<<"Threads per block: "<<threads_per_block<<endl;
cout<<"Number of blocks: "<<numBlocks<<endl;
cout << "Max error: " << maxError << endl;
cudaFree(xd);
cudaFree(yd);
return 0;
}
|
2c421c13caca6b3dbe0916e2dd0f93411ca49dde.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include "utils.h"
void _HANDLE_ERROR(hipError_t e, const char * file, int line)
{
if (e != hipSuccess)
{
printf("%s: %d. error %s\n", file, line, hipGetErrorString(e));
exit (1);
}
}
| 2c421c13caca6b3dbe0916e2dd0f93411ca49dde.cu | #include <cuda.h>
#include <stdio.h>
#include "utils.h"
void _HANDLE_ERROR(cudaError_t e, const char * file, int line)
{
if (e != cudaSuccess)
{
printf("%s: %d. error %s\n", file, line, cudaGetErrorString(e));
exit (1);
}
}
|
5743ec6f22d03907c4cfd637fe1e1cc3b3a073cc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ static void transform_vert_to_fit(const int* src, int* dst, const int nb_vert)
{
const int p = blockIdx.x * blockDim.x + threadIdx.x;
if(p < nb_vert) dst[p] = src[p] < 0 ? 0 : 1;
} | 5743ec6f22d03907c4cfd637fe1e1cc3b3a073cc.cu | #include "includes.h"
__global__ static void transform_vert_to_fit(const int* src, int* dst, const int nb_vert)
{
const int p = blockIdx.x * blockDim.x + threadIdx.x;
if(p < nb_vert) dst[p] = src[p] < 0 ? 0 : 1;
} |
250474e402bc2de9be146038c2c344a29b131ca3.hip | // !!! This is a file automatically generated by hipify!!!
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
* Copyright (c) 2019 Konduit K.K.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <exceptions/cuda_exception.h>
#include <ConstantHelper.h>
#include <DataTypeUtils.h>
#include <shape.h>
#include <execution/LaunchContext.h>
#include <specials.h>
#include <logger.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <execution/AffinityManager.h>
#define CONSTANT_LIMIT 49152
__constant__ char deviceConstantMemory[CONSTANT_LIMIT];
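// A fixed 48KB __constant__ buffer is reserved per device; replicatePointer()
// hands out 8-byte-aligned slices of it via hipMemcpyToSymbol and falls back to
// ordinary device memory once the CONSTANT_LIMIT budget is exhausted.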
namespace nd4j {
static void* getConstantSpace() {
Nd4jPointer dConstAddr;
auto dZ = hipGetSymbolAddress(reinterpret_cast<void **>(&dConstAddr), deviceConstantMemory);
if (dZ != 0)
throw cuda_exception::build("hipGetSymbolAddress(...) failed", dZ);
return dConstAddr;
}
int ConstantHelper::getCurrentDevice() {
return AffinityManager::currentDeviceId();
}
int ConstantHelper::getNumberOfDevices() {
return AffinityManager::numberOfDevices();
}
ConstantHelper::ConstantHelper() {
auto initialDevice = getCurrentDevice();
auto numDevices = getNumberOfDevices();
_devicePointers.resize(numDevices);
_deviceOffsets.resize(numDevices);
_cache.resize(numDevices);
_counters.resize(numDevices);
// filling all pointers
for (int e = 0; e < numDevices; e++) {
auto res = hipSetDevice(e);
if (res != 0)
throw cuda_exception::build("hipSetDevice failed", res);
auto constant = getConstantSpace();
MAP_IMPL<ConstantDescriptor, ConstantHolder*> devCache;
_devicePointers[e] = constant;
_deviceOffsets[e] = 0;
_cache[e] = devCache;
_counters[e] = 0L;
}
//
auto res = hipSetDevice(initialDevice);
if (res != 0)
throw cuda_exception::build("Final hipSetDevice failed", res);
}
ConstantHelper* ConstantHelper::getInstance() {
if (!_INSTANCE)
_INSTANCE = new nd4j::ConstantHelper();
return _INSTANCE;
}
void* ConstantHelper::replicatePointer(void *src, size_t numBytes, memory::Workspace *workspace) {
std::lock_guard<std::mutex> lock(_mutex);
auto deviceId = getCurrentDevice();
Nd4jPointer constantPtr = nullptr;
Nd4jLong constantOffset = 0L;
if (_devicePointers[deviceId] == 0) {
auto constant = getConstantSpace();
// filling default ptr, which will be 0 probably
_devicePointers[deviceId] = constant;
_deviceOffsets[deviceId] = 0;
constantPtr = constant;
} else {
constantPtr = _devicePointers[deviceId];
constantOffset = _deviceOffsets[deviceId];
}
if (constantOffset + numBytes >= CONSTANT_LIMIT) {
int8_t *ptr = nullptr;
ALLOCATE_SPECIAL(ptr, workspace, numBytes, int8_t);
auto res = hipMemcpy(ptr, src, numBytes, hipMemcpyHostToDevice);
if (res != 0)
throw cuda_exception::build("hipMemcpy failed", res);
return ptr;
} else {
auto originalBytes = numBytes;
auto rem = numBytes % 8;
if (rem != 0)
numBytes += 8 - rem;
_deviceOffsets[deviceId] += numBytes;
auto res = hipMemcpyToSymbol(deviceConstantMemory, const_cast<const void *>(src), originalBytes, constantOffset, hipMemcpyHostToDevice);
if (res != 0)
throw cuda_exception::build("hipMemcpyToSymbol failed", res);
return reinterpret_cast<int8_t *>(constantPtr) + constantOffset;
}
}
ConstantDataBuffer* ConstantHelper::constantBuffer(const ConstantDescriptor &descriptor, nd4j::DataType dataType) {
const auto deviceId = getCurrentDevice();
// all cache modifications are synchronous
_mutexHolder.lock();
if (_cache[deviceId].count(descriptor) == 0) {
_cache[deviceId][descriptor] = new ConstantHolder();
}
auto holder = _cache[deviceId][descriptor];
// release cache lock
_mutexHolder.unlock();
ConstantDataBuffer* result;
// access to this holder instance is synchronous
std::lock_guard<std::mutex> lock(*holder->mutex());
if (holder->hasBuffer(dataType)) {
result = holder->getConstantDataBuffer(dataType);
} else {
auto numBytes = descriptor.length() * DataTypeUtils::sizeOf(dataType);
auto cbuff = new int8_t[numBytes];
_counters[deviceId] += numBytes;
// create buffer with this dtype
if (descriptor.isFloat()) {
BUILD_DOUBLE_SELECTOR(nd4j::DataType::DOUBLE, dataType, nd4j::SpecialTypeConverter::convertGeneric, (nullptr, const_cast<double *>(descriptor.floatValues().data()), descriptor.length(), cbuff), (nd4j::DataType::DOUBLE, double), LIBND4J_TYPES);
} else if (descriptor.isInteger()) {
BUILD_DOUBLE_SELECTOR(nd4j::DataType::INT64, dataType, nd4j::SpecialTypeConverter::convertGeneric, (nullptr, const_cast<Nd4jLong *>(descriptor.integerValues().data()), descriptor.length(), cbuff), (nd4j::DataType::INT64, Nd4jLong), LIBND4J_TYPES);
}
auto dbuff = replicatePointer(cbuff, descriptor.length() * DataTypeUtils::sizeOf(dataType));
ConstantDataBuffer dataBuffer(cbuff, dbuff, descriptor.length(), DataTypeUtils::sizeOf(dataType));
holder->addBuffer(dataBuffer, dataType);
result = holder->getConstantDataBuffer(dataType);
}
return result;
}
Nd4jLong ConstantHelper::getCachedAmount(int deviceId) {
int numDevices = getNumberOfDevices();
if (deviceId > numDevices || deviceId < 0)
return 0L;
else
return _counters[deviceId];
}
nd4j::ConstantHelper* nd4j::ConstantHelper::_INSTANCE = 0;
} | 250474e402bc2de9be146038c2c344a29b131ca3.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
* Copyright (c) 2019 Konduit K.K.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <exceptions/cuda_exception.h>
#include <ConstantHelper.h>
#include <DataTypeUtils.h>
#include <shape.h>
#include <execution/LaunchContext.h>
#include <specials.h>
#include <logger.h>
#include <cuda_runtime.h>
#include <cuda.h>
#include <execution/AffinityManager.h>
#define CONSTANT_LIMIT 49152
__constant__ char deviceConstantMemory[CONSTANT_LIMIT];
namespace nd4j {
static void* getConstantSpace() {
Nd4jPointer dConstAddr;
auto dZ = cudaGetSymbolAddress(reinterpret_cast<void **>(&dConstAddr), deviceConstantMemory);
if (dZ != 0)
throw cuda_exception::build("cudaGetSymbolAddress(...) failed", dZ);
return dConstAddr;
}
int ConstantHelper::getCurrentDevice() {
return AffinityManager::currentDeviceId();
}
int ConstantHelper::getNumberOfDevices() {
return AffinityManager::numberOfDevices();
}
ConstantHelper::ConstantHelper() {
auto initialDevice = getCurrentDevice();
auto numDevices = getNumberOfDevices();
_devicePointers.resize(numDevices);
_deviceOffsets.resize(numDevices);
_cache.resize(numDevices);
_counters.resize(numDevices);
// filling all pointers
for (int e = 0; e < numDevices; e++) {
auto res = cudaSetDevice(e);
if (res != 0)
throw cuda_exception::build("cudaSetDevice failed", res);
auto constant = getConstantSpace();
MAP_IMPL<ConstantDescriptor, ConstantHolder*> devCache;
_devicePointers[e] = constant;
_deviceOffsets[e] = 0;
_cache[e] = devCache;
_counters[e] = 0L;
}
//
auto res = cudaSetDevice(initialDevice);
if (res != 0)
throw cuda_exception::build("Final cudaSetDevice failed", res);
}
ConstantHelper* ConstantHelper::getInstance() {
if (!_INSTANCE)
_INSTANCE = new nd4j::ConstantHelper();
return _INSTANCE;
}
void* ConstantHelper::replicatePointer(void *src, size_t numBytes, memory::Workspace *workspace) {
std::lock_guard<std::mutex> lock(_mutex);
auto deviceId = getCurrentDevice();
Nd4jPointer constantPtr = nullptr;
Nd4jLong constantOffset = 0L;
if (_devicePointers[deviceId] == 0) {
auto constant = getConstantSpace();
// filling default ptr, which will be 0 probably
_devicePointers[deviceId] = constant;
_deviceOffsets[deviceId] = 0;
constantPtr = constant;
} else {
constantPtr = _devicePointers[deviceId];
constantOffset = _deviceOffsets[deviceId];
}
if (constantOffset + numBytes >= CONSTANT_LIMIT) {
int8_t *ptr = nullptr;
ALLOCATE_SPECIAL(ptr, workspace, numBytes, int8_t);
auto res = cudaMemcpy(ptr, src, numBytes, cudaMemcpyHostToDevice);
if (res != 0)
throw cuda_exception::build("cudaMemcpy failed", res);
return ptr;
} else {
auto originalBytes = numBytes;
auto rem = numBytes % 8;
if (rem != 0)
numBytes += 8 - rem;
_deviceOffsets[deviceId] += numBytes;
auto res = cudaMemcpyToSymbol(deviceConstantMemory, const_cast<const void *>(src), originalBytes, constantOffset, cudaMemcpyHostToDevice);
if (res != 0)
throw cuda_exception::build("cudaMemcpyToSymbol failed", res);
return reinterpret_cast<int8_t *>(constantPtr) + constantOffset;
}
}
ConstantDataBuffer* ConstantHelper::constantBuffer(const ConstantDescriptor &descriptor, nd4j::DataType dataType) {
const auto deviceId = getCurrentDevice();
// all cache modifications are synchronous
_mutexHolder.lock();
if (_cache[deviceId].count(descriptor) == 0) {
_cache[deviceId][descriptor] = new ConstantHolder();
}
auto holder = _cache[deviceId][descriptor];
// release cache lock
_mutexHolder.unlock();
ConstantDataBuffer* result;
// access to this holder instance is synchronous
std::lock_guard<std::mutex> lock(*holder->mutex());
if (holder->hasBuffer(dataType)) {
result = holder->getConstantDataBuffer(dataType);
} else {
auto numBytes = descriptor.length() * DataTypeUtils::sizeOf(dataType);
auto cbuff = new int8_t[numBytes];
_counters[deviceId] += numBytes;
// create buffer with this dtype
if (descriptor.isFloat()) {
BUILD_DOUBLE_SELECTOR(nd4j::DataType::DOUBLE, dataType, nd4j::SpecialTypeConverter::convertGeneric, (nullptr, const_cast<double *>(descriptor.floatValues().data()), descriptor.length(), cbuff), (nd4j::DataType::DOUBLE, double), LIBND4J_TYPES);
} else if (descriptor.isInteger()) {
BUILD_DOUBLE_SELECTOR(nd4j::DataType::INT64, dataType, nd4j::SpecialTypeConverter::convertGeneric, (nullptr, const_cast<Nd4jLong *>(descriptor.integerValues().data()), descriptor.length(), cbuff), (nd4j::DataType::INT64, Nd4jLong), LIBND4J_TYPES);
}
auto dbuff = replicatePointer(cbuff, descriptor.length() * DataTypeUtils::sizeOf(dataType));
ConstantDataBuffer dataBuffer(cbuff, dbuff, descriptor.length(), DataTypeUtils::sizeOf(dataType));
holder->addBuffer(dataBuffer, dataType);
result = holder->getConstantDataBuffer(dataType);
}
return result;
}
Nd4jLong ConstantHelper::getCachedAmount(int deviceId) {
int numDevices = getNumberOfDevices();
if (deviceId > numDevices || deviceId < 0)
return 0L;
else
return _counters[deviceId];
}
nd4j::ConstantHelper* nd4j::ConstantHelper::_INSTANCE = 0;
} |
a6418834ca3b7d3f869acdec7179b6f3f9e9e49b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2018,2019,2020,2021 Sony Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/selu.hpp>
#include <nbla/cuda/math.hpp>
#include <nbla/variable.hpp>
namespace nbla {
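// SELU: y = scale * x for x > 0, else y = scale * alpha * (exp(x) - 1);
// `coef` passed to the kernels below is precomputed on the host as alpha * scale.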
template <typename T>
__global__ void kernel_selu_forward(const int num, const float scale_,
const float coef, T *y, const T *x) {
NBLA_CUDA_KERNEL_LOOP(idx, num) {
y[idx] = x[idx] > (T)0 ? (T)scale_ * x[idx]
: (T)coef * (::exp(x[idx]) - (T)1);
}
}
template <typename T, bool accum = true>
__global__ void kernel_selu_backward(const int num, const float scale_,
const float coef, T *dx, const T *x,
const T *dy) {
NBLA_CUDA_KERNEL_LOOP(idx, num) {
dx[idx] = (accum ? dx[idx] : (T)0) +
(x[idx] > (T)0 ? dy[idx] * (T)scale_
: dy[idx] * (T)coef * ::exp(x[idx]));
}
}
template <typename T>
void SELUCuda<T>::setup_impl(const Variables &inputs,
const Variables &outputs) {
cuda_set_device(this->device_);
SELU<T>::setup_impl(inputs, outputs);
}
template <typename T>
void SELUCuda<T>::forward_impl(const Variables &inputs,
const Variables &outputs) {
cuda_set_device(this->device_);
const Tc *x = inputs[0]->get_data_pointer<Tc>(this->ctx_);
Tc *y = outputs[0]->cast_data_and_get_pointer<Tc>(this->ctx_, true);
size_t size = inputs[0]->size();
const float coef = this->alpha_ * this->scale_;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_selu_forward, size, this->scale_, coef,
y, x);
}
template <typename T>
void SELUCuda<T>::backward_impl(const Variables &inputs,
const Variables &outputs,
const vector<bool> &propagate_down,
const vector<bool> &accum) {
if (!propagate_down[0]) {
return;
}
cuda_set_device(this->device_);
const Tc *x = inputs[0]->get_data_pointer<Tc>(this->ctx_);
Tc *dx = inputs[0]->cast_grad_and_get_pointer<Tc>(this->ctx_, !accum[0]);
const Tc *dy = outputs[0]->get_grad_pointer<Tc>(this->ctx_);
size_t size = inputs[0]->size();
const float coef = this->alpha_ * this->scale_;
if (accum[0]) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_selu_backward<Tc, true>), size,
this->scale_, coef, dx, x, dy);
} else {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_selu_backward<Tc, false>), size,
this->scale_, coef, dx, x, dy);
}
}
} // namespace nbla
| a6418834ca3b7d3f869acdec7179b6f3f9e9e49b.cu | // Copyright 2018,2019,2020,2021 Sony Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/selu.hpp>
#include <nbla/cuda/math.hpp>
#include <nbla/variable.hpp>
namespace nbla {
template <typename T>
__global__ void kernel_selu_forward(const int num, const float scale_,
const float coef, T *y, const T *x) {
NBLA_CUDA_KERNEL_LOOP(idx, num) {
y[idx] = x[idx] > (T)0 ? (T)scale_ * x[idx]
: (T)coef * (std::exp(x[idx]) - (T)1);
}
}
template <typename T, bool accum = true>
__global__ void kernel_selu_backward(const int num, const float scale_,
const float coef, T *dx, const T *x,
const T *dy) {
NBLA_CUDA_KERNEL_LOOP(idx, num) {
dx[idx] = (accum ? dx[idx] : (T)0) +
(x[idx] > (T)0 ? dy[idx] * (T)scale_
: dy[idx] * (T)coef * std::exp(x[idx]));
}
}
template <typename T>
void SELUCuda<T>::setup_impl(const Variables &inputs,
const Variables &outputs) {
cuda_set_device(this->device_);
SELU<T>::setup_impl(inputs, outputs);
}
template <typename T>
void SELUCuda<T>::forward_impl(const Variables &inputs,
const Variables &outputs) {
cuda_set_device(this->device_);
const Tc *x = inputs[0]->get_data_pointer<Tc>(this->ctx_);
Tc *y = outputs[0]->cast_data_and_get_pointer<Tc>(this->ctx_, true);
size_t size = inputs[0]->size();
const float coef = this->alpha_ * this->scale_;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_selu_forward, size, this->scale_, coef,
y, x);
}
template <typename T>
void SELUCuda<T>::backward_impl(const Variables &inputs,
const Variables &outputs,
const vector<bool> &propagate_down,
const vector<bool> &accum) {
if (!propagate_down[0]) {
return;
}
cuda_set_device(this->device_);
const Tc *x = inputs[0]->get_data_pointer<Tc>(this->ctx_);
Tc *dx = inputs[0]->cast_grad_and_get_pointer<Tc>(this->ctx_, !accum[0]);
const Tc *dy = outputs[0]->get_grad_pointer<Tc>(this->ctx_);
size_t size = inputs[0]->size();
const float coef = this->alpha_ * this->scale_;
if (accum[0]) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_selu_backward<Tc, true>), size,
this->scale_, coef, dx, x, dy);
} else {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_selu_backward<Tc, false>), size,
this->scale_, coef, dx, x, dy);
}
}
} // namespace nbla
|
218082898cbeadc4e7e03b1a8c094540d302eaf5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THH.h>
#include <THH/THHDeviceUtils.cuh>
#include <vector>
#include <iostream>
// int const CUDA_NUM_THREADS = sizeof(unsigned long long) * 8;
int const CUDA_NUM_THREADS = 1024;
inline int CUDA_GET_BLOCKS(const int N) {
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
// template<typename T>
float inline __device__ sgn(float x)
{
return x>0?1.0:-1.0;
}
// template<typename T>
__global__ void afm_kernel(const int nthreads, const float* lines, const int* shape_info, const int num, const int height, const int width, float* afmap, int* aflabel)
{
// aflabel[0] = 100;
CUDA_1D_KERNEL_LOOP(index, nthreads){
// printf("%d, %d\n",index,nthreads);
// afmap[index] = 1;
// afmap[index+height*width] = 2;
// aflabel[index] = index;
int w = index % width;
int h = (index / width) % height;
int n = index / width / height;
int x_index = n*2*height*width + h*width + w;
int y_index = n*2*height*width + height*width + h*width + w;
int label_index = n*height*width + h*width + w;
// printf("%d, %d, %d, %d, %d\n",index,nthreads, n, h, w);
float px = (float) w;
float py = (float) h;
int start = shape_info[n*4];
int end = shape_info[n*4+1];
float min_dis = 1e30;
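// project the pixel onto each (rescaled) segment with the parameter t clamped
// to [0,1], keep the nearest segment, and write its label plus the log-scaled
// attraction-field offsets for this pixel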
for(int i = start; i < end; ++i) {
float xs = (float)width /(float)shape_info[n*4+3];
float ys = (float)height /(float)shape_info[n*4+2];
float x1 = lines[4*i]*xs;
float y1 = lines[4*i+1]*ys;
float x2 = lines[4*i+2]*xs;
float y2 = lines[4*i+3]*ys;
float dx = x2 - x1;
float dy = y2 - y1;
float norm2 = dx*dx + dy*dy;
float t = ((px-x1)*dx + (py-y1)*dy)/(norm2+1e-6);
t = t<1.0?t:1.0;
t = t>0.0?t:0.0;
float ax = x1 + t*(x2-x1) - px;
float ay = y1 + t*(y2-y1) - py;
float dis = ax*ax + ay*ay;
if (dis < min_dis) {
min_dis = dis;
// ax_opt = -sgn(ax)*log(fabs(ax/float(width)) + 1e-6);
// ay_opt = -sgn(ay)*log(fabs(ay/float(height)) + 1e-6);
afmap[x_index] = -sgn(ax)*log(fabs(ax/float(width)) + 1e-6);
afmap[y_index] = -sgn(ay)*log(fabs(ay/float(height)) + 1e-6);
aflabel[label_index] = i - start;
}
}
// afmap[x_index] = ax_opt;
// afmap[y_index] = ay_opt;
// aflabel[label_index] = ind_opt-start;
}
}
std::tuple<at::Tensor,at::Tensor> afm_cuda(
const at::Tensor& lines,
const at::Tensor& shape_info,
const int height,
const int width)
{
auto batch_size = shape_info.size(0);
auto afmap = at::zeros({batch_size,2,height,width}, lines.options());
auto aflabel = at::zeros({batch_size,1,height,width}, lines.options().dtype(at::kInt));
auto nthreads = batch_size*height*width;
// printf("nthreads = %d\n",nthreads);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
float* afmap_data = afmap.data<float>();
int* aflabel_data = aflabel.data<int>();
// printf("%.8f\n", log(1e-6));
hipLaunchKernelGGL(( afm_kernel), dim3(CUDA_GET_BLOCKS(nthreads)), dim3(CUDA_NUM_THREADS) , 0, 0,
nthreads,
lines.contiguous().data<float>(),
shape_info.contiguous().data<int>(),
batch_size, height, width,
afmap_data,
aflabel_data);
hipDeviceSynchronize();
// THCudaCheck(hipMemcpy(&aflabel_host[0],aflabel_dev,
// sizeof(int)*batch_size*height*width, hipMemcpyDeviceToHost));
// THCudaCheck(hipMemcpy(&afmap_host[0],afmap_dev,
// sizeof(int)*batch_size*2*height*width, hipMemcpyDeviceToHost));
// THCudaFree(state, aflabel_dev);
// THCudaFree(state, afmap_dev);
THCudaCheck(hipGetLastError());
return std::make_tuple(afmap, aflabel);
} | 218082898cbeadc4e7e03b1a8c094540d302eaf5.cu | #include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <THC/THCDeviceUtils.cuh>
#include <vector>
#include <iostream>
// int const CUDA_NUM_THREADS = sizeof(unsigned long long) * 8;
int const CUDA_NUM_THREADS = 1024;
inline int CUDA_GET_BLOCKS(const int N) {
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
// template<typename T>
float inline __device__ sgn(float x)
{
return x>0?1.0:-1.0;
}
// template<typename T>
__global__ void afm_kernel(const int nthreads, const float* lines, const int* shape_info, const int num, const int height, const int width, float* afmap, int* aflabel)
{
// aflabel[0] = 100;
CUDA_1D_KERNEL_LOOP(index, nthreads){
// printf("%d, %d\n",index,nthreads);
// afmap[index] = 1;
// afmap[index+height*width] = 2;
// aflabel[index] = index;
int w = index % width;
int h = (index / width) % height;
int n = index / width / height;
int x_index = n*2*height*width + h*width + w;
int y_index = n*2*height*width + height*width + h*width + w;
int label_index = n*height*width + h*width + w;
// printf("%d, %d, %d, %d, %d\n",index,nthreads, n, h, w);
float px = (float) w;
float py = (float) h;
int start = shape_info[n*4];
int end = shape_info[n*4+1];
float min_dis = 1e30;
for(int i = start; i < end; ++i) {
float xs = (float)width /(float)shape_info[n*4+3];
float ys = (float)height /(float)shape_info[n*4+2];
float x1 = lines[4*i]*xs;
float y1 = lines[4*i+1]*ys;
float x2 = lines[4*i+2]*xs;
float y2 = lines[4*i+3]*ys;
float dx = x2 - x1;
float dy = y2 - y1;
float norm2 = dx*dx + dy*dy;
float t = ((px-x1)*dx + (py-y1)*dy)/(norm2+1e-6);
t = t<1.0?t:1.0;
t = t>0.0?t:0.0;
float ax = x1 + t*(x2-x1) - px;
float ay = y1 + t*(y2-y1) - py;
float dis = ax*ax + ay*ay;
if (dis < min_dis) {
min_dis = dis;
// ax_opt = -sgn(ax)*log(fabs(ax/float(width)) + 1e-6);
// ay_opt = -sgn(ay)*log(fabs(ay/float(height)) + 1e-6);
afmap[x_index] = -sgn(ax)*log(fabs(ax/float(width)) + 1e-6);
afmap[y_index] = -sgn(ay)*log(fabs(ay/float(height)) + 1e-6);
aflabel[label_index] = i - start;
}
}
// afmap[x_index] = ax_opt;
// afmap[y_index] = ay_opt;
// aflabel[label_index] = ind_opt-start;
}
}
std::tuple<at::Tensor,at::Tensor> afm_cuda(
const at::Tensor& lines,
const at::Tensor& shape_info,
const int height,
const int width)
{
auto batch_size = shape_info.size(0);
auto afmap = at::zeros({batch_size,2,height,width}, lines.options());
auto aflabel = at::zeros({batch_size,1,height,width}, lines.options().dtype(at::kInt));
auto nthreads = batch_size*height*width;
// printf("nthreads = %d\n",nthreads);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
float* afmap_data = afmap.data<float>();
int* aflabel_data = aflabel.data<int>();
// printf("%.8f\n", log(1e-6));
afm_kernel<<<CUDA_GET_BLOCKS(nthreads), CUDA_NUM_THREADS >>>(
nthreads,
lines.contiguous().data<float>(),
shape_info.contiguous().data<int>(),
batch_size, height, width,
afmap_data,
aflabel_data);
cudaDeviceSynchronize();
// THCudaCheck(cudaMemcpy(&aflabel_host[0],aflabel_dev,
// sizeof(int)*batch_size*height*width, cudaMemcpyDeviceToHost));
// THCudaCheck(cudaMemcpy(&afmap_host[0],afmap_dev,
// sizeof(int)*batch_size*2*height*width, cudaMemcpyDeviceToHost));
// THCudaFree(state, aflabel_dev);
// THCudaFree(state, afmap_dev);
THCudaCheck(cudaGetLastError());
return std::make_tuple(afmap, aflabel);
} |
f79ca996f0adc696cf98de3c30a9643fc9401cbd.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#define GLASSES_FRAME_HEIGHT 2
#define GLASSES_BASE_WIDTH 4
#define GLASSES_BASE_HEIGHT 4
#define GLASSES_THRESHOLD .22f //definitely needs to be changed
#define GLASSES_SKIP_AMOUNT 2 //amount to skip in pixels, we can change this to be multiplied by scale if necessary/desirable
//This identifier is the glasses identifier with 3 horizontal bars going:
//light
//dark
//light
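//Using the integral image, the fit value below works out to
//(sum over the whole window) - 2*(sum over the middle band),
//i.e. the light bands weighted against the dark middle band.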
__global__
void glassesKernel(float* intImage, size_t stride, int* offsets, int windowSize, int numSubWindows, int scale, int* faceDetected, float* results, float* heatMap) {
int threadNum = blockIdx.x * blockDim.x + threadIdx.x;
if(threadNum < numSubWindows){
int startX = offsets[threadNum]/(stride);
int startY = offsets[threadNum]%stride;
float maxFitValue = 0.0f;
//the ID4_* names used here were never defined; the GLASSES_* equivalents are substituted
for (int i = startX; (i+GLASSES_BASE_WIDTH*scale) < (startX+windowSize); i = i+GLASSES_SKIP_AMOUNT){ //use GLASSES_SKIP_AMOUNT * scale for it to scale up as identifier scales
for (int j = startY; (j+(GLASSES_FRAME_HEIGHT+GLASSES_BASE_HEIGHT)*scale) < (startY + windowSize); j = j+GLASSES_SKIP_AMOUNT){ //bound j by the full feature height so the corner reads below stay inside the window
// take important corners from image
float upperLeft = intImage[i*stride + j];
float upperRight = intImage[(i+GLASSES_BASE_WIDTH*scale)*stride + j];
float midLeftTop = intImage[i*stride + j + ((GLASSES_BASE_HEIGHT/2 - GLASSES_FRAME_HEIGHT/2) * scale)];
float midRightTop = intImage[(i+GLASSES_BASE_WIDTH*scale)*stride + j + ((GLASSES_BASE_HEIGHT/2 - GLASSES_FRAME_HEIGHT/2) * scale)];
float midLeftBot = intImage[i*stride + j + ((GLASSES_BASE_HEIGHT/2 + GLASSES_FRAME_HEIGHT/2) * scale)];
float midRightBot = intImage[(i+GLASSES_BASE_WIDTH*scale)*stride + j + ((GLASSES_BASE_HEIGHT/2 + GLASSES_FRAME_HEIGHT/2) * scale)];
float lowerLeft = intImage[i*stride + j+((GLASSES_FRAME_HEIGHT+GLASSES_BASE_HEIGHT)*scale)];
float lowerRight = intImage[(i+GLASSES_BASE_WIDTH*scale)*stride + j+((GLASSES_FRAME_HEIGHT+GLASSES_BASE_HEIGHT)*scale)];
//calculate fit value based on identifier (hard-coded)
float fitValue = upperLeft - lowerLeft - upperRight + lowerRight + (midRightTop + midLeftBot - midLeftTop - midRightBot)*2;
if(fitValue > maxFitValue){
maxFitValue = fitValue;
}
}
}
// float goodnessValue = fitValue*1.0f/(GLASSES_BASE_WIDTH*scale*(GLASSES_FRAME_HEIGHT + GLASSES_BASE_HEIGHT)*scale); // goodnessValue = fit/area
float goodnessValue = maxFitValue/(GLASSES_BASE_WIDTH*scale*(GLASSES_FRAME_HEIGHT+GLASSES_BASE_HEIGHT)*scale); // goodnessValue = fit/area
// results[threadNum] = goodnessValue;
if(goodnessValue > GLASSES_THRESHOLD){
faceDetected[threadNum] = 1;
// for(int i = 0; i < windowSize; ++i){
// for(int j = 0; j < windowSize; ++j){
// heatMap[offsets[threadNum] + i*stride + j] = heatMap[offsets[threadNum] + i*stride + j] + 1.0f;
// }
// }
}
}
}
| f79ca996f0adc696cf98de3c30a9643fc9401cbd.cu | #include <cuda.h>
#include <cuda_runtime_api.h>
#define GLASSES_FRAME_HEIGHT 2
#define GLASSES_BASE_WIDTH 4
#define GLASSES_BASE_HEIGHT 4
#define GLASSES_THRESHOLD .22f //definitely needs to be changed
#define GLASSES_SKIP_AMOUNT 2 //amount to skip in pixels, we can change this to be multiplied by scale if necessary/desirable
//This identifier is the glasses identifier with 3 horizontal bars going:
//light
//dark
//light
__global__
void glassesKernel(float* intImage, size_t stride, int* offsets, int windowSize, int numSubWindows, int scale, int* faceDetected, float* results, float* heatMap) {
int threadNum = blockIdx.x * blockDim.x + threadIdx.x;
if(threadNum < numSubWindows){
int startX = offsets[threadNum]/(stride);
int startY = offsets[threadNum]%stride;
float maxFitValue = 0.0f;
//the ID4_* names used here were never defined; the GLASSES_* equivalents are substituted
for (int i = startX; (i+GLASSES_BASE_WIDTH*scale) < (startX+windowSize); i = i+GLASSES_SKIP_AMOUNT){ //use GLASSES_SKIP_AMOUNT * scale for it to scale up as identifier scales
for (int j = startY; (j+(GLASSES_FRAME_HEIGHT+GLASSES_BASE_HEIGHT)*scale) < (startY + windowSize); j = j+GLASSES_SKIP_AMOUNT){ //bound j by the full feature height so the corner reads below stay inside the window
// take important corners from image
float upperLeft = intImage[i*stride + j];
float upperRight = intImage[(i+GLASSES_BASE_WIDTH*scale)*stride + j];
float midLeftTop = intImage[i*stride + j + ((GLASSES_BASE_HEIGHT/2 - GLASSES_FRAME_HEIGHT/2) * scale)];
float midRightTop = intImage[(i+GLASSES_BASE_WIDTH*scale)*stride + j + ((GLASSES_BASE_HEIGHT/2 - GLASSES_FRAME_HEIGHT/2) * scale)];
float midLeftBot = intImage[i*stride + j + ((GLASSES_BASE_HEIGHT/2 + GLASSES_FRAME_HEIGHT/2) * scale)];
float midRightBot = intImage[(i+GLASSES_BASE_WIDTH*scale)*stride + j + ((GLASSES_BASE_HEIGHT/2 + GLASSES_FRAME_HEIGHT/2) * scale)];
float lowerLeft = intImage[i*stride + j+((GLASSES_FRAME_HEIGHT+GLASSES_BASE_HEIGHT)*scale)];
float lowerRight = intImage[(i+GLASSES_BASE_WIDTH*scale)*stride + j+((GLASSES_FRAME_HEIGHT+GLASSES_BASE_HEIGHT)*scale)];
//calculate fit value based on identifier (hard-coded)
float fitValue = upperLeft - lowerLeft - upperRight + lowerRight + (midRightTop + midLeftBot - midLeftTop - midRightBot)*2;
if(fitValue > maxFitValue){
maxFitValue = fitValue;
}
}
}
// float goodnessValue = fitValue*1.0f/(GLASSES_BASE_WIDTH*scale*(GLASSES_FRAME_HEIGHT + GLASSES_BASE_HEIGHT)*scale); // goodnessValue = fit/area
float goodnessValue = maxFitValue/(ID4_BASE_WIDTH*scale*ID4_BASE_HEIGHT*scale); // goodnessValue = fit/area
// results[threadNum] = goodnessValue;
if(goodnessValue > GLASSES_THRESHOLD){
faceDetected[threadNum] = 1;
// for(int i = 0; i < windowSize; ++i){
// for(int j = 0; j < windowSize; ++j){
// heatMap[offsets[threadNum] + i*stride + j] = heatMap[offsets[threadNum] + i*stride + j] + 1.0f;
// }
// }
}
}
}
|
c264b794ef235d1738f134fe0b685b0a6c942a2f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Data structures:
* points and points2: array of point coordinates indexed by vertex
* areaForce: array of forces indexed by vertex
* volumeForce: array of forces indexed by vertex
* triangleCountPerVertex: array of triangle indexes indexed by vertex
* trianglesByVertex: array of triangles indexed by index of first coordinate,
* last component is an index into triangles
* triangles:array of triangles indexed by triangle
* areas: array of areas indexed by triangle
* d_alpha: single float
* d_surfaceArea: single float
*
*/
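// NOTE: float3 arithmetic (operator-, scalar *, /) and cross()/length()/dot()
// are assumed to come from a helper header such as the CUDA samples'
// helper_math.h (or a HIP port of it); SIGMA (surface tension coefficient) is
// assumed to be supplied by the build, with a placeholder default here.
#include "helper_math.h"
#ifndef SIGMA
#define SIGMA 1.0f
#endif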
__device__ float d_alpha, d_surfaceArea;
__device__ int *d_triangleCountPerVertex, *d_triangleOffset;
__device__ float *d_areas;
__device__ uint3 *d_trianglesByVertex;
__device__ float3 *d_areaForce, *d_volumeForce;
__device__ uint3 *d_triangles; // per-triangle vertex indices; used below but previously undeclared
__device__ unsigned int d_triangleCount, d_vertexCount;
__global__ void calculateForces(float3* d_points){
  int vertexIndex = blockIdx.x;
  if(vertexIndex >= d_vertexCount){
    return;
  }
  int threadIndex = threadIdx.x;
  if(threadIndex < d_triangleCountPerVertex[vertexIndex]){
    int triangleIndex = d_triangleOffset[vertexIndex];
    uint3 t = d_trianglesByVertex[triangleIndex + threadIndex];
    float3 x1 = d_points[vertexIndex];
float3 x2 = d_points[t.x];
float3 x3 = d_points[t.y];
float3 s1 = x2 - x1;
float3 s2 = x3 - x2;
// Remember to set to zero
// atomic add?
    // a __syncthreads() was here, but it would be divergent inside this
    // if-branch and is not needed before atomicAdd; atomicAdd also has no
    // float3 overload, so each component is accumulated separately
    float3 fA = SIGMA/(4.0f*d_areas[t.z]) * cross(s2, cross(s1, s2));
    float3 fV = cross(x2, x3)/6.0f;
    atomicAdd(&(d_areaForce[vertexIndex].x), fA.x);
    atomicAdd(&(d_areaForce[vertexIndex].y), fA.y);
    atomicAdd(&(d_areaForce[vertexIndex].z), fA.z);
    atomicAdd(&(d_volumeForce[vertexIndex].x), fV.x);
    atomicAdd(&(d_volumeForce[vertexIndex].y), fV.y);
    atomicAdd(&(d_volumeForce[vertexIndex].z), fV.z);
}
}
__global__ void calculateAlpha(){
float sum1, sum2;
for(int i = 0; i < d_vertexCount; i++){
sum1+=dot(d_volumeForce[i], d_areaForce[i]);
sum2+=dot(d_volumeForce[i], d_volumeForce[i]);
}
d_alpha = sum1 / sum2;
}
float retrieveAlpha(){
typeof(d_alpha) alpha;
hipMemcpyFromSymbol(&alpha, "d_alpha", sizeof(alpha),
0, hipMemcpyDeviceToHost);
return (float)alpha;
}
__global__ void displaceVertices(float lambda,
float3* d_points1,
float3* d_points2)
{
int vertexIndex = blockIdx.x*blockDim.x + threadIdx.x;
if(vertexIndex < d_vertexCount){
d_points2[vertexIndex] = d_points1[vertexIndex] +
lambda*(d_areaForce[vertexIndex] -
d_alpha*d_volumeForce[vertexIndex]);
}
}
// Need to calculate areas before calling calculateForces for the first time!!
__global__ void calculateAreas(float3* points2){
int triangleIndex = blockIdx.x*blockDim.x + threadIdx.x;
if(triangleIndex < d_triangleCount){
uint3 t = d_triangles[triangleIndex];
float3 s1 = points2[t.y] - points2[t.x];
float3 s2 = points2[t.z] - points2[t.y];
d_areas[triangleIndex] = length(cross(s1, s2))/2;
}
}
__global__ void sumSurfaceArea(){
d_surfaceArea = 0;
for(int i=0; i < d_triangleCount; i++){
d_surfaceArea += d_areas[i];
}
}
float retrieveSurfaceArea(){
typeof(d_surfaceArea) surfaceArea;
hipMemcpyFromSymbol(&surfaceArea, "d_surfaceArea",
sizeof(surfaceArea), 0, hipMemcpyDeviceToHost);
return (float)surfaceArea;
}
// Returns surface area
float moveVertices(float lambda, float3* d_points1, float3* d_points2,
int vertexCount, int triangleCount,
int maxTrianglesPerVertex){
// 256 threads per block is an arbitrary but typical choice satisfying the constraints below
int threadsPerBlock_a = 256;
int blockCount_a = (vertexCount + threadsPerBlock_a - 1) / threadsPerBlock_a; // blockCount_a * threadsPerBlock_a >= vertexCount
int threadsPerBlock_b = 256;
int blockCount_b = (triangleCount + threadsPerBlock_b - 1) / threadsPerBlock_b; // blockCount_b * threadsPerBlock_b >= triangleCount
hipLaunchKernelGGL(( calculateForces), dim3(vertexCount), dim3(maxTrianglesPerVertex) , 0, 0, d_points1);
hipLaunchKernelGGL(( calculateAlpha), dim3(1), dim3(1) , 0, 0, );
hipLaunchKernelGGL(( displaceVertices), dim3(blockCount_a), dim3(threadsPerBlock_a) , 0, 0, lambda, d_points1, d_points2);
hipLaunchKernelGGL(( calculateAreas), dim3(blockCount_b), dim3(threadsPerBlock_b) , 0, 0, d_points2);
hipLaunchKernelGGL(( sumSurfaceArea), dim3(1), dim3(1) , 0, 0, );
return retrieveSurfaceArea();
} | c264b794ef235d1738f134fe0b685b0a6c942a2f.cu | /*
* Data structures:
* points and points2: array of point coordinates indexed by vertex
* areaForce: array of forces indexed by vertex
* volumeForce: array of forces indexed by vertex
* triangleCountPerVertex: array of triangle indexes indexed by vertex
* trianglesByVertex: array of triangles indexed by index of first coordinate,
* last component is an index into triangles
* triangles:array of triangles indexed by triangle
* areas: array of areas indexed by triangle
* d_alpha: single float
* d_surfaceArea: single float
*
*/
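// NOTE: float3 arithmetic (operator-, scalar *, /) and cross()/length()/dot()
// are assumed to come from a helper header such as the CUDA samples'
// helper_math.h; SIGMA (surface tension coefficient) is assumed to be supplied
// by the build, with a placeholder default here.
#include "helper_math.h"
#ifndef SIGMA
#define SIGMA 1.0f
#endif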
__device__ float d_alpha, d_surfaceArea;
__device__ int *d_triangleCountPerVertex, *d_triangleOffset;
__device__ float *d_areas;
__device__ uint3 *d_trianglesByVertex;
__device__ float3 *d_areaForce, *d_volumeForce;
__device__ uint3 *d_triangles; // per-triangle vertex indices; used below but previously undeclared
__device__ unsigned int d_triangleCount, d_vertexCount;
__global__ void calculateForces(float3* d_points){
  int vertexIndex = blockIdx.x;
  if(vertexIndex >= d_vertexCount){
    return;
  }
  int threadIndex = threadIdx.x;
  if(threadIndex < d_triangleCountPerVertex[vertexIndex]){
    int triangleIndex = d_triangleOffset[vertexIndex];
    uint3 t = d_trianglesByVertex[triangleIndex + threadIndex];
    float3 x1 = d_points[vertexIndex];
float3 x2 = d_points[t.x];
float3 x3 = d_points[t.y];
float3 s1 = x2 - x1;
float3 s2 = x3 - x2;
// Remember to set to zero
// atomic add?
    // a __syncthreads() was here, but it would be divergent inside this
    // if-branch and is not needed before atomicAdd; atomicAdd also has no
    // float3 overload, so each component is accumulated separately
    float3 fA = SIGMA/(4.0f*d_areas[t.z]) * cross(s2, cross(s1, s2));
    float3 fV = cross(x2, x3)/6.0f;
    atomicAdd(&(d_areaForce[vertexIndex].x), fA.x);
    atomicAdd(&(d_areaForce[vertexIndex].y), fA.y);
    atomicAdd(&(d_areaForce[vertexIndex].z), fA.z);
    atomicAdd(&(d_volumeForce[vertexIndex].x), fV.x);
    atomicAdd(&(d_volumeForce[vertexIndex].y), fV.y);
    atomicAdd(&(d_volumeForce[vertexIndex].z), fV.z);
}
}
__global__ void calculateAlpha(){
float sum1, sum2;
for(int i = 0; i < d_vertexCount; i++){
sum1+=dot(d_volumeForce[i], d_areaForce[i]);
sum2+=dot(d_volumeForce[i], d_volumeForce[i]);
}
d_alpha = sum1 / sum2;
}
float retrieveAlpha(){
typeof(d_alpha) alpha;
cudaMemcpyFromSymbol(&alpha, "d_alpha", sizeof(alpha),
0, cudaMemcpyDeviceToHost);
return (float)alpha;
}
__global__ void displaceVertices(float lambda,
float3* d_points1,
float3* d_points2)
{
int vertexIndex = blockIdx.x*blockDim.x + threadIdx.x;
if(vertexIndex < d_vertexCount){
d_points2[vertexIndex] = d_points1[vertexIndex] +
lambda*(d_areaForce[vertexIndex] -
d_alpha*d_volumeForce[vertexIndex]);
}
}
// Need to calculate areas before calling calculateForces for the first time!!
__global__ void calculateAreas(float3* points2){
int triangleIndex = blockIdx.x*blockDim.x + threadIdx.x;
if(triangleIndex < d_triangleCount){
uint3 t = d_triangles[triangleIndex];
float3 s1 = points2[t.y] - points2[t.x];
float3 s2 = points2[t.z] - points2[t.y];
d_areas[triangleIndex] = length(cross(s1, s2))/2;
}
}
__global__ void sumSurfaceArea(){
d_surfaceArea = 0;
for(int i=0; i < d_triangleCount; i++){
d_surfaceArea += d_areas[i];
}
}
float retrieveSurfaceArea(){
typeof(d_surfaceArea) surfaceArea;
cudaMemcpyFromSymbol(&surfaceArea, "d_surfaceArea",
sizeof(surfaceArea), 0, cudaMemcpyDeviceToHost);
return (float)surfaceArea;
}
// Returns surface area
float moveVertices(float lambda, float3* d_points1, float3* d_points2,
int vertexCount, int triangleCount,
int maxTrianglesPerVertex){
int blockCount_a, threadsPerBlock_a // blockCount_a * threadsPerBlock_a >= vertexCount
blockCount_b, threadsPerBlock_b;// blockCount_b * threadsPerBlock_b >= triangleCount
calculateForces<<< vertexCount, maxTrianglesPerVertex >>>(d_points1);
calculateAlpha<<< 1, 1 >>>();
displaceVertices<<< blockCount_a, threadsPerBlock_a >>>(lambda, d_points1, d_points2);
calculateAreas<<< blockCount_b, threadsPerBlock_b >>>(d_points2);
sumSurfaceArea<<< 1, 1 >>>();
return retrieveSurfaceArea();
} |
8e8883a4aa28c057949afe999038dce81151cb66.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "hip/hip_runtime.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
void check_error (const char* message) {
hipError_t error = hipGetLastError ();
if (error != hipSuccess) {
printf ("CUDA error : %s, %s\n", message, hipGetErrorString (error));
exit(-1);
}
}
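// curvi_1 below is machine-generated stencil code: each thread owns one (j,k)
// column and marches along i, accumulating fourth-order differences (c2 weights
// for the +/-2 offsets, c1 for +/-1) of u1/u2/u3, scaled by the metric terms
// met1..met4 and the grid stretchings strx/stry, into r1.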
__global__ void __launch_bounds__(256,1) curvi_1 (double * __restrict__ in_r1, double *__restrict__ in_u1, double * __restrict__ in_u2, double *__restrict__ in_u3, double * __restrict__ in_mu, double * __restrict__ in_la, double * __restrict__ in_met1, double * __restrict__ in_met2, double * __restrict__ in_met3, double * __restrict__ in_met4, double * strx, double * stry, double c1, double c2, int N) {
//Determing the block's indices
int blockdim_k= (int)(blockDim.x);
int k0 = (int)(blockIdx.x)*(blockdim_k);
int k = max (k0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
double (*u1)[304][304] = (double (*)[304][304])in_u1;
double (*u2)[304][304] = (double (*)[304][304])in_u2;
double (*u3)[304][304] = (double (*)[304][304])in_u3;
double (*mu)[304][304] = (double (*)[304][304])in_mu;
double (*la)[304][304] = (double (*)[304][304])in_la;
double (*r1)[304][304] = (double (*)[304][304])in_r1;
double (*met1)[304][304] = (double (*)[304][304])in_met1;
double (*met2)[304][304] = (double (*)[304][304])in_met2;
double (*met3)[304][304] = (double (*)[304][304])in_met3;
double (*met4)[304][304] = (double (*)[304][304])in_met4;
if (j>=2 & k>=2 & j<=N-3 & k<=N-3) {
for (int i=2; i<=N-3; i++) {
double _t_6_;
double _t_4_;
double _t_7_;
double _t_5_;
double _t_8_;
double _t_3_;
double _t_2_;
double _t_1_;
double _t_11_;
double _t_9_;
double _t_12_;
double _t_10_;
double _t_13_;
double _t_18_;
double _t_16_;
double _t_19_;
double _t_17_;
double _t_20_;
double _t_15_;
double _t_14_;
double _t_23_;
double _t_21_;
double _t_24_;
double _t_22_;
double _t_25_;
double _t_0_;
double _t_31_;
double _t_29_;
double _t_32_;
double _t_30_;
double _t_33_;
double _t_28_;
double _t_27_;
double _t_26_;
double _t_36_;
double _t_34_;
double _t_37_;
double _t_35_;
double _t_38_;
double _t_43_;
double _t_41_;
double _t_44_;
double _t_42_;
double _t_45_;
double _t_40_;
double _t_39_;
double _t_48_;
double _t_46_;
double _t_49_;
double _t_47_;
double _t_50_;
double r1ic0jc0kc0 = r1[i][j][k];
double _t_58_;
double _t_57_;
double _t_55_;
double _t_59_;
double _t_56_;
double _t_60_;
double _t_54_;
double _t_53_;
double _t_64_;
double _t_62_;
double _t_65_;
double _t_63_;
double _t_66_;
double _t_61_;
double _t_69_;
double _t_67_;
double _t_70_;
double _t_68_;
double _t_71_;
double _t_76_;
double _t_75_;
double _t_73_;
double _t_77_;
double _t_74_;
double _t_78_;
double _t_72_;
double _t_82_;
double _t_80_;
double _t_83_;
double _t_81_;
double _t_84_;
double _t_79_;
double _t_87_;
double _t_85_;
double _t_88_;
double _t_86_;
double _t_89_;
double _t_52_;
double _t_95_;
double _t_94_;
double _t_92_;
double _t_96_;
double _t_93_;
double _t_97_;
double _t_91_;
double _t_90_;
double _t_101_;
double _t_99_;
double _t_102_;
double _t_100_;
double _t_103_;
double _t_98_;
double _t_106_;
double _t_104_;
double _t_107_;
double _t_105_;
double _t_108_;
double _t_113_;
double _t_112_;
double _t_110_;
double _t_114_;
double _t_111_;
double _t_115_;
double _t_109_;
double _t_119_;
double _t_117_;
double _t_120_;
double _t_118_;
double _t_121_;
double _t_116_;
double _t_124_;
double _t_122_;
double _t_125_;
double _t_123_;
double _t_126_;
double _t_51_;
double _t_134_;
double _t_133_;
double _t_131_;
double _t_135_;
double _t_132_;
double _t_136_;
double _t_130_;
double _t_129_;
double _t_128_;
double _t_139_;
double _t_137_;
double _t_140_;
double _t_138_;
double _t_141_;
double _t_145_;
double _t_143_;
double _t_146_;
double _t_144_;
double _t_147_;
double _t_142_;
double _t_153_;
double _t_152_;
double _t_150_;
double _t_154_;
double _t_151_;
double _t_155_;
double _t_149_;
double _t_148_;
double _t_158_;
double _t_156_;
double _t_159_;
double _t_157_;
double _t_160_;
double _t_164_;
double _t_162_;
double _t_165_;
double _t_163_;
double _t_166_;
double _t_161_;
double _t_127_;
double _t_173_;
double _t_172_;
double _t_170_;
double _t_174_;
double _t_171_;
double _t_175_;
double _t_169_;
double _t_168_;
double _t_167_;
double _t_178_;
double _t_176_;
double _t_179_;
double _t_177_;
double _t_180_;
double _t_184_;
double _t_182_;
double _t_185_;
double _t_183_;
double _t_186_;
double _t_181_;
double _t_192_;
double _t_191_;
double _t_189_;
double _t_193_;
double _t_190_;
double _t_194_;
double _t_188_;
double _t_187_;
double _t_197_;
double _t_195_;
double _t_198_;
double _t_196_;
double _t_199_;
double _t_203_;
double _t_201_;
double _t_204_;
double _t_202_;
double _t_205_;
double _t_200_;
_t_6_ = mu[i][j][k+2] * met3[i][j][k+2];
_t_4_ = _t_6_ * met1[i][j][k+2];
_t_7_ = u1[i][j+2][k+2];
_t_7_ -= u1[i][j-2][k+2];
_t_5_ = c2 * _t_7_;
_t_8_ = u1[i][j+1][k+2];
_t_8_ -= u1[i][j-1][k+2];
_t_5_ += c1 * _t_8_;
_t_3_ = _t_4_ * _t_5_;
_t_2_ = _t_3_ * stry[j+2];
_t_1_ = _t_2_ * strx[i];
_t_11_ = la[i][j][k+2] * met2[i][j][k+2];
_t_9_ = _t_11_ * met1[i][j][k+2];
_t_12_ = u2[i][j+2][k+2];
_t_12_ -= u2[i][j-2][k+2];
_t_10_ = c2 * _t_12_;
_t_13_ = u2[i][j+1][k+2];
_t_13_ -= u2[i][j-1][k+2];
_t_10_ += c1 * _t_13_;
_t_1_ += _t_9_ * _t_10_;
_t_18_ = mu[i][j][k-2] * met3[i][j][k-2];
_t_16_ = _t_18_ * met1[i][j][k-2];
_t_19_ = u1[i][j+2][k-2];
_t_19_ -= u1[i][j-2][k-2];
_t_17_ = c2 * _t_19_;
_t_20_ = u1[i][j+1][k-2];
_t_20_ -= u1[i][j-1][k-2];
_t_17_ += c1 * _t_20_;
_t_15_ = _t_16_ * _t_17_;
_t_14_ = _t_15_ * stry[j];
_t_1_ += _t_14_ * strx[i];
_t_23_ = la[i][j][k-2] * met2[i][j][k-2];
_t_21_ = _t_23_ * met1[i][j][k-2];
_t_24_ = u2[i][j+2][k-2];
_t_24_ -= u2[i][j-2][k-2];
_t_22_ = c2 * _t_24_;
_t_25_ = u2[i][j+1][k-2];
_t_25_ -= u2[i][j-1][k-2];
_t_22_ += c1 * _t_25_;
_t_1_ += _t_21_ * _t_22_;
_t_0_ = c2 * _t_1_;
_t_31_ = mu[i][j][k+1] * met3[i][j][k+1];
_t_29_ = _t_31_ * met1[i][j][k+1];
_t_32_ = u1[i][j+2][k+1];
_t_32_ -= u1[i][j-2][k+1];
_t_30_ = c2 * _t_32_;
_t_33_ = u1[i][j+1][k+1];
_t_33_ -= u1[i][j-1][k+1];
_t_30_ += c1 * _t_33_;
_t_28_ = _t_29_ * _t_30_;
_t_27_ = _t_28_ * stry[j-2];
_t_26_ = _t_27_ * strx[i];
_t_36_ = la[i][j][k+1] * met2[i][j][k+1];
_t_34_ = _t_36_ * met1[i][j][k+1];
_t_37_ = u2[i][j+2][k+1];
_t_37_ -= u2[i][j-2][k+1];
_t_35_ = c2 * _t_37_;
_t_38_ = u2[i][j+1][k+1];
_t_38_ -= u2[i][j-1][k+1];
_t_35_ += c1 * _t_38_;
_t_26_ += _t_34_ * _t_35_;
_t_43_ = mu[i][j][k-1] * met3[i][j][k-1];
_t_41_ = _t_43_ * met1[i][j][k-1];
_t_44_ = u1[i][j+2][k-1];
_t_44_ -= u1[i][j-2][k-1];
_t_42_ = c2 * _t_44_;
_t_45_ = u1[i][j+1][k-1];
_t_45_ -= u1[i][j-1][k-1];
_t_42_ += c1 * _t_45_;
_t_40_ = _t_41_ * _t_42_;
_t_39_ = _t_40_ * stry[j];
_t_26_ += _t_39_ * strx[i];
_t_48_ = la[i][j][k-1] * met2[i][j][k-1];
_t_46_ = _t_48_ * met1[i][j][k-1];
_t_49_ = u2[i][j+2][k-1];
_t_49_ -= u2[i][j-2][k-1];
_t_47_ = c2 * _t_49_;
_t_50_ = u2[i][j+1][k-1];
_t_50_ -= u2[i][j-1][k-1];
_t_47_ += c1 * _t_50_;
_t_26_ += _t_46_ * _t_47_;
_t_0_ += c1 * _t_26_;
r1ic0jc0kc0 += _t_0_;
_t_58_ = 2.0 * mu[i+2][j][k];
_t_58_ += la[i+2][j][k];
_t_57_ = _t_58_ * met2[i+2][j][k];
_t_55_ = _t_57_ * met1[i+2][j][k];
_t_59_ = u1[i+2][j][k+2];
_t_59_ -= u1[i+2][j][k-2];
_t_56_ = c2 * _t_59_;
_t_60_ = u1[i+2][j][k+1];
_t_60_ -= u1[i+2][j][k-1];
_t_56_ += c1 * _t_60_;
_t_54_ = _t_55_ * _t_56_;
_t_53_ = _t_54_ * strx[i];
_t_64_ = la[i+2][j][k] * met3[i+2][j][k];
_t_62_ = _t_64_ * met1[i+2][j][k];
_t_65_ = u2[i+2][j][k+2];
_t_65_ -= u2[i+2][j][k-2];
_t_63_ = c2 * _t_65_;
_t_66_ = u2[i+2][j][k+1];
_t_66_ -= u2[i+2][j][k-1];
_t_63_ += c1 * _t_66_;
_t_61_ = _t_62_ * _t_63_;
_t_53_ += _t_61_ * stry[j];
_t_69_ = la[i+2][j][k] * met4[i+2][j][k];
_t_67_ = _t_69_ * met1[i+2][j][k];
_t_70_ = u3[i+2][j][k+2];
_t_70_ -= u3[i+2][j][k-2];
_t_68_ = c2 * _t_70_;
_t_71_ = u3[i+2][j][k+1];
_t_71_ -= u3[i+2][j][k-1];
_t_68_ += c1 * _t_71_;
_t_53_ += _t_67_ * _t_68_;
_t_76_ = 2.0 * mu[i-2][j][k];
_t_76_ += la[i-2][j][k];
_t_75_ = _t_76_ * met2[i-2][j][k];
_t_73_ = _t_75_ * met1[i-2][j][k];
_t_77_ = u1[i-2][j][k+2];
_t_77_ -= u1[i-2][j][k-2];
_t_74_ = c2 * _t_77_;
_t_78_ = u1[i-2][j][k+1];
_t_78_ -= u1[i-2][j][k-1];
_t_74_ += c1 * _t_78_;
_t_72_ = _t_73_ * _t_74_;
_t_53_ += _t_72_ * strx[i];
_t_82_ = la[i-2][j][k] * met3[i-2][j][k];
_t_80_ = _t_82_ * met1[i-2][j][k];
_t_83_ = u2[i-2][j][k+2];
_t_83_ -= u2[i-2][j][k-2];
_t_81_ = c2 * _t_83_;
_t_84_ = u2[i-2][j][k+1];
_t_84_ -= u2[i-2][j][k-1];
_t_81_ += c1 * _t_84_;
_t_79_ = _t_80_ * _t_81_;
_t_53_ += _t_79_ * stry[j];
_t_87_ = la[i-2][j][k] * met4[i-2][j][k];
_t_85_ = _t_87_ * met1[i-2][j][k];
_t_88_ = u3[i-2][j][k+2];
_t_88_ -= u3[i-2][j][k-2];
_t_86_ = c2 * _t_88_;
_t_89_ = u3[i-2][j][k+1];
_t_89_ -= u3[i-2][j][k-1];
_t_86_ += c1 * _t_89_;
_t_53_ += _t_85_ * _t_86_;
_t_52_ = c2 * _t_53_;
_t_95_ = 2.0 * mu[i+1][j][k];
_t_95_ += la[i+1][j][k];
_t_94_ = _t_95_ * met2[i+1][j][k];
_t_92_ = _t_94_ * met1[i+1][j][k];
_t_96_ = u1[i+1][j][k+2];
_t_96_ -= u1[i+1][j][k-2];
_t_93_ = c2 * _t_96_;
_t_97_ = u1[i+1][j][k+1];
_t_97_ -= u1[i+1][j][k-1];
_t_93_ += c1 * _t_97_;
_t_91_ = _t_92_ * _t_93_;
_t_90_ = _t_91_ * strx[i];
_t_101_ = la[i+1][j][k] * met3[i+1][j][k];
_t_99_ = _t_101_ * met1[i+1][j][k];
_t_102_ = u2[i+1][j][k+2];
_t_102_ -= u2[i+1][j][k-2];
_t_100_ = c2 * _t_102_;
_t_103_ = u2[i+1][j][k+1];
_t_103_ -= u2[i+1][j][k-1];
_t_100_ += c1 * _t_103_;
_t_98_ = _t_99_ * _t_100_;
_t_90_ += _t_98_ * stry[j];
_t_106_ = la[i+1][j][k] * met4[i+1][j][k];
_t_104_ = _t_106_ * met1[i+1][j][k];
_t_107_ = u3[i+1][j][k+2];
_t_107_ -= u3[i+1][j][k-2];
_t_105_ = c2 * _t_107_;
_t_108_ = u3[i+1][j][k+1];
_t_108_ -= u3[i+1][j][k-1];
_t_105_ += c1 * _t_108_;
_t_90_ += _t_104_ * _t_105_;
_t_113_ = 2.0 * mu[i-1][j][k];
_t_113_ += la[i-1][j][k];
_t_112_ = _t_113_ * met2[i-1][j][k];
_t_110_ = _t_112_ * met1[i-1][j][k];
_t_114_ = u1[i-1][j][k+2];
_t_114_ -= u1[i-1][j][k-2];
_t_111_ = c2 * _t_114_;
_t_115_ = u1[i-1][j][k+1];
_t_115_ -= u1[i-1][j][k-1];
_t_111_ += c1 * _t_115_;
_t_109_ = _t_110_ * _t_111_;
_t_90_ += _t_109_ * strx[i];
_t_119_ = la[i-1][j][k] * met3[i-1][j][k];
_t_117_ = _t_119_ * met1[i-1][j][k];
_t_120_ = u2[i-1][j][k+2];
_t_120_ -= u2[i-1][j][k-2];
_t_118_ = c2 * _t_120_;
_t_121_ = u2[i-1][j][k+1];
_t_121_ -= u2[i-1][j][k-1];
_t_118_ += c1 * _t_121_;
_t_116_ = _t_117_ * _t_118_;
_t_90_ += _t_116_ * stry[j];
_t_124_ = la[i-1][j][k] * met4[i-1][j][k];
_t_122_ = _t_124_ * met1[i-1][j][k];
_t_125_ = u3[i-1][j][k+2];
_t_125_ -= u3[i-1][j][k-2];
_t_123_ = c2 * _t_125_;
_t_126_ = u3[i-1][j][k+1];
_t_126_ -= u3[i-1][j][k-1];
_t_123_ += c1 * _t_126_;
_t_90_ += _t_122_ * _t_123_;
_t_52_ += c1 * _t_90_;
_t_51_ = _t_52_ * stry[j];
r1ic0jc0kc0 += _t_51_;
_t_134_ = 2.0 * mu[i][j][k+2];
_t_134_ += la[i][j][k+2];
_t_133_ = _t_134_ * met2[i][j][k+2];
_t_131_ = _t_133_ * met1[i][j][k+2];
_t_135_ = u1[i+2][j][k+2];
_t_135_ -= u1[i-2][j][k+2];
_t_132_ = c2 * _t_135_;
_t_136_ = u1[i+1][j][k+2];
_t_136_ -= u1[i-1][j][k+2];
_t_132_ += c1 * _t_136_;
_t_130_ = _t_131_ * _t_132_;
_t_129_ = _t_130_ * strx[i];
_t_128_ = _t_129_ * stry[j];
_t_139_ = mu[i][j][k+2] * met3[i][j][k+2];
_t_137_ = _t_139_ * met1[i][j][k+2];
_t_140_ = u2[i+2][j][k+2];
_t_140_ -= u2[i-2][j][k+2];
_t_138_ = c2 * _t_140_;
_t_141_ = u2[i+1][j][k+2];
_t_141_ -= u2[i-1][j][k+2];
_t_138_ += c1 * _t_141_;
_t_128_ += _t_137_ * _t_138_;
_t_145_ = mu[i][j][k+2] * met4[i][j][k+2];
_t_143_ = _t_145_ * met1[i][j][k+2];
_t_146_ = u3[i+2][j][k+2];
_t_146_ -= u3[i-2][j][k+2];
_t_144_ = c2 * _t_146_;
_t_147_ = u3[i+1][j][k+2];
_t_147_ -= u3[i-1][j][k+2];
_t_144_ += c1 * _t_147_;
_t_142_ = _t_143_ * _t_144_;
_t_128_ += _t_142_ * stry[j];
_t_153_ = 2.0 * mu[i][j][k-2];
_t_153_ += la[i][j][k-2];
_t_152_ = _t_153_ * met2[i][j][k-2];
_t_150_ = _t_152_ * met1[i][j][k-2];
_t_154_ = u1[i+2][j][k-2];
_t_154_ -= u1[i-2][j][k-2];
_t_151_ = c2 * _t_154_;
_t_155_ = u1[i+1][j][k-2];
_t_155_ -= u1[i-1][j][k-2];
_t_151_ += c1 * _t_155_;
_t_149_ = _t_150_ * _t_151_;
_t_148_ = _t_149_ * strx[i];
_t_128_ += _t_148_ * stry[j];
_t_158_ = mu[i][j][k-2] * met3[i][j][k-2];
_t_156_ = _t_158_ * met1[i][j][k-2];
_t_159_ = u2[i+2][j][k-2];
_t_159_ -= u2[i-2][j][k-2];
_t_157_ = c2 * _t_159_;
_t_160_ = u2[i+1][j][k-2];
_t_160_ -= u2[i-1][j][k-2];
_t_157_ += c1 * _t_160_;
_t_128_ += _t_156_ * _t_157_;
_t_164_ = mu[i][j][k-2] * met4[i][j][k-2];
_t_162_ = _t_164_ * met1[i][j][k-2];
_t_165_ = u3[i+2][j][k-2];
_t_165_ -= u3[i-2][j][k-2];
_t_163_ = c2 * _t_165_;
_t_166_ = u3[i+1][j][k-2];
_t_166_ -= u3[i-1][j][k-2];
_t_163_ += c1 * _t_166_;
_t_161_ = _t_162_ * _t_163_;
_t_128_ += _t_161_ * stry[j];
_t_127_ = c2 * _t_128_;
_t_173_ = 2.0 * mu[i][j][k+1];
_t_173_ += la[i][j][k+1];
_t_172_ = _t_173_ * met2[i][j][k+1];
_t_170_ = _t_172_ * met1[i][j][k+1];
_t_174_ = u1[i+2][j][k+1];
_t_174_ -= u1[i-2][j][k+1];
_t_171_ = c2 * _t_174_;
_t_175_ = u1[i+1][j][k+1];
_t_175_ -= u1[i-1][j][k+1];
_t_171_ += c1 * _t_175_;
_t_169_ = _t_170_ * _t_171_;
_t_168_ = _t_169_ * strx[i+2];
_t_167_ = _t_168_ * stry[j];
_t_178_ = mu[i][j][k+1] * met3[i][j][k+1];
_t_176_ = _t_178_ * met1[i][j][k+1];
_t_179_ = u2[i+2][j][k+1];
_t_179_ -= u2[i-2][j][k+1];
_t_177_ = c2 * _t_179_;
_t_180_ = u2[i+1][j][k+1];
_t_180_ -= u2[i-1][j][k+1];
_t_177_ += c1 * _t_180_;
_t_167_ += _t_176_ * _t_177_;
_t_184_ = mu[i][j][k+1] * met4[i][j][k+1];
_t_182_ = _t_184_ * met1[i][j][k+1];
_t_185_ = u3[i+2][j][k+1];
_t_185_ -= u3[i-2][j][k+1];
_t_183_ = c2 * _t_185_;
_t_186_ = u3[i+1][j][k+1];
_t_186_ -= u3[i-1][j][k+1];
_t_183_ += c1 * _t_186_;
_t_181_ = _t_182_ * _t_183_;
_t_167_ += _t_181_ * stry[j];
_t_192_ = 2.0 * mu[i][j][k-1];
_t_192_ += la[i][j][k-1];
_t_191_ = _t_192_ * met2[i][j][k-1];
_t_189_ = _t_191_ * met1[i][j][k-1];
_t_193_ = u1[i+2][j][k-1];
_t_193_ -= u1[i-2][j][k-1];
_t_190_ = c2 * _t_193_;
_t_194_ = u1[i+1][j][k-1];
_t_194_ -= u1[i-1][j][k-1];
_t_190_ += c1 * _t_194_;
_t_188_ = _t_189_ * _t_190_;
_t_187_ = _t_188_ * strx[i-2];
_t_167_ += _t_187_ * stry[j];
_t_197_ = mu[i][j][k-1] * met3[i][j][k-1];
_t_195_ = _t_197_ * met1[i][j][k-1];
_t_198_ = u2[i+2][j][k-1];
_t_198_ -= u2[i-2][j][k-1];
_t_196_ = c2 * _t_198_;
_t_199_ = u2[i+1][j][k-1];
_t_199_ -= u2[i-1][j][k-1];
_t_196_ += c1 * _t_199_;
_t_167_ += _t_195_ * _t_196_;
_t_203_ = mu[i][j][k-1] * met4[i][j][k-1];
_t_201_ = _t_203_ * met1[i][j][k-1];
_t_204_ = u3[i+2][j][k-1];
_t_204_ -= u3[i-2][j][k-1];
_t_202_ = c2 * _t_204_;
_t_205_ = u3[i+1][j][k-1];
_t_205_ -= u3[i-1][j][k-1];
_t_202_ += c1 * _t_205_;
_t_200_ = _t_201_ * _t_202_;
_t_167_ += _t_200_ * stry[j];
_t_127_ += c1 * _t_167_;
r1ic0jc0kc0 += _t_127_;
r1[i][j][k] = r1ic0jc0kc0;
}
}
}
__global__ void __launch_bounds__(128,4) curvi_2 (double * __restrict__ in_r1, double *__restrict__ in_u1, double * __restrict__ in_u2, double *__restrict__ in_u3, double * __restrict__ in_mu, double * __restrict__ in_la, double * __restrict__ in_met1, double * __restrict__ in_met2, double * __restrict__ in_met3, double * __restrict__ in_met4, double * strx, double * stry, double c1, double c2, int N) {
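// __launch_bounds__(128,4) caps blocks at 128 threads and requests at least 4 resident
// blocks per multiprocessor, constraining register usage in this register-heavy kernel.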
// Determining the block's indices
int blockdim_k= (int)(blockDim.x);
int k0 = (int)(blockIdx.x)*(blockdim_k);
int k = max (k0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
int blockdim_i= (int)(blockDim.z);
int i0 = (int)(blockIdx.z)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.z);
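// threadIdx.x is mapped to k, the unit-stride dimension of the 3-D arrays below,
// so consecutive threads touch consecutive addresses and global loads coalesce.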
double (*u1)[304][304] = (double (*)[304][304])in_u1;
double (*u2)[304][304] = (double (*)[304][304])in_u2;
double (*u3)[304][304] = (double (*)[304][304])in_u3;
double (*mu)[304][304] = (double (*)[304][304])in_mu;
double (*la)[304][304] = (double (*)[304][304])in_la;
double (*r1)[304][304] = (double (*)[304][304])in_r1;
double (*met1)[304][304] = (double (*)[304][304])in_met1;
double (*met2)[304][304] = (double (*)[304][304])in_met2;
double (*met3)[304][304] = (double (*)[304][304])in_met3;
double (*met4)[304][304] = (double (*)[304][304])in_met4;
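// The casts hard-code 304 for the two trailing extents, so correctness implicitly
// assumes the runtime problem size N is 304 (the host allocates N*N*N flat buffers).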
if (j>=2 && k>=2 && j<=N-3 && k<=N-3 && i>=2 && i<=N-3) {
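// Interior guard: the stencil below reaches +/-2 in every direction, so a
// two-cell halo on each face is excluded.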
double _t_6_;
double _t_4_;
double _t_7_;
double _t_5_;
double _t_8_;
double _t_3_;
double _t_2_;
double _t_1_;
double _t_11_;
double _t_9_;
double _t_12_;
double _t_10_;
double _t_13_;
double _t_18_;
double _t_16_;
double _t_19_;
double _t_17_;
double _t_20_;
double _t_15_;
double _t_14_;
double _t_23_;
double _t_21_;
double _t_24_;
double _t_22_;
double _t_25_;
double _t_0_;
double _t_31_;
double _t_29_;
double _t_32_;
double _t_30_;
double _t_33_;
double _t_28_;
double _t_27_;
double _t_26_;
double _t_36_;
double _t_34_;
double _t_37_;
double _t_35_;
double _t_38_;
double _t_43_;
double _t_41_;
double _t_44_;
double _t_42_;
double _t_45_;
double _t_40_;
double _t_39_;
double _t_48_;
double _t_46_;
double _t_49_;
double _t_47_;
double _t_50_;
double r1ic0jc0kc0 = r1[i][j][k];
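// Keep the running result in a register: r1 is read once here and written back
// exactly once at the end of the thread's work.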
double _t_55_;
double _t_53_;
double _t_56_;
double _t_54_;
double _t_57_;
double _t_52_;
double _t_60_;
double _t_58_;
double _t_61_;
double _t_59_;
double _t_62_;
double _t_51_;
double _t_66_;
double _t_64_;
double _t_67_;
double _t_65_;
double _t_68_;
double _t_63_;
double _t_71_;
double _t_69_;
double _t_72_;
double _t_70_;
double _t_73_;
double _t_77_;
double _t_75_;
double _t_78_;
double _t_76_;
double _t_79_;
double _t_74_;
double _t_82_;
double _t_80_;
double _t_83_;
double _t_81_;
double _t_84_;
double _t_88_;
double _t_86_;
double _t_89_;
double _t_87_;
double _t_90_;
double _t_85_;
double _t_93_;
double _t_91_;
double _t_94_;
double _t_92_;
double _t_95_;
_t_6_ = mu[i][j+2][k] * met3[i][j+2][k];
_t_4_ = _t_6_ * met1[i][j+2][k];
_t_7_ = u1[i][j+2][k+2];
_t_7_ -= u1[i][j+2][k-2];
_t_5_ = c2 * _t_7_;
_t_8_ = u1[i][j+2][k+1];
_t_8_ -= u1[i][j+2][k-1];
_t_5_ += c1 * _t_8_;
_t_3_ = _t_4_ * _t_5_;
_t_2_ = _t_3_ * stry[j+1];
_t_1_ = _t_2_ * strx[i];
_t_11_ = mu[i][j+2][k] * met2[i][j+2][k];
_t_9_ = _t_11_ * met1[i][j+2][k];
_t_12_ = u2[i][j+2][k+2];
_t_12_ -= u2[i][j+2][k-2];
_t_10_ = c2 * _t_12_;
_t_13_ = u2[i][j+2][k+1];
_t_13_ -= u2[i][j+2][k-1];
_t_10_ += c1 * _t_13_;
_t_1_ += _t_9_ * _t_10_;
_t_18_ = mu[i][j-2][k] * met3[i][j-2][k];
_t_16_ = _t_18_ * met1[i][j-2][k];
_t_19_ = u1[i][j-2][k+2];
_t_19_ -= u1[i][j-2][k-2];
_t_17_ = c2 * _t_19_;
_t_20_ = u1[i][j-2][k+1];
_t_20_ -= u1[i][j-2][k-1];
_t_17_ += c1 * _t_20_;
_t_15_ = _t_16_ * _t_17_;
_t_14_ = _t_15_ * stry[j];
_t_1_ += _t_14_ * strx[i];
_t_23_ = mu[i][j-2][k] * met2[i][j-2][k];
_t_21_ = _t_23_ * met1[i][j-2][k];
_t_24_ = u2[i][j-2][k+2];
_t_24_ -= u2[i][j-2][k-2];
_t_22_ = c2 * _t_24_;
_t_25_ = u2[i][j-2][k+1];
_t_25_ -= u2[i][j-2][k-1];
_t_22_ += c1 * _t_25_;
_t_1_ += _t_21_ * _t_22_;
_t_0_ = c2 * _t_1_;
_t_31_ = mu[i][j+1][k] * met3[i][j+1][k];
_t_29_ = _t_31_ * met1[i][j+1][k];
_t_32_ = u1[i][j+1][k+2];
_t_32_ -= u1[i][j+1][k-2];
_t_30_ = c2 * _t_32_;
_t_33_ = u1[i][j+1][k+1];
_t_33_ -= u1[i][j+1][k-1];
_t_30_ += c1 * _t_33_;
_t_28_ = _t_29_ * _t_30_;
_t_27_ = _t_28_ * stry[j-1];
_t_26_ = _t_27_ * strx[i];
_t_36_ = mu[i][j+1][k] * met2[i][j+1][k];
_t_34_ = _t_36_ * met1[i][j+1][k];
_t_37_ = u2[i][j+1][k+2];
_t_37_ -= u2[i][j+1][k-2];
_t_35_ = c2 * _t_37_;
_t_38_ = u2[i][j+1][k+1];
_t_38_ -= u2[i][j+1][k-1];
_t_35_ += c1 * _t_38_;
_t_26_ += _t_34_ * _t_35_;
_t_43_ = mu[i][j-1][k] * met3[i][j-1][k];
_t_41_ = _t_43_ * met1[i][j-1][k];
_t_44_ = u1[i][j-1][k+2];
_t_44_ -= u1[i][j-1][k-2];
_t_42_ = c2 * _t_44_;
_t_45_ = u1[i][j-1][k+1];
_t_45_ -= u1[i][j-1][k-1];
_t_42_ += c1 * _t_45_;
_t_40_ = _t_41_ * _t_42_;
_t_39_ = _t_40_ * stry[j];
_t_26_ += _t_39_ * strx[i];
_t_48_ = mu[i][j-1][k] * met2[i][j-1][k];
_t_46_ = _t_48_ * met1[i][j-1][k];
_t_49_ = u2[i][j-1][k+2];
_t_49_ -= u2[i][j-1][k-2];
_t_47_ = c2 * _t_49_;
_t_50_ = u2[i][j-1][k+1];
_t_50_ -= u2[i][j-1][k-1];
_t_47_ += c1 * _t_50_;
_t_26_ += _t_46_ * _t_47_;
_t_0_ += c1 * _t_26_;
r1ic0jc0kc0 += _t_0_;
_t_55_ = mu[i][j+2][k] * met1[i][j+2][k];
_t_53_ = _t_55_ * met1[i][j+2][k];
_t_56_ = u2[i+2][j+2][k];
_t_56_ -= u2[i-2][j+2][k];
_t_54_ = c2 * _t_56_;
_t_57_ = u2[i+1][j+2][k];
_t_57_ -= u2[i-1][j+2][k];
_t_54_ += c1 * _t_57_;
_t_52_ = _t_53_ * _t_54_;
_t_60_ = mu[i][j-2][k] * met1[i][j-2][k];
_t_58_ = _t_60_ * met1[i][j-2][k];
_t_61_ = u2[i+2][j-2][k];
_t_61_ -= u2[i-2][j-2][k];
_t_59_ = c2 * _t_61_;
_t_62_ = u2[i+1][j-2][k];
_t_62_ -= u2[i-1][j-2][k];
_t_59_ += c1 * _t_62_;
_t_52_ += _t_58_ * _t_59_;
_t_51_ = c2 * _t_52_;
_t_66_ = mu[i][j+1][k] * met1[i][j+1][k];
_t_64_ = _t_66_ * met1[i][j+1][k];
_t_67_ = u2[i+2][j+1][k];
_t_67_ -= u2[i-2][j+1][k];
_t_65_ = c2 * _t_67_;
_t_68_ = u2[i+1][j+1][k];
_t_68_ -= u2[i-1][j+1][k];
_t_65_ += c1 * _t_68_;
_t_63_ = _t_64_ * _t_65_;
_t_71_ = mu[i][j-1][k] * met1[i][j-1][k];
_t_69_ = _t_71_ * met1[i][j-1][k];
_t_72_ = u2[i+2][j-1][k];
_t_72_ -= u2[i-2][j-1][k];
_t_70_ = c2 * _t_72_;
_t_73_ = u2[i+1][j-1][k];
_t_73_ -= u2[i-1][j-1][k];
_t_70_ += c1 * _t_73_;
_t_63_ += _t_69_ * _t_70_;
_t_51_ += c1 * _t_63_;
_t_77_ = la[i+2][j][k] * met1[i+2][j][k];
_t_75_ = _t_77_ * met1[i+2][j][k];
_t_78_ = u2[i+2][j+2][k];
_t_78_ -= u2[i+2][j-2][k];
_t_76_ = c2 * _t_78_;
_t_79_ = u2[i+2][j+1][k];
_t_79_ -= u2[i+2][j-1][k];
_t_76_ += c1 * _t_79_;
_t_74_ = _t_75_ * _t_76_;
_t_82_ = la[i-2][j][k] * met1[i-2][j][k];
_t_80_ = _t_82_ * met1[i-2][j][k];
_t_83_ = u2[i-2][j+2][k];
_t_83_ -= u2[i-2][j-2][k];
_t_81_ = c2 * _t_83_;
_t_84_ = u2[i-2][j+1][k];
_t_84_ -= u2[i-2][j-1][k];
_t_81_ += c1 * _t_84_;
_t_74_ += _t_80_ * _t_81_;
_t_51_ += c2 * _t_74_;
_t_88_ = la[i+1][j][k] * met1[i+1][j][k];
_t_86_ = _t_88_ * met1[i+1][j][k];
_t_89_ = u2[i+1][j+2][k];
_t_89_ -= u2[i+1][j-2][k];
_t_87_ = c2 * _t_89_;
_t_90_ = u2[i+1][j+1][k];
_t_90_ -= u2[i+1][j-1][k];
_t_87_ += c1 * _t_90_;
_t_85_ = _t_86_ * _t_87_;
_t_93_ = la[i-1][j][k] * met1[i-1][j][k];
_t_91_ = _t_93_ * met1[i-1][j][k];
_t_94_ = u2[i-1][j+2][k];
_t_94_ -= u2[i-1][j-2][k];
_t_92_ = c2 * _t_94_;
_t_95_ = u2[i-1][j+1][k];
_t_95_ -= u2[i-1][j-1][k];
_t_92_ += c1 * _t_95_;
_t_85_ += _t_91_ * _t_92_;
_t_51_ += c1 * _t_85_;
r1ic0jc0kc0 += _t_51_;
r1[i][j][k] = r1ic0jc0kc0;
}
}
extern "C" void host_code (double *h_r1, double *h_u1, double *h_u2, double *h_u3, double *h_mu, double *h_la, double *h_met1, double *h_met2, double *h_met3, double *h_met4, double *h_strx, double *h_stry, double c1, double c2, int N) {
double *r1;
hipMalloc (&r1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for r1\n");
hipMemcpy (r1, h_r1, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *u1;
hipMalloc (&u1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u1\n");
hipMemcpy (u1, h_u1, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *u2;
hipMalloc (&u2, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u2\n");
hipMemcpy (u2, h_u2, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *u3;
hipMalloc (&u3, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u3\n");
hipMemcpy (u3, h_u3, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *mu;
hipMalloc (&mu, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for mu\n");
hipMemcpy (mu, h_mu, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *la;
hipMalloc (&la, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for la\n");
hipMemcpy (la, h_la, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *met1;
hipMalloc (&met1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for met1\n");
hipMemcpy (met1, h_met1, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *met2;
hipMalloc (&met2, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for met2\n");
hipMemcpy (met2, h_met2, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *met3;
hipMalloc (&met3, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for met3\n");
hipMemcpy (met3, h_met3, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *met4;
hipMalloc (&met4, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for met4\n");
hipMemcpy (met4, h_met4, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *strx;
hipMalloc (&strx, sizeof(double)*N);
check_error ("Failed to allocate device memory for strx\n");
hipMemcpy (strx, h_strx, sizeof(double)*N, hipMemcpyHostToDevice);
double *stry;
hipMalloc (&stry, sizeof(double)*N);
check_error ("Failed to allocate device memory for stry\n");
hipMemcpy (stry, h_stry, sizeof(double)*N, hipMemcpyHostToDevice);
dim3 blockconfig (16, 8);
dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, blockconfig.y), 1);
hipLaunchKernelGGL(( curvi_1) , dim3(gridconfig), dim3(blockconfig), 0, 0, r1, u1, u2, u3, mu, la, met1, met2, met3, met4, strx, stry, c1, c2, N);
dim3 blockconfig_1 (16, 2, 2);
dim3 gridconfig_1 (ceil(N, blockconfig_1.x), ceil(N, blockconfig_1.y), ceil(N, blockconfig_1.z));
hipLaunchKernelGGL(( curvi_2) , dim3(gridconfig_1), dim3(blockconfig_1), 0, 0, r1, u1, u2, u3, mu, la, met1, met2, met3, met4, strx, stry, c1, c2, N);
hipMemcpy (h_r1, r1, sizeof(double)*N*N*N, hipMemcpyDeviceToHost);
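// Note: none of the device buffers allocated above are ever freed (no hipFree),
// so repeated calls to host_code leak device memory.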
}
| 8e8883a4aa28c057949afe999038dce81151cb66.cu | #include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
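// Integer ceiling division, a/b rounded up; used below to size the launch grids.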
void check_error (const char* message) {
cudaError_t error = cudaGetLastError ();
if (error != cudaSuccess) {
printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (error));
exit(-1);
}
}
__global__ void __launch_bounds__(256,1) curvi_1 (double * __restrict__ in_r1, double *__restrict__ in_u1, double * __restrict__ in_u2, double *__restrict__ in_u3, double * __restrict__ in_mu, double * __restrict__ in_la, double * __restrict__ in_met1, double * __restrict__ in_met2, double * __restrict__ in_met3, double * __restrict__ in_met4, double * strx, double * stry, double c1, double c2, int N) {
// Determining the block's indices
int blockdim_k= (int)(blockDim.x);
int k0 = (int)(blockIdx.x)*(blockdim_k);
int k = max (k0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
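// curvi_1 parallelizes only over the (k, j) plane; each thread then marches
// serially along i in the loop below.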
double (*u1)[304][304] = (double (*)[304][304])in_u1;
double (*u2)[304][304] = (double (*)[304][304])in_u2;
double (*u3)[304][304] = (double (*)[304][304])in_u3;
double (*mu)[304][304] = (double (*)[304][304])in_mu;
double (*la)[304][304] = (double (*)[304][304])in_la;
double (*r1)[304][304] = (double (*)[304][304])in_r1;
double (*met1)[304][304] = (double (*)[304][304])in_met1;
double (*met2)[304][304] = (double (*)[304][304])in_met2;
double (*met3)[304][304] = (double (*)[304][304])in_met3;
double (*met4)[304][304] = (double (*)[304][304])in_met4;
if (j>=2 && k>=2 && j<=N-3 && k<=N-3) {
for (int i=2; i<=N-3; i++) {
double _t_6_;
double _t_4_;
double _t_7_;
double _t_5_;
double _t_8_;
double _t_3_;
double _t_2_;
double _t_1_;
double _t_11_;
double _t_9_;
double _t_12_;
double _t_10_;
double _t_13_;
double _t_18_;
double _t_16_;
double _t_19_;
double _t_17_;
double _t_20_;
double _t_15_;
double _t_14_;
double _t_23_;
double _t_21_;
double _t_24_;
double _t_22_;
double _t_25_;
double _t_0_;
double _t_31_;
double _t_29_;
double _t_32_;
double _t_30_;
double _t_33_;
double _t_28_;
double _t_27_;
double _t_26_;
double _t_36_;
double _t_34_;
double _t_37_;
double _t_35_;
double _t_38_;
double _t_43_;
double _t_41_;
double _t_44_;
double _t_42_;
double _t_45_;
double _t_40_;
double _t_39_;
double _t_48_;
double _t_46_;
double _t_49_;
double _t_47_;
double _t_50_;
double r1ic0jc0kc0 = r1[i][j][k];
double _t_58_;
double _t_57_;
double _t_55_;
double _t_59_;
double _t_56_;
double _t_60_;
double _t_54_;
double _t_53_;
double _t_64_;
double _t_62_;
double _t_65_;
double _t_63_;
double _t_66_;
double _t_61_;
double _t_69_;
double _t_67_;
double _t_70_;
double _t_68_;
double _t_71_;
double _t_76_;
double _t_75_;
double _t_73_;
double _t_77_;
double _t_74_;
double _t_78_;
double _t_72_;
double _t_82_;
double _t_80_;
double _t_83_;
double _t_81_;
double _t_84_;
double _t_79_;
double _t_87_;
double _t_85_;
double _t_88_;
double _t_86_;
double _t_89_;
double _t_52_;
double _t_95_;
double _t_94_;
double _t_92_;
double _t_96_;
double _t_93_;
double _t_97_;
double _t_91_;
double _t_90_;
double _t_101_;
double _t_99_;
double _t_102_;
double _t_100_;
double _t_103_;
double _t_98_;
double _t_106_;
double _t_104_;
double _t_107_;
double _t_105_;
double _t_108_;
double _t_113_;
double _t_112_;
double _t_110_;
double _t_114_;
double _t_111_;
double _t_115_;
double _t_109_;
double _t_119_;
double _t_117_;
double _t_120_;
double _t_118_;
double _t_121_;
double _t_116_;
double _t_124_;
double _t_122_;
double _t_125_;
double _t_123_;
double _t_126_;
double _t_51_;
double _t_134_;
double _t_133_;
double _t_131_;
double _t_135_;
double _t_132_;
double _t_136_;
double _t_130_;
double _t_129_;
double _t_128_;
double _t_139_;
double _t_137_;
double _t_140_;
double _t_138_;
double _t_141_;
double _t_145_;
double _t_143_;
double _t_146_;
double _t_144_;
double _t_147_;
double _t_142_;
double _t_153_;
double _t_152_;
double _t_150_;
double _t_154_;
double _t_151_;
double _t_155_;
double _t_149_;
double _t_148_;
double _t_158_;
double _t_156_;
double _t_159_;
double _t_157_;
double _t_160_;
double _t_164_;
double _t_162_;
double _t_165_;
double _t_163_;
double _t_166_;
double _t_161_;
double _t_127_;
double _t_173_;
double _t_172_;
double _t_170_;
double _t_174_;
double _t_171_;
double _t_175_;
double _t_169_;
double _t_168_;
double _t_167_;
double _t_178_;
double _t_176_;
double _t_179_;
double _t_177_;
double _t_180_;
double _t_184_;
double _t_182_;
double _t_185_;
double _t_183_;
double _t_186_;
double _t_181_;
double _t_192_;
double _t_191_;
double _t_189_;
double _t_193_;
double _t_190_;
double _t_194_;
double _t_188_;
double _t_187_;
double _t_197_;
double _t_195_;
double _t_198_;
double _t_196_;
double _t_199_;
double _t_203_;
double _t_201_;
double _t_204_;
double _t_202_;
double _t_205_;
double _t_200_;
_t_6_ = mu[i][j][k+2] * met3[i][j][k+2];
_t_4_ = _t_6_ * met1[i][j][k+2];
_t_7_ = u1[i][j+2][k+2];
_t_7_ -= u1[i][j-2][k+2];
_t_5_ = c2 * _t_7_;
_t_8_ = u1[i][j+1][k+2];
_t_8_ -= u1[i][j-1][k+2];
_t_5_ += c1 * _t_8_;
_t_3_ = _t_4_ * _t_5_;
_t_2_ = _t_3_ * stry[j+2];
_t_1_ = _t_2_ * strx[i];
_t_11_ = la[i][j][k+2] * met2[i][j][k+2];
_t_9_ = _t_11_ * met1[i][j][k+2];
_t_12_ = u2[i][j+2][k+2];
_t_12_ -= u2[i][j-2][k+2];
_t_10_ = c2 * _t_12_;
_t_13_ = u2[i][j+1][k+2];
_t_13_ -= u2[i][j-1][k+2];
_t_10_ += c1 * _t_13_;
_t_1_ += _t_9_ * _t_10_;
_t_18_ = mu[i][j][k-2] * met3[i][j][k-2];
_t_16_ = _t_18_ * met1[i][j][k-2];
_t_19_ = u1[i][j+2][k-2];
_t_19_ -= u1[i][j-2][k-2];
_t_17_ = c2 * _t_19_;
_t_20_ = u1[i][j+1][k-2];
_t_20_ -= u1[i][j-1][k-2];
_t_17_ += c1 * _t_20_;
_t_15_ = _t_16_ * _t_17_;
_t_14_ = _t_15_ * stry[j];
_t_1_ += _t_14_ * strx[i];
_t_23_ = la[i][j][k-2] * met2[i][j][k-2];
_t_21_ = _t_23_ * met1[i][j][k-2];
_t_24_ = u2[i][j+2][k-2];
_t_24_ -= u2[i][j-2][k-2];
_t_22_ = c2 * _t_24_;
_t_25_ = u2[i][j+1][k-2];
_t_25_ -= u2[i][j-1][k-2];
_t_22_ += c1 * _t_25_;
_t_1_ += _t_21_ * _t_22_;
_t_0_ = c2 * _t_1_;
_t_31_ = mu[i][j][k+1] * met3[i][j][k+1];
_t_29_ = _t_31_ * met1[i][j][k+1];
_t_32_ = u1[i][j+2][k+1];
_t_32_ -= u1[i][j-2][k+1];
_t_30_ = c2 * _t_32_;
_t_33_ = u1[i][j+1][k+1];
_t_33_ -= u1[i][j-1][k+1];
_t_30_ += c1 * _t_33_;
_t_28_ = _t_29_ * _t_30_;
_t_27_ = _t_28_ * stry[j-2];
_t_26_ = _t_27_ * strx[i];
_t_36_ = la[i][j][k+1] * met2[i][j][k+1];
_t_34_ = _t_36_ * met1[i][j][k+1];
_t_37_ = u2[i][j+2][k+1];
_t_37_ -= u2[i][j-2][k+1];
_t_35_ = c2 * _t_37_;
_t_38_ = u2[i][j+1][k+1];
_t_38_ -= u2[i][j-1][k+1];
_t_35_ += c1 * _t_38_;
_t_26_ += _t_34_ * _t_35_;
_t_43_ = mu[i][j][k-1] * met3[i][j][k-1];
_t_41_ = _t_43_ * met1[i][j][k-1];
_t_44_ = u1[i][j+2][k-1];
_t_44_ -= u1[i][j-2][k-1];
_t_42_ = c2 * _t_44_;
_t_45_ = u1[i][j+1][k-1];
_t_45_ -= u1[i][j-1][k-1];
_t_42_ += c1 * _t_45_;
_t_40_ = _t_41_ * _t_42_;
_t_39_ = _t_40_ * stry[j];
_t_26_ += _t_39_ * strx[i];
_t_48_ = la[i][j][k-1] * met2[i][j][k-1];
_t_46_ = _t_48_ * met1[i][j][k-1];
_t_49_ = u2[i][j+2][k-1];
_t_49_ -= u2[i][j-2][k-1];
_t_47_ = c2 * _t_49_;
_t_50_ = u2[i][j+1][k-1];
_t_50_ -= u2[i][j-1][k-1];
_t_47_ += c1 * _t_50_;
_t_26_ += _t_46_ * _t_47_;
_t_0_ += c1 * _t_26_;
r1ic0jc0kc0 += _t_0_;
_t_58_ = 2.0 * mu[i+2][j][k];
_t_58_ += la[i+2][j][k];
_t_57_ = _t_58_ * met2[i+2][j][k];
_t_55_ = _t_57_ * met1[i+2][j][k];
_t_59_ = u1[i+2][j][k+2];
_t_59_ -= u1[i+2][j][k-2];
_t_56_ = c2 * _t_59_;
_t_60_ = u1[i+2][j][k+1];
_t_60_ -= u1[i+2][j][k-1];
_t_56_ += c1 * _t_60_;
_t_54_ = _t_55_ * _t_56_;
_t_53_ = _t_54_ * strx[i];
_t_64_ = la[i+2][j][k] * met3[i+2][j][k];
_t_62_ = _t_64_ * met1[i+2][j][k];
_t_65_ = u2[i+2][j][k+2];
_t_65_ -= u2[i+2][j][k-2];
_t_63_ = c2 * _t_65_;
_t_66_ = u2[i+2][j][k+1];
_t_66_ -= u2[i+2][j][k-1];
_t_63_ += c1 * _t_66_;
_t_61_ = _t_62_ * _t_63_;
_t_53_ += _t_61_ * stry[j];
_t_69_ = la[i+2][j][k] * met4[i+2][j][k];
_t_67_ = _t_69_ * met1[i+2][j][k];
_t_70_ = u3[i+2][j][k+2];
_t_70_ -= u3[i+2][j][k-2];
_t_68_ = c2 * _t_70_;
_t_71_ = u3[i+2][j][k+1];
_t_71_ -= u3[i+2][j][k-1];
_t_68_ += c1 * _t_71_;
_t_53_ += _t_67_ * _t_68_;
_t_76_ = 2.0 * mu[i-2][j][k];
_t_76_ += la[i-2][j][k];
_t_75_ = _t_76_ * met2[i-2][j][k];
_t_73_ = _t_75_ * met1[i-2][j][k];
_t_77_ = u1[i-2][j][k+2];
_t_77_ -= u1[i-2][j][k-2];
_t_74_ = c2 * _t_77_;
_t_78_ = u1[i-2][j][k+1];
_t_78_ -= u1[i-2][j][k-1];
_t_74_ += c1 * _t_78_;
_t_72_ = _t_73_ * _t_74_;
_t_53_ += _t_72_ * strx[i];
_t_82_ = la[i-2][j][k] * met3[i-2][j][k];
_t_80_ = _t_82_ * met1[i-2][j][k];
_t_83_ = u2[i-2][j][k+2];
_t_83_ -= u2[i-2][j][k-2];
_t_81_ = c2 * _t_83_;
_t_84_ = u2[i-2][j][k+1];
_t_84_ -= u2[i-2][j][k-1];
_t_81_ += c1 * _t_84_;
_t_79_ = _t_80_ * _t_81_;
_t_53_ += _t_79_ * stry[j];
_t_87_ = la[i-2][j][k] * met4[i-2][j][k];
_t_85_ = _t_87_ * met1[i-2][j][k];
_t_88_ = u3[i-2][j][k+2];
_t_88_ -= u3[i-2][j][k-2];
_t_86_ = c2 * _t_88_;
_t_89_ = u3[i-2][j][k+1];
_t_89_ -= u3[i-2][j][k-1];
_t_86_ += c1 * _t_89_;
_t_53_ += _t_85_ * _t_86_;
_t_52_ = c2 * _t_53_;
_t_95_ = 2.0 * mu[i+1][j][k];
_t_95_ += la[i+1][j][k];
_t_94_ = _t_95_ * met2[i+1][j][k];
_t_92_ = _t_94_ * met1[i+1][j][k];
_t_96_ = u1[i+1][j][k+2];
_t_96_ -= u1[i+1][j][k-2];
_t_93_ = c2 * _t_96_;
_t_97_ = u1[i+1][j][k+1];
_t_97_ -= u1[i+1][j][k-1];
_t_93_ += c1 * _t_97_;
_t_91_ = _t_92_ * _t_93_;
_t_90_ = _t_91_ * strx[i];
_t_101_ = la[i+1][j][k] * met3[i+1][j][k];
_t_99_ = _t_101_ * met1[i+1][j][k];
_t_102_ = u2[i+1][j][k+2];
_t_102_ -= u2[i+1][j][k-2];
_t_100_ = c2 * _t_102_;
_t_103_ = u2[i+1][j][k+1];
_t_103_ -= u2[i+1][j][k-1];
_t_100_ += c1 * _t_103_;
_t_98_ = _t_99_ * _t_100_;
_t_90_ += _t_98_ * stry[j];
_t_106_ = la[i+1][j][k] * met4[i+1][j][k];
_t_104_ = _t_106_ * met1[i+1][j][k];
_t_107_ = u3[i+1][j][k+2];
_t_107_ -= u3[i+1][j][k-2];
_t_105_ = c2 * _t_107_;
_t_108_ = u3[i+1][j][k+1];
_t_108_ -= u3[i+1][j][k-1];
_t_105_ += c1 * _t_108_;
_t_90_ += _t_104_ * _t_105_;
_t_113_ = 2.0 * mu[i-1][j][k];
_t_113_ += la[i-1][j][k];
_t_112_ = _t_113_ * met2[i-1][j][k];
_t_110_ = _t_112_ * met1[i-1][j][k];
_t_114_ = u1[i-1][j][k+2];
_t_114_ -= u1[i-1][j][k-2];
_t_111_ = c2 * _t_114_;
_t_115_ = u1[i-1][j][k+1];
_t_115_ -= u1[i-1][j][k-1];
_t_111_ += c1 * _t_115_;
_t_109_ = _t_110_ * _t_111_;
_t_90_ += _t_109_ * strx[i];
_t_119_ = la[i-1][j][k] * met3[i-1][j][k];
_t_117_ = _t_119_ * met1[i-1][j][k];
_t_120_ = u2[i-1][j][k+2];
_t_120_ -= u2[i-1][j][k-2];
_t_118_ = c2 * _t_120_;
_t_121_ = u2[i-1][j][k+1];
_t_121_ -= u2[i-1][j][k-1];
_t_118_ += c1 * _t_121_;
_t_116_ = _t_117_ * _t_118_;
_t_90_ += _t_116_ * stry[j];
_t_124_ = la[i-1][j][k] * met4[i-1][j][k];
_t_122_ = _t_124_ * met1[i-1][j][k];
_t_125_ = u3[i-1][j][k+2];
_t_125_ -= u3[i-1][j][k-2];
_t_123_ = c2 * _t_125_;
_t_126_ = u3[i-1][j][k+1];
_t_126_ -= u3[i-1][j][k-1];
_t_123_ += c1 * _t_126_;
_t_90_ += _t_122_ * _t_123_;
_t_52_ += c1 * _t_90_;
_t_51_ = _t_52_ * stry[j];
r1ic0jc0kc0 += _t_51_;
_t_134_ = 2.0 * mu[i][j][k+2];
_t_134_ += la[i][j][k+2];
_t_133_ = _t_134_ * met2[i][j][k+2];
_t_131_ = _t_133_ * met1[i][j][k+2];
_t_135_ = u1[i+2][j][k+2];
_t_135_ -= u1[i-2][j][k+2];
_t_132_ = c2 * _t_135_;
_t_136_ = u1[i+1][j][k+2];
_t_136_ -= u1[i-1][j][k+2];
_t_132_ += c1 * _t_136_;
_t_130_ = _t_131_ * _t_132_;
_t_129_ = _t_130_ * strx[i];
_t_128_ = _t_129_ * stry[j];
_t_139_ = mu[i][j][k+2] * met3[i][j][k+2];
_t_137_ = _t_139_ * met1[i][j][k+2];
_t_140_ = u2[i+2][j][k+2];
_t_140_ -= u2[i-2][j][k+2];
_t_138_ = c2 * _t_140_;
_t_141_ = u2[i+1][j][k+2];
_t_141_ -= u2[i-1][j][k+2];
_t_138_ += c1 * _t_141_;
_t_128_ += _t_137_ * _t_138_;
_t_145_ = mu[i][j][k+2] * met4[i][j][k+2];
_t_143_ = _t_145_ * met1[i][j][k+2];
_t_146_ = u3[i+2][j][k+2];
_t_146_ -= u3[i-2][j][k+2];
_t_144_ = c2 * _t_146_;
_t_147_ = u3[i+1][j][k+2];
_t_147_ -= u3[i-1][j][k+2];
_t_144_ += c1 * _t_147_;
_t_142_ = _t_143_ * _t_144_;
_t_128_ += _t_142_ * stry[j];
_t_153_ = 2.0 * mu[i][j][k-2];
_t_153_ += la[i][j][k-2];
_t_152_ = _t_153_ * met2[i][j][k-2];
_t_150_ = _t_152_ * met1[i][j][k-2];
_t_154_ = u1[i+2][j][k-2];
_t_154_ -= u1[i-2][j][k-2];
_t_151_ = c2 * _t_154_;
_t_155_ = u1[i+1][j][k-2];
_t_155_ -= u1[i-1][j][k-2];
_t_151_ += c1 * _t_155_;
_t_149_ = _t_150_ * _t_151_;
_t_148_ = _t_149_ * strx[i];
_t_128_ += _t_148_ * stry[j];
_t_158_ = mu[i][j][k-2] * met3[i][j][k-2];
_t_156_ = _t_158_ * met1[i][j][k-2];
_t_159_ = u2[i+2][j][k-2];
_t_159_ -= u2[i-2][j][k-2];
_t_157_ = c2 * _t_159_;
_t_160_ = u2[i+1][j][k-2];
_t_160_ -= u2[i-1][j][k-2];
_t_157_ += c1 * _t_160_;
_t_128_ += _t_156_ * _t_157_;
_t_164_ = mu[i][j][k-2] * met4[i][j][k-2];
_t_162_ = _t_164_ * met1[i][j][k-2];
_t_165_ = u3[i+2][j][k-2];
_t_165_ -= u3[i-2][j][k-2];
_t_163_ = c2 * _t_165_;
_t_166_ = u3[i+1][j][k-2];
_t_166_ -= u3[i-1][j][k-2];
_t_163_ += c1 * _t_166_;
_t_161_ = _t_162_ * _t_163_;
_t_128_ += _t_161_ * stry[j];
_t_127_ = c2 * _t_128_;
_t_173_ = 2.0 * mu[i][j][k+1];
_t_173_ += la[i][j][k+1];
_t_172_ = _t_173_ * met2[i][j][k+1];
_t_170_ = _t_172_ * met1[i][j][k+1];
_t_174_ = u1[i+2][j][k+1];
_t_174_ -= u1[i-2][j][k+1];
_t_171_ = c2 * _t_174_;
_t_175_ = u1[i+1][j][k+1];
_t_175_ -= u1[i-1][j][k+1];
_t_171_ += c1 * _t_175_;
_t_169_ = _t_170_ * _t_171_;
_t_168_ = _t_169_ * strx[i+2];
_t_167_ = _t_168_ * stry[j];
_t_178_ = mu[i][j][k+1] * met3[i][j][k+1];
_t_176_ = _t_178_ * met1[i][j][k+1];
_t_179_ = u2[i+2][j][k+1];
_t_179_ -= u2[i-2][j][k+1];
_t_177_ = c2 * _t_179_;
_t_180_ = u2[i+1][j][k+1];
_t_180_ -= u2[i-1][j][k+1];
_t_177_ += c1 * _t_180_;
_t_167_ += _t_176_ * _t_177_;
_t_184_ = mu[i][j][k+1] * met4[i][j][k+1];
_t_182_ = _t_184_ * met1[i][j][k+1];
_t_185_ = u3[i+2][j][k+1];
_t_185_ -= u3[i-2][j][k+1];
_t_183_ = c2 * _t_185_;
_t_186_ = u3[i+1][j][k+1];
_t_186_ -= u3[i-1][j][k+1];
_t_183_ += c1 * _t_186_;
_t_181_ = _t_182_ * _t_183_;
_t_167_ += _t_181_ * stry[j];
_t_192_ = 2.0 * mu[i][j][k-1];
_t_192_ += la[i][j][k-1];
_t_191_ = _t_192_ * met2[i][j][k-1];
_t_189_ = _t_191_ * met1[i][j][k-1];
_t_193_ = u1[i+2][j][k-1];
_t_193_ -= u1[i-2][j][k-1];
_t_190_ = c2 * _t_193_;
_t_194_ = u1[i+1][j][k-1];
_t_194_ -= u1[i-1][j][k-1];
_t_190_ += c1 * _t_194_;
_t_188_ = _t_189_ * _t_190_;
_t_187_ = _t_188_ * strx[i-2];
_t_167_ += _t_187_ * stry[j];
_t_197_ = mu[i][j][k-1] * met3[i][j][k-1];
_t_195_ = _t_197_ * met1[i][j][k-1];
_t_198_ = u2[i+2][j][k-1];
_t_198_ -= u2[i-2][j][k-1];
_t_196_ = c2 * _t_198_;
_t_199_ = u2[i+1][j][k-1];
_t_199_ -= u2[i-1][j][k-1];
_t_196_ += c1 * _t_199_;
_t_167_ += _t_195_ * _t_196_;
_t_203_ = mu[i][j][k-1] * met4[i][j][k-1];
_t_201_ = _t_203_ * met1[i][j][k-1];
_t_204_ = u3[i+2][j][k-1];
_t_204_ -= u3[i-2][j][k-1];
_t_202_ = c2 * _t_204_;
_t_205_ = u3[i+1][j][k-1];
_t_205_ -= u3[i-1][j][k-1];
_t_202_ += c1 * _t_205_;
_t_200_ = _t_201_ * _t_202_;
_t_167_ += _t_200_ * stry[j];
_t_127_ += c1 * _t_167_;
r1ic0jc0kc0 += _t_127_;
r1[i][j][k] = r1ic0jc0kc0;
}
}
}
__global__ void __launch_bounds__(128,4) curvi_2 (double * __restrict__ in_r1, double *__restrict__ in_u1, double * __restrict__ in_u2, double *__restrict__ in_u3, double * __restrict__ in_mu, double * __restrict__ in_la, double * __restrict__ in_met1, double * __restrict__ in_met2, double * __restrict__ in_met3, double * __restrict__ in_met4, double * strx, double * stry, double c1, double c2, int N) {
// Determining the block's indices
int blockdim_k= (int)(blockDim.x);
int k0 = (int)(blockIdx.x)*(blockdim_k);
int k = max (k0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
int blockdim_i= (int)(blockDim.z);
int i0 = (int)(blockIdx.z)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.z);
double (*u1)[304][304] = (double (*)[304][304])in_u1;
double (*u2)[304][304] = (double (*)[304][304])in_u2;
double (*u3)[304][304] = (double (*)[304][304])in_u3;
double (*mu)[304][304] = (double (*)[304][304])in_mu;
double (*la)[304][304] = (double (*)[304][304])in_la;
double (*r1)[304][304] = (double (*)[304][304])in_r1;
double (*met1)[304][304] = (double (*)[304][304])in_met1;
double (*met2)[304][304] = (double (*)[304][304])in_met2;
double (*met3)[304][304] = (double (*)[304][304])in_met3;
double (*met4)[304][304] = (double (*)[304][304])in_met4;
if (j>=2 && k>=2 && j<=N-3 && k<=N-3 && i>=2 && i<=N-3) {
double _t_6_;
double _t_4_;
double _t_7_;
double _t_5_;
double _t_8_;
double _t_3_;
double _t_2_;
double _t_1_;
double _t_11_;
double _t_9_;
double _t_12_;
double _t_10_;
double _t_13_;
double _t_18_;
double _t_16_;
double _t_19_;
double _t_17_;
double _t_20_;
double _t_15_;
double _t_14_;
double _t_23_;
double _t_21_;
double _t_24_;
double _t_22_;
double _t_25_;
double _t_0_;
double _t_31_;
double _t_29_;
double _t_32_;
double _t_30_;
double _t_33_;
double _t_28_;
double _t_27_;
double _t_26_;
double _t_36_;
double _t_34_;
double _t_37_;
double _t_35_;
double _t_38_;
double _t_43_;
double _t_41_;
double _t_44_;
double _t_42_;
double _t_45_;
double _t_40_;
double _t_39_;
double _t_48_;
double _t_46_;
double _t_49_;
double _t_47_;
double _t_50_;
double r1ic0jc0kc0 = r1[i][j][k];
double _t_55_;
double _t_53_;
double _t_56_;
double _t_54_;
double _t_57_;
double _t_52_;
double _t_60_;
double _t_58_;
double _t_61_;
double _t_59_;
double _t_62_;
double _t_51_;
double _t_66_;
double _t_64_;
double _t_67_;
double _t_65_;
double _t_68_;
double _t_63_;
double _t_71_;
double _t_69_;
double _t_72_;
double _t_70_;
double _t_73_;
double _t_77_;
double _t_75_;
double _t_78_;
double _t_76_;
double _t_79_;
double _t_74_;
double _t_82_;
double _t_80_;
double _t_83_;
double _t_81_;
double _t_84_;
double _t_88_;
double _t_86_;
double _t_89_;
double _t_87_;
double _t_90_;
double _t_85_;
double _t_93_;
double _t_91_;
double _t_94_;
double _t_92_;
double _t_95_;
_t_6_ = mu[i][j+2][k] * met3[i][j+2][k];
_t_4_ = _t_6_ * met1[i][j+2][k];
_t_7_ = u1[i][j+2][k+2];
_t_7_ -= u1[i][j+2][k-2];
_t_5_ = c2 * _t_7_;
_t_8_ = u1[i][j+2][k+1];
_t_8_ -= u1[i][j+2][k-1];
_t_5_ += c1 * _t_8_;
_t_3_ = _t_4_ * _t_5_;
_t_2_ = _t_3_ * stry[j+1];
_t_1_ = _t_2_ * strx[i];
_t_11_ = mu[i][j+2][k] * met2[i][j+2][k];
_t_9_ = _t_11_ * met1[i][j+2][k];
_t_12_ = u2[i][j+2][k+2];
_t_12_ -= u2[i][j+2][k-2];
_t_10_ = c2 * _t_12_;
_t_13_ = u2[i][j+2][k+1];
_t_13_ -= u2[i][j+2][k-1];
_t_10_ += c1 * _t_13_;
_t_1_ += _t_9_ * _t_10_;
_t_18_ = mu[i][j-2][k] * met3[i][j-2][k];
_t_16_ = _t_18_ * met1[i][j-2][k];
_t_19_ = u1[i][j-2][k+2];
_t_19_ -= u1[i][j-2][k-2];
_t_17_ = c2 * _t_19_;
_t_20_ = u1[i][j-2][k+1];
_t_20_ -= u1[i][j-2][k-1];
_t_17_ += c1 * _t_20_;
_t_15_ = _t_16_ * _t_17_;
_t_14_ = _t_15_ * stry[j];
_t_1_ += _t_14_ * strx[i];
_t_23_ = mu[i][j-2][k] * met2[i][j-2][k];
_t_21_ = _t_23_ * met1[i][j-2][k];
_t_24_ = u2[i][j-2][k+2];
_t_24_ -= u2[i][j-2][k-2];
_t_22_ = c2 * _t_24_;
_t_25_ = u2[i][j-2][k+1];
_t_25_ -= u2[i][j-2][k-1];
_t_22_ += c1 * _t_25_;
_t_1_ += _t_21_ * _t_22_;
_t_0_ = c2 * _t_1_;
_t_31_ = mu[i][j+1][k] * met3[i][j+1][k];
_t_29_ = _t_31_ * met1[i][j+1][k];
_t_32_ = u1[i][j+1][k+2];
_t_32_ -= u1[i][j+1][k-2];
_t_30_ = c2 * _t_32_;
_t_33_ = u1[i][j+1][k+1];
_t_33_ -= u1[i][j+1][k-1];
_t_30_ += c1 * _t_33_;
_t_28_ = _t_29_ * _t_30_;
_t_27_ = _t_28_ * stry[j-1];
_t_26_ = _t_27_ * strx[i];
_t_36_ = mu[i][j+1][k] * met2[i][j+1][k];
_t_34_ = _t_36_ * met1[i][j+1][k];
_t_37_ = u2[i][j+1][k+2];
_t_37_ -= u2[i][j+1][k-2];
_t_35_ = c2 * _t_37_;
_t_38_ = u2[i][j+1][k+1];
_t_38_ -= u2[i][j+1][k-1];
_t_35_ += c1 * _t_38_;
_t_26_ += _t_34_ * _t_35_;
_t_43_ = mu[i][j-1][k] * met3[i][j-1][k];
_t_41_ = _t_43_ * met1[i][j-1][k];
_t_44_ = u1[i][j-1][k+2];
_t_44_ -= u1[i][j-1][k-2];
_t_42_ = c2 * _t_44_;
_t_45_ = u1[i][j-1][k+1];
_t_45_ -= u1[i][j-1][k-1];
_t_42_ += c1 * _t_45_;
_t_40_ = _t_41_ * _t_42_;
_t_39_ = _t_40_ * stry[j];
_t_26_ += _t_39_ * strx[i];
_t_48_ = mu[i][j-1][k] * met2[i][j-1][k];
_t_46_ = _t_48_ * met1[i][j-1][k];
_t_49_ = u2[i][j-1][k+2];
_t_49_ -= u2[i][j-1][k-2];
_t_47_ = c2 * _t_49_;
_t_50_ = u2[i][j-1][k+1];
_t_50_ -= u2[i][j-1][k-1];
_t_47_ += c1 * _t_50_;
_t_26_ += _t_46_ * _t_47_;
_t_0_ += c1 * _t_26_;
r1ic0jc0kc0 += _t_0_;
_t_55_ = mu[i][j+2][k] * met1[i][j+2][k];
_t_53_ = _t_55_ * met1[i][j+2][k];
_t_56_ = u2[i+2][j+2][k];
_t_56_ -= u2[i-2][j+2][k];
_t_54_ = c2 * _t_56_;
_t_57_ = u2[i+1][j+2][k];
_t_57_ -= u2[i-1][j+2][k];
_t_54_ += c1 * _t_57_;
_t_52_ = _t_53_ * _t_54_;
_t_60_ = mu[i][j-2][k] * met1[i][j-2][k];
_t_58_ = _t_60_ * met1[i][j-2][k];
_t_61_ = u2[i+2][j-2][k];
_t_61_ -= u2[i-2][j-2][k];
_t_59_ = c2 * _t_61_;
_t_62_ = u2[i+1][j-2][k];
_t_62_ -= u2[i-1][j-2][k];
_t_59_ += c1 * _t_62_;
_t_52_ += _t_58_ * _t_59_;
_t_51_ = c2 * _t_52_;
_t_66_ = mu[i][j+1][k] * met1[i][j+1][k];
_t_64_ = _t_66_ * met1[i][j+1][k];
_t_67_ = u2[i+2][j+1][k];
_t_67_ -= u2[i-2][j+1][k];
_t_65_ = c2 * _t_67_;
_t_68_ = u2[i+1][j+1][k];
_t_68_ -= u2[i-1][j+1][k];
_t_65_ += c1 * _t_68_;
_t_63_ = _t_64_ * _t_65_;
_t_71_ = mu[i][j-1][k] * met1[i][j-1][k];
_t_69_ = _t_71_ * met1[i][j-1][k];
_t_72_ = u2[i+2][j-1][k];
_t_72_ -= u2[i-2][j-1][k];
_t_70_ = c2 * _t_72_;
_t_73_ = u2[i+1][j-1][k];
_t_73_ -= u2[i-1][j-1][k];
_t_70_ += c1 * _t_73_;
_t_63_ += _t_69_ * _t_70_;
_t_51_ += c1 * _t_63_;
_t_77_ = la[i+2][j][k] * met1[i+2][j][k];
_t_75_ = _t_77_ * met1[i+2][j][k];
_t_78_ = u2[i+2][j+2][k];
_t_78_ -= u2[i+2][j-2][k];
_t_76_ = c2 * _t_78_;
_t_79_ = u2[i+2][j+1][k];
_t_79_ -= u2[i+2][j-1][k];
_t_76_ += c1 * _t_79_;
_t_74_ = _t_75_ * _t_76_;
_t_82_ = la[i-2][j][k] * met1[i-2][j][k];
_t_80_ = _t_82_ * met1[i-2][j][k];
_t_83_ = u2[i-2][j+2][k];
_t_83_ -= u2[i-2][j-2][k];
_t_81_ = c2 * _t_83_;
_t_84_ = u2[i-2][j+1][k];
_t_84_ -= u2[i-2][j-1][k];
_t_81_ += c1 * _t_84_;
_t_74_ += _t_80_ * _t_81_;
_t_51_ += c2 * _t_74_;
_t_88_ = la[i+1][j][k] * met1[i+1][j][k];
_t_86_ = _t_88_ * met1[i+1][j][k];
_t_89_ = u2[i+1][j+2][k];
_t_89_ -= u2[i+1][j-2][k];
_t_87_ = c2 * _t_89_;
_t_90_ = u2[i+1][j+1][k];
_t_90_ -= u2[i+1][j-1][k];
_t_87_ += c1 * _t_90_;
_t_85_ = _t_86_ * _t_87_;
_t_93_ = la[i-1][j][k] * met1[i-1][j][k];
_t_91_ = _t_93_ * met1[i-1][j][k];
_t_94_ = u2[i-1][j+2][k];
_t_94_ -= u2[i-1][j-2][k];
_t_92_ = c2 * _t_94_;
_t_95_ = u2[i-1][j+1][k];
_t_95_ -= u2[i-1][j-1][k];
_t_92_ += c1 * _t_95_;
_t_85_ += _t_91_ * _t_92_;
_t_51_ += c1 * _t_85_;
r1ic0jc0kc0 += _t_51_;
r1[i][j][k] = r1ic0jc0kc0;
}
}
extern "C" void host_code (double *h_r1, double *h_u1, double *h_u2, double *h_u3, double *h_mu, double *h_la, double *h_met1, double *h_met2, double *h_met3, double *h_met4, double *h_strx, double *h_stry, double c1, double c2, int N) {
double *r1;
cudaMalloc (&r1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for r1\n");
cudaMemcpy (r1, h_r1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *u1;
cudaMalloc (&u1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u1\n");
cudaMemcpy (u1, h_u1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *u2;
cudaMalloc (&u2, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u2\n");
cudaMemcpy (u2, h_u2, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *u3;
cudaMalloc (&u3, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u3\n");
cudaMemcpy (u3, h_u3, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *mu;
cudaMalloc (&mu, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for mu\n");
cudaMemcpy (mu, h_mu, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *la;
cudaMalloc (&la, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for la\n");
cudaMemcpy (la, h_la, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *met1;
cudaMalloc (&met1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for met1\n");
cudaMemcpy (met1, h_met1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *met2;
cudaMalloc (&met2, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for met2\n");
cudaMemcpy (met2, h_met2, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *met3;
cudaMalloc (&met3, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for met3\n");
cudaMemcpy (met3, h_met3, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *met4;
cudaMalloc (&met4, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for met4\n");
cudaMemcpy (met4, h_met4, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *strx;
cudaMalloc (&strx, sizeof(double)*N);
check_error ("Failed to allocate device memory for strx\n");
cudaMemcpy (strx, h_strx, sizeof(double)*N, cudaMemcpyHostToDevice);
double *stry;
cudaMalloc (&stry, sizeof(double)*N);
check_error ("Failed to allocate device memory for stry\n");
cudaMemcpy (stry, h_stry, sizeof(double)*N, cudaMemcpyHostToDevice);
dim3 blockconfig (16, 8);
dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, blockconfig.y), 1);
curvi_1 <<<gridconfig, blockconfig>>> (r1, u1, u2, u3, mu, la, met1, met2, met3, met4, strx, stry, c1, c2, N);
dim3 blockconfig_1 (16, 2, 2);
dim3 gridconfig_1 (ceil(N, blockconfig_1.x), ceil(N, blockconfig_1.y), ceil(N, blockconfig_1.z));
curvi_2 <<<gridconfig_1, blockconfig_1>>> (r1, u1, u2, u3, mu, la, met1, met2, met3, met4, strx, stry, c1, c2, N);
cudaMemcpy (h_r1, r1, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost);
}
|
4b44de6ac15998a093edad327b3d8d8ec20ad315.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Reduce.cuh>
#include <ATen/native/hip/ReduceOps.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/SharedReduceOps.h>
#include <ATen/Dispatch.h>
#include <ATen/hip/NumericLimits.cuh>
#include <ATen/native/ReduceOps.h>
#include <ATen/native/ReduceAllOps.h>
#include <ATen/native/TensorCompare.h>
#include <ATen/NumericUtils.h>
namespace at::native {
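// NaN-propagating min: if either operand is NaN the result is NaN (a NaN `a` wins the
// test directly; a NaN `b` wins because `a < NaN` is false), matching PyTorch's
// NaN semantics for min reductions over floating types.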
template <typename acc_t>
struct MinNanFunctor {
__device__ __forceinline__ acc_t operator()(acc_t a, acc_t b) const {
return (at::_isnan(a) || a < b) ? a : b;
}
};
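// at::numeric_limits<acc_t>::upper_bound() is the largest value acc_t can take,
// i.e. the identity element for a min reduction.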
template <typename scalar_t, typename acc_t=scalar_t>
void min_values_kernel_cuda_impl(TensorIterator& iter) {
gpu_reduce_kernel<scalar_t, scalar_t>(
iter, func_wrapper<acc_t> (MinNanFunctor<acc_t>()),
at::numeric_limits<acc_t>::upper_bound());
}
void min_values_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, iter.dtype(), "min_values_cuda", [&]() {
min_values_kernel_cuda_impl<scalar_t>(iter);
});
}
void min_launch_kernel(TensorIterator &iter) {
AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, iter.input_dtype(), "min_cuda", [&]() {
gpu_reduce_kernel<scalar_t, scalar_t>(
iter,
MinOps<scalar_t>{},
thrust::pair<scalar_t, int64_t>(at::numeric_limits<scalar_t>::upper_bound(), 0));
});
}
void min_all_launch_kernel(TensorIterator &iter) {
AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, iter.input_dtype(), "min_all_cuda", [&] {
min_values_kernel_cuda_impl<scalar_t>(iter);
});
}
REGISTER_DISPATCH(min_values_stub, &min_values_kernel_cuda);
} // namespace at::native
| 4b44de6ac15998a093edad327b3d8d8ec20ad315.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Reduce.cuh>
#include <ATen/native/cuda/ReduceOps.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/SharedReduceOps.h>
#include <ATen/Dispatch.h>
#include <ATen/cuda/NumericLimits.cuh>
#include <ATen/native/ReduceOps.h>
#include <ATen/native/ReduceAllOps.h>
#include <ATen/native/TensorCompare.h>
#include <ATen/NumericUtils.h>
namespace at::native {
template <typename acc_t>
struct MinNanFunctor {
__device__ __forceinline__ acc_t operator()(acc_t a, acc_t b) const {
return (at::_isnan(a) || a < b) ? a : b;
}
};
template <typename scalar_t, typename acc_t=scalar_t>
void min_values_kernel_cuda_impl(TensorIterator& iter) {
gpu_reduce_kernel<scalar_t, scalar_t>(
iter, func_wrapper<acc_t> (MinNanFunctor<acc_t>()),
at::numeric_limits<acc_t>::upper_bound());
}
void min_values_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, iter.dtype(), "min_values_cuda", [&]() {
min_values_kernel_cuda_impl<scalar_t>(iter);
});
}
void min_launch_kernel(TensorIterator &iter) {
AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, iter.input_dtype(), "min_cuda", [&]() {
gpu_reduce_kernel<scalar_t, scalar_t>(
iter,
MinOps<scalar_t>{},
thrust::pair<scalar_t, int64_t>(at::numeric_limits<scalar_t>::upper_bound(), 0));
});
}
void min_all_launch_kernel(TensorIterator &iter) {
AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, iter.input_dtype(), "min_all_cuda", [&] {
min_values_kernel_cuda_impl<scalar_t>(iter);
});
}
REGISTER_DISPATCH(min_values_stub, &min_values_kernel_cuda);
} // namespace at::native
|
f79e4a67f1ffb8d18ef25fc4cf038b4e5109c1ba.hip | // !!! This is a file automatically generated by hipify!!!
// ***********************************************************************
//
// Rundemanen: CUDA C++ parallel program for community detection
// Md Naim ([email protected]), Fredrik Manne ([email protected])
// University of Bergen
//
// ***********************************************************************
//
// Copyright (2016) University of Bergen
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// ************************************************************************
#include <algorithm>
#include <iostream>
#include "communityGPU.h"
#include"hostconstants.h"
#include"fstream"
#include <thrust/gather.h>
double Community::one_levelGaussSeidel(double init_mod, bool isLastRound,
int minSize, double easyThreshold, bool isGauss, hipStream_t *streams,
int nrStreams, hipEvent_t &start, hipEvent_t &stop) {
//NOTE: hipStream_t *streams was never used
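// One round of Louvain-style modularity optimization: vertices are binned by
// neighborhood size, and each bin is processed by a kernel configuration matched
// to it (small thread teams, full warps, or whole blocks backed by shared- or
// global-memory hash tables).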
std::cout << std::endl << " Inside method for modularity optimization ";
if (g.type == WEIGHTED) {
std::cout << "WEIGHTED Graph" << std::endl;
} else {
std::cout << "UnWeighted Graph" << std::endl;
}
bool hostPrint = false;
bool verbose = false;
int sc;
sc = 0; //std::cin>>sc;
if (sc > 1)
hostPrint = true;
if (sc > 0)
verbose = true;
/*
if (hostPrint) {
print_vector(g.indices, "indices: ");
std::cout << std::endl << "|indices|:" << g.indices.size() << std::endl;
}
*/
bool improvement = false;
int nb_moves;
double cur_mod = -1.0, new_mod = -1.0;
//hipEvent_t start, stop;
//hipEventCreate(&start);
//hipEventCreate(&stop);
unsigned int nrIteration = 0;
hipEventRecord(start, 0);
//Compute degree of each node
thrust::device_vector<int> sizesOfNhoods(g.indices.size() - 1, 0);
thrust::transform(g.indices.begin() + 1, g.indices.end(),
g.indices.begin(), sizesOfNhoods.begin(),
thrust::minus<int >());
assert(CAPACITY_FACTOR_DENOMINATOR >= CAPACITY_FACTOR_NUMERATOR);
// Filters for bins
// (-1) to hash the community id itself
int warpLimit = (WARP_TABLE_SIZE_1 * CAPACITY_FACTOR_NUMERATOR / CAPACITY_FACTOR_DENOMINATOR) - 1;
int blkSMemLimit = (SHARED_TABLE_SIZE * CAPACITY_FACTOR_NUMERATOR / CAPACITY_FACTOR_DENOMINATOR) - 1;
/*
std::cout << "warpLimit: " << warpLimit << " blkSMemLimit: " << blkSMemLimit << std::endl;
*/
IsGreaterThanLimit<int, int> filterBlkGMem(blkSMemLimit);
IsInRange<int, int> filterBlkSMem(warpLimit + 1, blkSMemLimit);
IsInRange<int, int> filterForWrp(33, warpLimit);
assert(warpLimit > 32);
IsInRange<int, int> filter_N_leq32(17, 32);
IsInRange<int, int> filter_N_leq16(9, 16);
IsInRange<int, int> filter_N_leq8(5, 8);
IsInRange<int, int> filter_N_leq4(1, 4);
IsInRange<int, int> filterForNone(0, 0); // node with no neighbors
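// Degree bins used to route work: [1,4], [5,8], [9,16], [17,32] and [33,warpLimit]
// map to warp-level processing with progressively smaller teams; (warpLimit,
// blkSMemLimit] to a block with a shared-memory hash table; anything larger to a
// block with a global-memory hash table. Degree-0 nodes are counted separately and skipped.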
//count #work for each bin
int nrCforBlkGMem = thrust::count_if(thrust::device, sizesOfNhoods.begin(), sizesOfNhoods.end(), filterBlkGMem);
int nrCforBlkSMem = thrust::count_if(thrust::device, sizesOfNhoods.begin(), sizesOfNhoods.end(), filterBlkSMem);
int nrCforWrp = thrust::count_if(thrust::device, sizesOfNhoods.begin(), sizesOfNhoods.end(), filterForWrp);
int nrC_N_leq32 = thrust::count_if(thrust::device, sizesOfNhoods.begin(), sizesOfNhoods.end(), filter_N_leq32);
int nrC_N_leq16 = thrust::count_if(thrust::device, sizesOfNhoods.begin(), sizesOfNhoods.end(), filter_N_leq16);
int nrC_N_leq8 = thrust::count_if(thrust::device, sizesOfNhoods.begin(), sizesOfNhoods.end(), filter_N_leq8);
int nrC_N_leq4 = thrust::count_if(thrust::device, sizesOfNhoods.begin(), sizesOfNhoods.end(), filter_N_leq4);
int nrCforNone = thrust::count_if(thrust::device, sizesOfNhoods.begin(), sizesOfNhoods.end(), filterForNone);
/*
std::cout << "distribution: "<< nrC_N_leq4 <<" : "<< nrC_N_leq8<<" : "<< nrC_N_leq16<<" : "<<
nrC_N_leq32<<" : "<<nrCforWrp<<" : "<<nrCforBlkSMem<<" : "<<nrCforBlkGMem<<std::endl;
std::cout << "distribution: "<< (100*nrC_N_leq4)/community_size <<" : "<< (100*nrC_N_leq8)/community_size<<" : "<< (100*nrC_N_leq16)/community_size <<" : "<<
(100*nrC_N_leq32)/community_size<<" : "<<(100*nrCforWrp)/community_size<<" : "<<(100*nrCforBlkSMem)/community_size<<" : "<<(100*nrCforBlkGMem)/community_size <<std::endl;
*/
// Just for statistics
IsInRange<int, int> filter_N_leq64(33, 64);
IsInRange<int, int> filter_N_leq96(65, 96);
int nrC_N_leq64 = thrust::count_if(thrust::device, sizesOfNhoods.begin(), sizesOfNhoods.end(), filter_N_leq64);
int nrC_N_leq96 = thrust::count_if(thrust::device, sizesOfNhoods.begin(), sizesOfNhoods.end(), filter_N_leq96);
int maxNrWrp = thrust::max(thrust::max(thrust::max(nrCforWrp, nrC_N_leq32), thrust::max(nrC_N_leq16, nrC_N_leq8)), nrC_N_leq4);
/*
if (1) {
std::cout << "-------> nrCforBlk[" << blkSMemLimit + 1 << ", -] : " << nrCforBlkGMem << std::endl;
std::cout << "-------> nrCforBlk[" << warpLimit + 1 << "," << blkSMemLimit << "] : " << nrCforBlkSMem << std::endl;
std::cout << "----------> nrCforWrp[ 33, " << warpLimit << "] : " << nrCforWrp << std::endl;
std::cout << "nrC_N_leq32 :" << nrC_N_leq32 << std::endl;
std::cout << "nrC_N_leq16 :" << nrC_N_leq16 << std::endl;
std::cout << "nrC_N_leq8 :" << nrC_N_leq8 << std::endl;
std::cout << "nrC_N_leq4 :" << nrC_N_leq4 << std::endl;
std::cout << "----------> nrCforNone[0,0] : " << nrCforNone << std::endl;
std::cout << "maxNrWrp :" << maxNrWrp << std::endl;
std::cout << "----------Statistics----------------" << std::endl;
std::cout << "nrC_N_leq64 :" << nrC_N_leq64 << std::endl;
std::cout << "nrC_N_leq96 :" << nrC_N_leq96 << std::endl;
}
*/
assert((nrCforBlkGMem + nrCforBlkSMem + nrCforWrp + nrC_N_leq32 + nrC_N_leq16 + nrC_N_leq8 + nrC_N_leq4 + nrCforNone) == community_size);
thrust::device_vector<int> movement_counters(maxNrWrp, 0);
// Let's copy the identities of all communities into g_next.links
g_next.links.resize(community_size, 0);
thrust::sequence(g_next.links.begin(), g_next.links.end(), 0);
//Use g_next.indices to copy community ids with decreasing sizes of neighborhood
g_next.indices.resize(community_size, -1);
//First community ids with larger neighborhoods
thrust::copy_if(thrust::device, g_next.links.begin(), g_next.links.end(),
sizesOfNhoods.begin(), g_next.indices.begin(), filterBlkGMem);
// Then community ids with medium sized neighborhoods
thrust::copy_if(thrust::device, g_next.links.begin(), g_next.links.end(), sizesOfNhoods.begin(),
g_next.indices.begin() + nrCforBlkGMem, filterBlkSMem);
// Community ids with smaller neighborhoods
thrust::copy_if(thrust::device, g_next.links.begin(), g_next.links.end(), sizesOfNhoods.begin(),
g_next.indices.begin() + nrCforBlkGMem + nrCforBlkSMem, filterForWrp);
thrust::copy_if(thrust::device, g_next.links.begin(), g_next.links.end(), sizesOfNhoods.begin(),
g_next.indices.begin() + nrCforBlkGMem + nrCforBlkSMem + nrCforWrp, filter_N_leq32);
thrust::copy_if(thrust::device, g_next.links.begin(), g_next.links.end(), sizesOfNhoods.begin(),
g_next.indices.begin() + nrCforBlkGMem + nrCforBlkSMem + nrCforWrp + nrC_N_leq32, filter_N_leq16);
thrust::copy_if(thrust::device, g_next.links.begin(), g_next.links.end(), sizesOfNhoods.begin(),
g_next.indices.begin() + nrCforBlkGMem + nrCforBlkSMem + nrCforWrp + nrC_N_leq32 + nrC_N_leq16, filter_N_leq8);
thrust::copy_if(thrust::device, g_next.links.begin(), g_next.links.end(), sizesOfNhoods.begin(),
g_next.indices.begin() + nrCforBlkGMem + nrCforBlkSMem + nrCforWrp + nrC_N_leq32 + nrC_N_leq16 + nrC_N_leq8, filter_N_leq4);
///////////////////////////////////////////////////
// Now, use g_next.links to copy sizes of neighborhood according to order given by g_next.indices
g_next.links.resize(g_next.indices.size(), 0);
thrust::gather(thrust::device, g_next.indices.begin(), g_next.indices.end(), sizesOfNhoods.begin(), g_next.links.begin());
//Sort according to size of neighborhood ; only first nrCforBlkGbMem
thrust::sort_by_key(g_next.links.begin(), g_next.links.begin() + nrCforBlkGMem,
g_next.indices.begin(), thrust::greater<unsigned int>());
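// Only the global-memory bin needs sorting: with the largest neighborhoods first,
// the per-block hash-table slices allocated below are sized from the heaviest workloads.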
//////////Just to debug /////////////////
/*
if (0) {
thrust::host_vector<int> esSizes = sizesOfNhoods;
thrust::host_vector<int> bigCommunites = g_next.indices;
for (int k = 0; k < thrust::min<int>(bigCommunites.size(), 5); k++) {
std::cout << bigCommunites[k] << "::" << esSizes[bigCommunites[k]] << std::endl;
}
esSizes.clear();
bigCommunites.clear();
}
*/
///////////////////////////////Allocate data for Global HashTable////////////////////
int nrBlockForLargeNhoods = 90;
nrBlockForLargeNhoods = thrust::min(thrust::max(nrCforBlkGMem, nrCforBlkSMem), nrBlockForLargeNhoods);
thrust::device_vector<int> hashTablePtrs(nrBlockForLargeNhoods + 1, 0);
//g_next.links contains sizes of big neighborhoods
thrust::inclusive_scan(g_next.links.begin(), g_next.links.begin() + nrBlockForLargeNhoods,
hashTablePtrs.begin() + 1, thrust::plus<int>());
thrust::device_vector<HashItem> globalHashTable(2 * hashTablePtrs.back());
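// Each of the nrBlockForLargeNhoods blocks works in its own slice of this table
// (delimited by hashTablePtrs); the factor of 2 presumably keeps the open-addressing
// load factor comfortable.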
int szHTmem = thrust::reduce(g_next.links.begin(), g_next.links.begin() + nrBlockForLargeNhoods, (int) 0);
//std::cout << globalHashTable.size() << ":" << 2 * szHTmem << std::endl;
thrust::device_vector<int> moveCounters(nrBlockForLargeNhoods, 0);
////////////////////////////////////////////////////
unsigned int wrpSz = PHY_WRP_SZ;
int nr_of_block = 0;
//std::cout << " g.weight:(copied from host graph) " << g.total_weight << std::endl;
//////////////////////////////////////////////////////////////
n2c.resize(community_size);
thrust::sequence(n2c.begin(), n2c.end(), 0);
//std::cout << "community_size : " << community_size << " n2c.size : " << n2c.size() << std::endl;
thrust::device_vector< int> n2c_old(n2c.size(), -1);
assert(community_size == n2c.size());
g.total_weight = 0.0;
if (g.type == WEIGHTED) {
g.total_weight = thrust::reduce(thrust::device, g.weights.begin(), g.weights.end(), (double) 0, thrust::plus<double>());
} else {
g.total_weight = (double) g.nb_links;
}
report_time(start, stop, "FilterCopy&M");
//std::cout << " g.weight(computed in device): " << g.total_weight << std::endl;
thrust::device_vector< int> cardinalityOfComms(community_size, 1); // cardinality of each community
thrust::device_vector< int> cardinalityOfComms_new(community_size, 0); // cardinality of each community
thrust::device_vector<float> tot_new(community_size, 0.0);
thrust::device_vector<float> tot(community_size, 0.0);
thrust::device_vector<float> in(community_size, 0.0);
in.resize(community_size);
tot.resize(community_size);
tot_new.resize(community_size);
// n2c_new.clear();
n2c_new.resize(community_size);
/////////////////////////////////////////////////////////////
wrpSz = PHY_WRP_SZ;
int load_per_blk = CHUNK_PER_WARP * (NR_THREAD_PER_BLOCK / wrpSz);
nr_of_block = (community_size + load_per_blk - 1) / load_per_blk;
thrust::device_vector<float> wDegs(community_size, 0.0);
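// Weighted degrees are precomputed once up front (preComputeWdegs below) so the
// move-evaluation kernels do not have to rescan adjacency lists to recover them.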
hipEventRecord(start, 0);
preComputeWdegs <<<nr_of_block, NR_THREAD_PER_BLOCK>>>(thrust::raw_pointer_cast(g.indices.data()),
thrust::raw_pointer_cast(g.weights.data()),
thrust::raw_pointer_cast(wDegs.data()),
g.type, community_size, wrpSz);
report_time(start, stop, "preComputeWdegs");
//////////////////////////////////////////////////////////////
wrpSz = PHY_WRP_SZ;
load_per_blk = CHUNK_PER_WARP * (NR_THREAD_PER_BLOCK / wrpSz);
nr_of_block = (community_size + load_per_blk - 1) / load_per_blk;
int size_of_shared_memory = (2 * CHUNK_PER_WARP + 1)*(NR_THREAD_PER_BLOCK / wrpSz) * sizeof (int);
hipEventRecord(start, 0);
initialize_in_tot <<<nr_of_block, NR_THREAD_PER_BLOCK, size_of_shared_memory>>>(community_size,
thrust::raw_pointer_cast(g.indices.data()), thrust::raw_pointer_cast(g.links.data()),
thrust::raw_pointer_cast(g.weights.data()), thrust::raw_pointer_cast(tot.data()),
NULL, thrust::raw_pointer_cast(n2c.data()), g.type, NULL, wrpSz,
thrust::raw_pointer_cast(wDegs.data()));
report_time(start, stop, "initialize_in_tot");
//////////////////////////////////////
int loopCnt = 0;
double threshold = min_modularity;
if (community_size > minSize && isLastRound == false)
threshold = easyThreshold;
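// Two-tier convergence policy: large intermediate levels stop once the
// per-sweep modularity gain falls below the looser easyThreshold; only small
// graphs or the last round iterate down to min_modularity. E.g. (hypothetical
// values) easyThreshold = 1e-3 vs. min_modularity = 1e-6 trades a little
// modularity for far fewer sweeps on the big early levels.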
std::cout<<"Status:: community size - "<<community_size<<" threshold - "<<threshold<<std::endl;
// std::cout << "minSize: " << minSize << std::endl;
// WARNING: never set isToUpdate to true; the update-kernel path is known to be broken.
bool isToUpdate = false; // true;
clock_t t1, t2;
do {
t1 = clock();
loopCnt++;
// std::cout << " ---------------------------- do-while ---------------------" << loopCnt << std::endl;
thrust::fill_n(thrust::device, in.begin(), in.size(), 0.0); // initialize in to all zeros '0'
n2c_new = n2c; // REQUIRED assignment: seed the "new" state with the current one
tot_new = tot;
cardinalityOfComms_new = cardinalityOfComms;
//thrust::fill_n(thrust::device, tot_new.begin(), tot_new.size(),0.0);
//thrust::fill_n(thrust::device, cardinalityOfComms_new.begin(), cardinalityOfComms_new.size(),0);
nb_moves = 0;
unsigned int bucketSizePerWarp = 0;
size_t sizeHashMem = 0;
// Initialize counters
movement_counters.clear();
movement_counters.resize(maxNrWrp, 0);
moveCounters.clear();
moveCounters.resize(nrBlockForLargeNhoods, 0);
if (nrCforBlkGMem > 0) {
if (isGauss) {
//thrust::fill_n(thrust::device, in.begin(), in.size(), 0.0); // initialize in to all zeros '0'
//tot_new = tot;
//cardinalityOfComms_new = cardinalityOfComms;
}
wrpSz = PHY_WRP_SZ;
hipEventRecord(start, 0);
//std::cout<<" nrBlockForLargeNhoods: "<<nrBlockForLargeNhoods<<" nrCforBlkGMem: "<< nrCforBlkGMem<<std::endl;
lookAtNeigboringComms << <nrBlockForLargeNhoods, (NR_THREAD_PER_BLOCK * 2)>>>(
thrust::raw_pointer_cast(g.indices.data()),
thrust::raw_pointer_cast(g.links.data()),
thrust::raw_pointer_cast(g.weights.data()),
thrust::raw_pointer_cast(n2c.data()),
thrust::raw_pointer_cast(in.data()),
thrust::raw_pointer_cast(tot.data()), g.type,
thrust::raw_pointer_cast(n2c_new.data()),
NULL,
thrust::raw_pointer_cast(tot_new.data()),
thrust::raw_pointer_cast(moveCounters.data()), g.total_weight,
thrust::raw_pointer_cast(g_next.indices.data()), nrCforBlkGMem,
thrust::raw_pointer_cast(globalHashTable.data()),
thrust::raw_pointer_cast(hashTablePtrs.data()),
thrust::raw_pointer_cast(devPrimes.data()), nb_prime, wrpSz,
thrust::raw_pointer_cast(cardinalityOfComms.data()),
thrust::raw_pointer_cast(cardinalityOfComms_new.data()),
thrust::raw_pointer_cast(wDegs.data()));
report_time(start, stop, "lookAtNeigboringComms");
//nb_moves = nb_moves + thrust::reduce(moveCounters.begin(), moveCounters.begin() + nrBlockForLargeNhoods, (int) 0);
/*
if (0) {
changeAssignment << < nr_of_block, NR_THREAD_PER_BLOCK>>>(
thrust::raw_pointer_cast(n2c.data()), // change from
thrust::raw_pointer_cast(n2c_new.data()), // change to
thrust::raw_pointer_cast(g_next.indices.data()), nrCforBlkGMem);
}
*/
if (isGauss) {
if (isToUpdate) {
nr_of_block = (community_size + NR_THREAD_PER_BLOCK - 1) / NR_THREAD_PER_BLOCK;
update << <nr_of_block, NR_THREAD_PER_BLOCK>>>(community_size,
thrust::raw_pointer_cast(tot.data()),
thrust::raw_pointer_cast(tot_new.data()),
thrust::raw_pointer_cast(n2c.data()),
thrust::raw_pointer_cast(n2c_new.data()),
thrust::raw_pointer_cast(cardinalityOfComms.data()),
thrust::raw_pointer_cast(cardinalityOfComms_new.data()));
} else {
n2c = n2c_new;
tot = tot_new;
cardinalityOfComms = cardinalityOfComms_new;
}
}
}
/*
if (verbose) {
std::cout << "Before Traversing.............. " << std::endl;
print_vector(tot, "tot:");
}
if (verbose) {
std::cout << " community_size:" << community_size << std::endl;
std::cout << " g.indices:" << g.indices.size() << std::endl;
std::cout << " g.links:" << g.links.size() << std::endl;
std::cout << " g.weights:" << g.weights.size() << std::endl;
std::cout << " n2c:" << n2c.size() << std::endl;
std::cout << " in:" << in.size() << std::endl;
std::cout << " n2c_new:" << n2c_new.size() << std::endl;
std::cout << " tot_new:" << tot_new.size() << std::endl;
std::cout << " movement_counters: " << movement_counters.size() << std::endl;
std::cout << " g.total_weight: " << g.total_weight << std::endl;
}
if (verbose) {
nb_moves = thrust::reduce(movement_counters.begin(), movement_counters.end(), (int) 0);
std::cout << "---------*Now* " << nb_moves << std::endl;
}
*/
sc = 0; //std::cin>>sc;
//////////////////////////////////////////////////
/*
if (nrC_N_leq32) {
if (isGauss) {
thrust::fill_n(thrust::device, in.begin(), in.size(), 0.0); // initialize in to all zeros '0'
//tot_new = tot;
//cardinalityOfComms_new = cardinalityOfComms;
}
wrpSz = PHY_WRP_SZ;
nr_of_block = (nrC_N_leq32 + (NR_THREAD_PER_BLOCK / wrpSz) - 1) / (NR_THREAD_PER_BLOCK / wrpSz);
bucketSizePerWarp = 61; //MUST BE PRIME
sizeHashMem = (NR_THREAD_PER_BLOCK / wrpSz) * bucketSizePerWarp * sizeof (HashItem);
hipEventRecord(start, 0);
neigh_comm << < nr_of_block, NR_THREAD_PER_BLOCK, sizeHashMem >>>(
community_size,
thrust::raw_pointer_cast(g.indices.data()),
thrust::raw_pointer_cast(g.links.data()),
thrust::raw_pointer_cast(g.weights.data()),
thrust::raw_pointer_cast(n2c.data()),
thrust::raw_pointer_cast(in.data()),
thrust::raw_pointer_cast(tot.data()), g.type,
thrust::raw_pointer_cast(n2c_new.data()),
thrust::raw_pointer_cast(tot_new.data()),
thrust::raw_pointer_cast(movement_counters.data()),
g.total_weight, bucketSizePerWarp,
thrust::raw_pointer_cast(g_next.indices.data()) + nrCforBlkGMem + nrCforBlkSMem + nrCforWrp,
nrC_N_leq32, thrust::raw_pointer_cast(devPrimes.data()), nb_prime,
thrust::raw_pointer_cast(cardinalityOfComms.data()),
thrust::raw_pointer_cast(cardinalityOfComms_new.data()),
wrpSz, thrust::raw_pointer_cast(wDegs.data()));
report_time(start, stop, "neigh_comm(<=32)");
nb_moves = nb_moves + thrust::reduce(movement_counters.begin(), movement_counters.begin() + nrC_N_leq32, (int) 0);
if(0){
changeAssignment<<< nr_of_block, NR_THREAD_PER_BLOCK>>>(
thrust::raw_pointer_cast(n2c.data()), // change from
thrust::raw_pointer_cast(n2c_new.data()), // change to
thrust::raw_pointer_cast(g_next.indices.data()) + nrCforBlkGMem + nrCforBlkSMem + nrCforWrp,
nrC_N_leq32);
}
if (isGauss) {
n2c = n2c_new;
tot = tot_new;
cardinalityOfComms = cardinalityOfComms_new;
}
}
if (nrCforWrp) {
if (isGauss) {
thrust::fill_n(thrust::device, in.begin(), in.size(), 0.0); // initialize in to all zeros '0'
//tot_new = tot;
//cardinalityOfComms_new = cardinalityOfComms;
}
wrpSz = PHY_WRP_SZ;
nr_of_block = (nrCforWrp + (NR_THREAD_PER_BLOCK / wrpSz) - 1) / (NR_THREAD_PER_BLOCK / wrpSz);
bucketSizePerWarp = WARP_TABLE_SIZE_1;
sizeHashMem = (NR_THREAD_PER_BLOCK / wrpSz) * bucketSizePerWarp * sizeof (HashItem);
hipEventRecord(start, 0);
neigh_comm << < nr_of_block, NR_THREAD_PER_BLOCK, sizeHashMem >>>(
community_size,
thrust::raw_pointer_cast(g.indices.data()),
thrust::raw_pointer_cast(g.links.data()),
thrust::raw_pointer_cast(g.weights.data()),
thrust::raw_pointer_cast(n2c.data()),
thrust::raw_pointer_cast(in.data()),
thrust::raw_pointer_cast(tot.data()), g.type,
thrust::raw_pointer_cast(n2c_new.data()),
thrust::raw_pointer_cast(tot_new.data()),
thrust::raw_pointer_cast(movement_counters.data()),
g.total_weight, bucketSizePerWarp,
thrust::raw_pointer_cast(g_next.indices.data()) + nrCforBlkGMem + nrCforBlkSMem,
nrCforWrp, thrust::raw_pointer_cast(devPrimes.data()), nb_prime,
thrust::raw_pointer_cast(cardinalityOfComms.data()),
thrust::raw_pointer_cast(cardinalityOfComms_new.data()),
wrpSz, thrust::raw_pointer_cast(wDegs.data()));
report_time(start, stop, "neigh_comm");
nb_moves = thrust::reduce(movement_counters.begin(), movement_counters.begin() + nrCforWrp, (int) 0);
// change community assignment of processed vertices
if (0) {
changeAssignment << < nr_of_block, NR_THREAD_PER_BLOCK>>>(
thrust::raw_pointer_cast(n2c.data()), // change from
thrust::raw_pointer_cast(n2c_new.data()), // change to
thrust::raw_pointer_cast(g_next.indices.data()) + nrCforBlkGMem + nrCforBlkSMem, // of these communities
nrCforWrp);
}
if (isGauss) {
n2c = n2c_new;
tot = tot_new;
cardinalityOfComms = cardinalityOfComms_new;
}
}
if (nrCforBlkGMem > 0) {
if (isGauss) {
thrust::fill_n(thrust::device, in.begin(), in.size(), 0.0); // initialize in to all zeros '0'
//tot_new = tot;
//cardinalityOfComms_new = cardinalityOfComms;
}
wrpSz = PHY_WRP_SZ;
hipEventRecord(start, 0);
lookAtNeigboringComms << <nrBlockForLargeNhoods, (NR_THREAD_PER_BLOCK * 2)>>>(
thrust::raw_pointer_cast(g.indices.data()),
thrust::raw_pointer_cast(g.links.data()),
thrust::raw_pointer_cast(g.weights.data()),
thrust::raw_pointer_cast(n2c.data()),
thrust::raw_pointer_cast(in.data()),
thrust::raw_pointer_cast(tot.data()), g.type,
thrust::raw_pointer_cast(n2c_new.data()),
NULL,
thrust::raw_pointer_cast(tot_new.data()),
thrust::raw_pointer_cast(moveCounters.data()), g.total_weight,
thrust::raw_pointer_cast(g_next.indices.data()), nrCforBlkGMem,
thrust::raw_pointer_cast(globalHashTable.data()),
thrust::raw_pointer_cast(hashTablePtrs.data()),
thrust::raw_pointer_cast(devPrimes.data()), nb_prime, wrpSz,
thrust::raw_pointer_cast(cardinalityOfComms.data()),
thrust::raw_pointer_cast(cardinalityOfComms_new.data()),
thrust::raw_pointer_cast(wDegs.data()));
report_time(start, stop, "lookAtNeigboringComms");
nb_moves = nb_moves + thrust::reduce(moveCounters.begin(), moveCounters.begin() + nrBlockForLargeNhoods, (int) 0);
if (0) {
changeAssignment << < nr_of_block, NR_THREAD_PER_BLOCK>>>(
thrust::raw_pointer_cast(n2c.data()), // change from
thrust::raw_pointer_cast(n2c_new.data()), // change to
thrust::raw_pointer_cast(g_next.indices.data()), nrCforBlkGMem);
}
if (isGauss) {
n2c = n2c_new;
tot = tot_new;
cardinalityOfComms = cardinalityOfComms_new;
}
}
if (nrCforBlkSMem > 0) {
if (isGauss) {
thrust::fill_n(thrust::device, in.begin(), in.size(), 0.0); // initialize in to all zeros '0'
//tot_new = tot;
//cardinalityOfComms_new = cardinalityOfComms;
}
wrpSz = PHY_WRP_SZ;
hipEventRecord(start, 0);
lookAtNeigboringComms << <nrBlockForLargeNhoods, NR_THREAD_PER_BLOCK>>>(
thrust::raw_pointer_cast(g.indices.data()),
thrust::raw_pointer_cast(g.links.data()),
thrust::raw_pointer_cast(g.weights.data()),
thrust::raw_pointer_cast(n2c.data()),
thrust::raw_pointer_cast(in.data()),
thrust::raw_pointer_cast(tot.data()), g.type,
thrust::raw_pointer_cast(n2c_new.data()),
NULL,
thrust::raw_pointer_cast(tot_new.data()),
thrust::raw_pointer_cast(moveCounters.data()), g.total_weight,
thrust::raw_pointer_cast(g_next.indices.data()) + nrCforBlkGMem, nrCforBlkSMem,
thrust::raw_pointer_cast(globalHashTable.data()),
thrust::raw_pointer_cast(hashTablePtrs.data()),
thrust::raw_pointer_cast(devPrimes.data()), nb_prime, wrpSz,
thrust::raw_pointer_cast(cardinalityOfComms.data()),
thrust::raw_pointer_cast(cardinalityOfComms_new.data()),
thrust::raw_pointer_cast(wDegs.data()));
report_time(start, stop, "lookAtNeigboringComms(sh)");
nb_moves = nb_moves + thrust::reduce(moveCounters.begin(), moveCounters.begin() + nrBlockForLargeNhoods, (int) 0);
if (0) {
changeAssignment << < nr_of_block, NR_THREAD_PER_BLOCK>>>(
thrust::raw_pointer_cast(n2c.data()), // change from
thrust::raw_pointer_cast(n2c_new.data()), // change to
thrust::raw_pointer_cast(g_next.indices.data()) + nrCforBlkGMem, nrCforBlkSMem);
}
if (isGauss) {
n2c = n2c_new;
tot = tot_new;
cardinalityOfComms = cardinalityOfComms_new;
}
}
*/
//////////////////////////////////////////////////
if (nrC_N_leq8) {
if (isGauss) {
//thrust::fill_n(thrust::device, in.begin(), in.size(), 0.0); // initialize in to all zeros '0'
//tot_new = tot;
//cardinalityOfComms_new = cardinalityOfComms;
}
wrpSz = QUARTER_WARP;
nr_of_block = (nrC_N_leq8 + (NR_THREAD_PER_BLOCK / wrpSz) - 1) / (NR_THREAD_PER_BLOCK / wrpSz);
bucketSizePerWarp = 17; // MUST BE PRIME
sizeHashMem = (NR_THREAD_PER_BLOCK / wrpSz) * bucketSizePerWarp * sizeof (HashItem);
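// Per-warp open-addressing table in shared memory. Assuming
// NR_THREAD_PER_BLOCK = 128 and QUARTER_WARP = 8, a block hosts 128/8 = 16
// logical warps of 8 lanes, each with 17 buckets, so sizeHashMem =
// 16 * 17 * sizeof(HashItem). A prime bucket count makes the modular probe
// sequence visit every bucket before repeating.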
/*
if (0) {
std::cin>>sc;
print_vector(g.indices, "g.indices: ");
print_vector(g.links, "g.links: ");
print_vector(n2c, "n2c:");
print_vector(in, "in: ");
print_vector(tot, "tot:");
print_vector(n2c_new, "n2c_new:");
print_vector(tot_new, "tot_new:");
print_vector(movement_counters, "movement_counters:");
print_vector(g_next.indices, "g_next.indices:");
print_vector(devPrimes, "devPrimes:");
print_vector(cardinalityOfComms, "cardinalityOfComms:");
print_vector(cardinalityOfComms_new, "cardinalityOfComms_new:");
}
*/
hipEventRecord(start, 0);
//print_vector(in, "in (*): ");
neigh_comm << < nr_of_block, NR_THREAD_PER_BLOCK, sizeHashMem >>>(
community_size,
thrust::raw_pointer_cast(g.indices.data()),
thrust::raw_pointer_cast(g.links.data()),
thrust::raw_pointer_cast(g.weights.data()),
thrust::raw_pointer_cast(n2c.data()),
thrust::raw_pointer_cast(in.data()),
thrust::raw_pointer_cast(tot.data()), g.type,
thrust::raw_pointer_cast(n2c_new.data()),
thrust::raw_pointer_cast(tot_new.data()),
thrust::raw_pointer_cast(movement_counters.data()),
g.total_weight, bucketSizePerWarp,
thrust::raw_pointer_cast(g_next.indices.data()) + nrCforBlkGMem + nrCforBlkSMem + nrCforWrp + nrC_N_leq32 + nrC_N_leq16,
nrC_N_leq8, thrust::raw_pointer_cast(devPrimes.data()), nb_prime,
thrust::raw_pointer_cast(cardinalityOfComms.data()),
thrust::raw_pointer_cast(cardinalityOfComms_new.data()),
wrpSz, thrust::raw_pointer_cast(wDegs.data()));
//print_vector(in, "in (*): ");
report_time(start, stop, "neigh_comm ( <=8)");
//nb_moves = nb_moves + thrust::reduce(movement_counters.begin(), movement_counters.begin() + nrC_N_leq8, (int) 0);
/*
changeAssignment<<< nr_of_block, NR_THREAD_PER_BLOCK>>>(
thrust::raw_pointer_cast(n2c.data()), // change from
thrust::raw_pointer_cast(n2c_new.data()), // change to
thrust::raw_pointer_cast(g_next.indices.data()) + nrCforBlkGMem + nrCforBlkSMem + nrCforWrp + nrC_N_leq32 + nrC_N_leq16,
nrC_N_leq8);
*/
if (isGauss) {
if (isToUpdate) {
assert(community_size == n2c.size());
assert(community_size == n2c_new.size());
assert(community_size == tot.size());
assert(community_size == tot_new.size());
assert(community_size == cardinalityOfComms.size());
assert(community_size == cardinalityOfComms_new.size());
nr_of_block = (community_size + NR_THREAD_PER_BLOCK - 1) / NR_THREAD_PER_BLOCK;
update << <nr_of_block, NR_THREAD_PER_BLOCK>>>(community_size,
thrust::raw_pointer_cast(tot.data()),
thrust::raw_pointer_cast(tot_new.data()),
thrust::raw_pointer_cast(n2c.data()),
thrust::raw_pointer_cast(n2c_new.data()),
thrust::raw_pointer_cast(cardinalityOfComms.data()),
thrust::raw_pointer_cast(cardinalityOfComms_new.data()));
} else {
n2c = n2c_new;
tot = tot_new;
cardinalityOfComms = cardinalityOfComms_new;
}
}
}
if (nrC_N_leq16) {
if (isGauss) {
//thrust::fill_n(thrust::device, in.begin(), in.size(), 0.0); // initialize in to all zeros '0'
//tot_new = tot;
//cardinalityOfComms_new = cardinalityOfComms;
}
wrpSz = HALF_WARP;
nr_of_block = (nrC_N_leq16 + (NR_THREAD_PER_BLOCK / wrpSz) - 1) / (NR_THREAD_PER_BLOCK / wrpSz);
bucketSizePerWarp = 31; // MUST BE PRIME
sizeHashMem = (NR_THREAD_PER_BLOCK / wrpSz) * bucketSizePerWarp * sizeof (HashItem);
hipEventRecord(start, 0);
neigh_comm << < nr_of_block, NR_THREAD_PER_BLOCK, sizeHashMem >>>(
community_size,
thrust::raw_pointer_cast(g.indices.data()),
thrust::raw_pointer_cast(g.links.data()),
thrust::raw_pointer_cast(g.weights.data()),
thrust::raw_pointer_cast(n2c.data()),
thrust::raw_pointer_cast(in.data()),
thrust::raw_pointer_cast(tot.data()), g.type,
thrust::raw_pointer_cast(n2c_new.data()),
thrust::raw_pointer_cast(tot_new.data()),
thrust::raw_pointer_cast(movement_counters.data()),
g.total_weight, bucketSizePerWarp,
thrust::raw_pointer_cast(g_next.indices.data()) + nrCforBlkGMem + nrCforBlkSMem + nrCforWrp + nrC_N_leq32,
nrC_N_leq16, thrust::raw_pointer_cast(devPrimes.data()), nb_prime,
thrust::raw_pointer_cast(cardinalityOfComms.data()),
thrust::raw_pointer_cast(cardinalityOfComms_new.data()),
wrpSz, thrust::raw_pointer_cast(wDegs.data()));
report_time(start, stop, "neigh_comm ( <=16)");
//nb_moves = nb_moves + thrust::reduce(movement_counters.begin(), movement_counters.begin() + nrC_N_leq16, (int) 0);
/*
changeAssignment<<< nr_of_block, NR_THREAD_PER_BLOCK>>>(
thrust::raw_pointer_cast(n2c.data()), // change from
thrust::raw_pointer_cast(n2c_new.data()), // change to
thrust::raw_pointer_cast(g_next.indices.data()) + nrCforBlkGMem + nrCforBlkSMem + nrCforWrp + nrC_N_leq32,
nrC_N_leq16);
*/
if (isGauss) {
if (isToUpdate) {
nr_of_block = (community_size + NR_THREAD_PER_BLOCK - 1) / NR_THREAD_PER_BLOCK;
update << <nr_of_block, NR_THREAD_PER_BLOCK>>>(community_size,
thrust::raw_pointer_cast(tot.data()),
thrust::raw_pointer_cast(tot_new.data()),
thrust::raw_pointer_cast(n2c.data()),
thrust::raw_pointer_cast(n2c_new.data()),
thrust::raw_pointer_cast(cardinalityOfComms.data()),
thrust::raw_pointer_cast(cardinalityOfComms_new.data()));
} else {
n2c = n2c_new;
tot = tot_new;
cardinalityOfComms = cardinalityOfComms_new;
}
}
}
if (nrC_N_leq4) {
if (isGauss) {
//thrust::fill_n(thrust::device, in.begin(), in.size(), 0.0); // initialize in to all zeros '0'
//tot_new = tot;
//cardinalityOfComms_new = cardinalityOfComms;
}
wrpSz = QUARTER_WARP / 2;
nr_of_block = (nrC_N_leq4 + (NR_THREAD_PER_BLOCK / wrpSz) - 1) / (NR_THREAD_PER_BLOCK / wrpSz);
bucketSizePerWarp = 7; // MUST BE PRIME
sizeHashMem = (NR_THREAD_PER_BLOCK / wrpSz) * bucketSizePerWarp * sizeof (HashItem);
/*
if (0) {
std::cin>>sc;
print_vector(g.indices, "g.indices: ");
print_vector(g.links, "g.links: ");
print_vector(n2c, "n2c:");
print_vector(in, "in: ");
print_vector(tot, "tot:");
print_vector(n2c_new, "n2c_new:");
print_vector(tot_new, "tot_new:");
print_vector(movement_counters, "movement_counters:");
print_vector(g_next.indices, "g_next.indices:");
print_vector(devPrimes, "devPrimes:");
print_vector(cardinalityOfComms, "cardinalityOfComms:");
print_vector(cardinalityOfComms_new, "cardinalityOfComms_new:");
}
*/
hipEventRecord(start, 0);
//print_vector(in, "in (*): ");
neigh_comm << < nr_of_block, NR_THREAD_PER_BLOCK, sizeHashMem >>>(
community_size,
thrust::raw_pointer_cast(g.indices.data()),
thrust::raw_pointer_cast(g.links.data()),
thrust::raw_pointer_cast(g.weights.data()),
thrust::raw_pointer_cast(n2c.data()),
thrust::raw_pointer_cast(in.data()),
thrust::raw_pointer_cast(tot.data()), g.type,
thrust::raw_pointer_cast(n2c_new.data()),
thrust::raw_pointer_cast(tot_new.data()),
thrust::raw_pointer_cast(movement_counters.data()),
g.total_weight, bucketSizePerWarp,
thrust::raw_pointer_cast(g_next.indices.data()) + nrCforBlkGMem + nrCforBlkSMem + nrCforWrp + nrC_N_leq32 + nrC_N_leq16 + nrC_N_leq8,
nrC_N_leq4, thrust::raw_pointer_cast(devPrimes.data()), nb_prime,
thrust::raw_pointer_cast(cardinalityOfComms.data()),
thrust::raw_pointer_cast(cardinalityOfComms_new.data()),
wrpSz, thrust::raw_pointer_cast(wDegs.data()));
//print_vector(in, "in (*): ");
report_time(start, stop, "neigh_comm ( <=4)");
//nb_moves = nb_moves + thrust::reduce(movement_counters.begin(), movement_counters.begin() + nrC_N_leq4, (int) 0);
/*
changeAssignment<<< nr_of_block, NR_THREAD_PER_BLOCK>>>(
thrust::raw_pointer_cast(n2c.data()), // change from
thrust::raw_pointer_cast(n2c_new.data()), // change to
thrust::raw_pointer_cast(g_next.indices.data()) + nrCforBlkGMem + nrCforBlkSMem + nrCforWrp + nrC_N_leq32 + nrC_N_leq16 + nrC_N_leq8 ,
nrC_N_leq4);
*/
if (isGauss) {
if (isToUpdate) {
nr_of_block = (community_size + NR_THREAD_PER_BLOCK - 1) / NR_THREAD_PER_BLOCK;
update << <nr_of_block, NR_THREAD_PER_BLOCK>>>(community_size,
thrust::raw_pointer_cast(tot.data()),
thrust::raw_pointer_cast(tot_new.data()),
thrust::raw_pointer_cast(n2c.data()),
thrust::raw_pointer_cast(n2c_new.data()),
thrust::raw_pointer_cast(cardinalityOfComms.data()),
thrust::raw_pointer_cast(cardinalityOfComms_new.data()));
} else {
n2c = n2c_new;
tot = tot_new;
cardinalityOfComms = cardinalityOfComms_new;
}
}
}
if (nrC_N_leq32) {
if (isGauss) {
//thrust::fill_n(thrust::device, in.begin(), in.size(), 0.0); // initialize in to all zeros '0'
//tot_new = tot;
//cardinalityOfComms_new = cardinalityOfComms;
}
wrpSz = PHY_WRP_SZ;
nr_of_block = (nrC_N_leq32 + (NR_THREAD_PER_BLOCK / wrpSz) - 1) / (NR_THREAD_PER_BLOCK / wrpSz);
bucketSizePerWarp = 61; //MUST BE PRIME
sizeHashMem = (NR_THREAD_PER_BLOCK / wrpSz) * bucketSizePerWarp * sizeof (HashItem);
hipEventRecord(start, 0);
neigh_comm << < nr_of_block, NR_THREAD_PER_BLOCK, sizeHashMem >>>(
community_size,
thrust::raw_pointer_cast(g.indices.data()),
thrust::raw_pointer_cast(g.links.data()),
thrust::raw_pointer_cast(g.weights.data()),
thrust::raw_pointer_cast(n2c.data()),
thrust::raw_pointer_cast(in.data()),
thrust::raw_pointer_cast(tot.data()), g.type,
thrust::raw_pointer_cast(n2c_new.data()),
thrust::raw_pointer_cast(tot_new.data()),
thrust::raw_pointer_cast(movement_counters.data()),
g.total_weight, bucketSizePerWarp,
thrust::raw_pointer_cast(g_next.indices.data()) + nrCforBlkGMem + nrCforBlkSMem + nrCforWrp,
nrC_N_leq32, thrust::raw_pointer_cast(devPrimes.data()), nb_prime,
thrust::raw_pointer_cast(cardinalityOfComms.data()),
thrust::raw_pointer_cast(cardinalityOfComms_new.data()),
wrpSz, thrust::raw_pointer_cast(wDegs.data()));
report_time(start, stop, "neigh_comm(<=32)");
//nb_moves = nb_moves + thrust::reduce(movement_counters.begin(), movement_counters.begin() + nrC_N_leq32, (int) 0);
if (0) {
changeAssignment << < nr_of_block, NR_THREAD_PER_BLOCK>>>(
thrust::raw_pointer_cast(n2c.data()), // change from
thrust::raw_pointer_cast(n2c_new.data()), // change to
thrust::raw_pointer_cast(g_next.indices.data()) + nrCforBlkGMem + nrCforBlkSMem + nrCforWrp,
nrC_N_leq32);
}
if (isGauss) {
if (isToUpdate) {
nr_of_block = (community_size + NR_THREAD_PER_BLOCK - 1) / NR_THREAD_PER_BLOCK;
update << <nr_of_block, NR_THREAD_PER_BLOCK>>>(community_size,
thrust::raw_pointer_cast(tot.data()),
thrust::raw_pointer_cast(tot_new.data()),
thrust::raw_pointer_cast(n2c.data()),
thrust::raw_pointer_cast(n2c_new.data()),
thrust::raw_pointer_cast(cardinalityOfComms.data()),
thrust::raw_pointer_cast(cardinalityOfComms_new.data()));
} else {
n2c = n2c_new;
tot = tot_new;
cardinalityOfComms = cardinalityOfComms_new;
}
}
}
if (nrCforWrp) {
if (isGauss) {
//thrust::fill_n(thrust::device, in.begin(), in.size(), 0.0); // initialize in to all zeros '0'
//tot_new = tot;
//cardinalityOfComms_new = cardinalityOfComms;
}
wrpSz = PHY_WRP_SZ;
nr_of_block = (nrCforWrp + (NR_THREAD_PER_BLOCK / wrpSz) - 1) / (NR_THREAD_PER_BLOCK / wrpSz);
bucketSizePerWarp = WARP_TABLE_SIZE_1;
sizeHashMem = (NR_THREAD_PER_BLOCK / wrpSz) * bucketSizePerWarp * sizeof (HashItem);
hipEventRecord(start, 0);
neigh_comm << < nr_of_block, NR_THREAD_PER_BLOCK, sizeHashMem >>>(
community_size,
thrust::raw_pointer_cast(g.indices.data()),
thrust::raw_pointer_cast(g.links.data()),
thrust::raw_pointer_cast(g.weights.data()),
thrust::raw_pointer_cast(n2c.data()),
thrust::raw_pointer_cast(in.data()),
thrust::raw_pointer_cast(tot.data()), g.type,
thrust::raw_pointer_cast(n2c_new.data()),
thrust::raw_pointer_cast(tot_new.data()),
thrust::raw_pointer_cast(movement_counters.data()),
g.total_weight, bucketSizePerWarp,
thrust::raw_pointer_cast(g_next.indices.data()) + nrCforBlkGMem + nrCforBlkSMem,
nrCforWrp, thrust::raw_pointer_cast(devPrimes.data()), nb_prime,
thrust::raw_pointer_cast(cardinalityOfComms.data()),
thrust::raw_pointer_cast(cardinalityOfComms_new.data()),
wrpSz, thrust::raw_pointer_cast(wDegs.data()));
report_time(start, stop, "neigh_comm");
//nb_moves = thrust::reduce(movement_counters.begin(), movement_counters.begin() + nrCforWrp, (int) 0);
// change community assignment of processed vertices
if (0) {
changeAssignment << < nr_of_block, NR_THREAD_PER_BLOCK>>>(
thrust::raw_pointer_cast(n2c.data()), // change from
thrust::raw_pointer_cast(n2c_new.data()), // change to
thrust::raw_pointer_cast(g_next.indices.data()) + nrCforBlkGMem + nrCforBlkSMem, // of these communities
nrCforWrp);
}
if (isGauss) {
if (isToUpdate) {
nr_of_block = (community_size + NR_THREAD_PER_BLOCK - 1) / NR_THREAD_PER_BLOCK;
update << <nr_of_block, NR_THREAD_PER_BLOCK>>>(community_size,
thrust::raw_pointer_cast(tot.data()),
thrust::raw_pointer_cast(tot_new.data()),
thrust::raw_pointer_cast(n2c.data()),
thrust::raw_pointer_cast(n2c_new.data()),
thrust::raw_pointer_cast(cardinalityOfComms.data()),
thrust::raw_pointer_cast(cardinalityOfComms_new.data()));
} else {
n2c = n2c_new;
tot = tot_new;
cardinalityOfComms = cardinalityOfComms_new;
}
}
}
if (nrCforBlkSMem > 0) {
if (isGauss) {
//thrust::fill_n(thrust::device, in.begin(), in.size(), 0.0); // initialize in to all zeros '0'
//tot_new = tot;
//cardinalityOfComms_new = cardinalityOfComms;
}
wrpSz = PHY_WRP_SZ;
hipEventRecord(start, 0);
//std::cout<<" nrBlockForLargeNhoods :"<<nrBlockForLargeNhoods<<" nrCforBlkSMem: "<< nrCforBlkSMem<<std::endl;
lookAtNeigboringComms << <nrBlockForLargeNhoods, NR_THREAD_PER_BLOCK>>>(
thrust::raw_pointer_cast(g.indices.data()),
thrust::raw_pointer_cast(g.links.data()),
thrust::raw_pointer_cast(g.weights.data()),
thrust::raw_pointer_cast(n2c.data()),
thrust::raw_pointer_cast(in.data()),
thrust::raw_pointer_cast(tot.data()), g.type,
thrust::raw_pointer_cast(n2c_new.data()),
NULL,
thrust::raw_pointer_cast(tot_new.data()),
thrust::raw_pointer_cast(moveCounters.data()), g.total_weight,
thrust::raw_pointer_cast(g_next.indices.data()) + nrCforBlkGMem, nrCforBlkSMem,
thrust::raw_pointer_cast(globalHashTable.data()),
thrust::raw_pointer_cast(hashTablePtrs.data()),
thrust::raw_pointer_cast(devPrimes.data()), nb_prime, wrpSz,
thrust::raw_pointer_cast(cardinalityOfComms.data()),
thrust::raw_pointer_cast(cardinalityOfComms_new.data()),
thrust::raw_pointer_cast(wDegs.data()));
report_time(start, stop, "lookAtNeigboringComms(sh)");
//nb_moves = nb_moves + thrust::reduce(moveCounters.begin(), moveCounters.begin() + nrBlockForLargeNhoods, (int) 0);
/*
if (0) {
changeAssignment << < nr_of_block, NR_THREAD_PER_BLOCK>>>(
thrust::raw_pointer_cast(n2c.data()), // change from
thrust::raw_pointer_cast(n2c_new.data()), // change to
thrust::raw_pointer_cast(g_next.indices.data()) + nrCforBlkGMem, nrCforBlkSMem);
}
*/
if (isGauss) {
if (isToUpdate) {
nr_of_block = (community_size + NR_THREAD_PER_BLOCK - 1) / NR_THREAD_PER_BLOCK;
update << <nr_of_block, NR_THREAD_PER_BLOCK>>>(community_size,
thrust::raw_pointer_cast(tot.data()),
thrust::raw_pointer_cast(tot_new.data()),
thrust::raw_pointer_cast(n2c.data()),
thrust::raw_pointer_cast(n2c_new.data()),
thrust::raw_pointer_cast(cardinalityOfComms.data()),
thrust::raw_pointer_cast(cardinalityOfComms_new.data()));
} else {
n2c = n2c_new;
tot = tot_new;
cardinalityOfComms = cardinalityOfComms_new;
}
}
}
#ifdef LARGE_LATER
if (nrCforBlkGMem > 0) {
if (isGauss) {
//thrust::fill_n(thrust::device, in.begin(), in.size(), 0.0); // initialize in to all zeros '0'
//tot_new = tot;
//cardinalityOfComms_new = cardinalityOfComms;
}
wrpSz = PHY_WRP_SZ;
hipEventRecord(start, 0);
//std::cout<<" nrBlockForLargeNhoods: "<<nrBlockForLargeNhoods<<" nrCforBlkGMem: "<< nrCforBlkGMem<<std::endl;
lookAtNeigboringComms << <nrBlockForLargeNhoods, (NR_THREAD_PER_BLOCK * 2)>>>(
thrust::raw_pointer_cast(g.indices.data()),
thrust::raw_pointer_cast(g.links.data()),
thrust::raw_pointer_cast(g.weights.data()),
thrust::raw_pointer_cast(n2c.data()),
thrust::raw_pointer_cast(in.data()),
thrust::raw_pointer_cast(tot.data()), g.type,
thrust::raw_pointer_cast(n2c_new.data()),
NULL,
thrust::raw_pointer_cast(tot_new.data()),
thrust::raw_pointer_cast(moveCounters.data()), g.total_weight,
thrust::raw_pointer_cast(g_next.indices.data()), nrCforBlkGMem,
thrust::raw_pointer_cast(globalHashTable.data()),
thrust::raw_pointer_cast(hashTablePtrs.data()),
thrust::raw_pointer_cast(devPrimes.data()), nb_prime, wrpSz,
thrust::raw_pointer_cast(cardinalityOfComms.data()),
thrust::raw_pointer_cast(cardinalityOfComms_new.data()),
thrust::raw_pointer_cast(wDegs.data()));
report_time(start, stop, "lookAtNeigboringComms");
//nb_moves = nb_moves + thrust::reduce(moveCounters.begin(), moveCounters.begin() + nrBlockForLargeNhoods, (int) 0);
/*
if (0) {
changeAssignment << < nr_of_block, NR_THREAD_PER_BLOCK>>>(
thrust::raw_pointer_cast(n2c.data()), // change from
thrust::raw_pointer_cast(n2c_new.data()), // change to
thrust::raw_pointer_cast(g_next.indices.data()), nrCforBlkGMem);
}
*/
if (isGauss) {
if (isToUpdate) {
nr_of_block = (community_size + NR_THREAD_PER_BLOCK - 1) / NR_THREAD_PER_BLOCK;
update << <nr_of_block, NR_THREAD_PER_BLOCK>>>(community_size,
thrust::raw_pointer_cast(tot.data()),
thrust::raw_pointer_cast(tot_new.data()),
thrust::raw_pointer_cast(n2c.data()),
thrust::raw_pointer_cast(n2c_new.data()),
thrust::raw_pointer_cast(cardinalityOfComms.data()),
thrust::raw_pointer_cast(cardinalityOfComms_new.data()));
} else {
n2c = n2c_new;
tot = tot_new;
cardinalityOfComms = cardinalityOfComms_new;
}
}
}
#endif
/*
if (isGauss) {
//thrust::fill_n(thrust::device, in.begin(), in.size(), 0.0); // initialize in to all zeros '0'
wrpSz = PHY_WRP_SZ;
load_per_blk = CHUNK_PER_WARP * (NR_THREAD_PER_BLOCK / wrpSz);
nr_of_block = (community_size + load_per_blk - 1) / load_per_blk;
//void computeInternals(int *indices, unsigned int *links, float *weights, int *n2c, float *in, unsigned int nrComms);
computeInternals << <nr_of_block, NR_THREAD_PER_BLOCK>>>(thrust::raw_pointer_cast(g.indices.data()),
thrust::raw_pointer_cast(g.links.data()),
thrust::raw_pointer_cast(g.weights.data()),
thrust::raw_pointer_cast(n2c.data()),
thrust::raw_pointer_cast(in.data()), community_size, g.type);
}*/
//std::cout << "---------Now " << nb_moves << std::endl;
/*
std::cout << "#Moves (Total):" << nb_moves << " nrCforBlkSMem : " << nrCforBlkSMem << " nrCforBlkGMem :" << nrCforBlkGMem << std::endl;
if (0) {
std::cout << "After Traversing.............. " << std::endl;
print_vector(in, "IN:");
print_vector(tot, "TOT:");
print_vector(n2c_new, " n2c_new : ");
print_vector(tot_new, " tot_new : ");
}
*/
/*
if (0) {
float sum_in = thrust::reduce(in.begin(), in.end(), 0.0);
//float sum_tot = thrust::reduce(tot.begin(), tot.end(), 0.0);
thrust::host_vector<float> hvec = tot;
thrust::host_vector<float> hIN = in;
double stot = 0;
for (int i = 0; i < hvec.size(); i++)
stot += hvec[i] * hvec[i];
std::cout << "sin:" << sum_in << " stot: " << stot << std::endl;
//std::cout << " IN[0]: "<< hIN[0]<< " IN[1]: "<< hIN[1] << std::endl;
//std::cout << "sum_in = " << sum_in << " sum_tot = " << sum_tot << std::endl;
}
*/
new_mod = modularity(tot, in);
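// Assuming modularity() implements the standard Louvain objective
// Q = sum_c [ in_c/(2m) - (tot_c/(2m))^2 ],
// where in_c is the (twice-counted) internal weight of community c, tot_c its
// total weighted degree, and g.total_weight plays the role of 2m.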
double scur_mod = cur_mod;
double snew_mod = new_mod;
/*
std::cout << nrIteration << " " << "Modularity " << cur_mod << " --> " << new_mod <<
" Gain: " << (new_mod - cur_mod) << std::endl;
*/
if ((new_mod - cur_mod) >= threshold) { // accept this sweep only if the modularity gain meets the threshold
n2c_old = n2c;
n2c = n2c_new;
tot = tot_new;
cardinalityOfComms = cardinalityOfComms_new;
cur_mod = new_mod;
if (cur_mod < init_mod) {
cur_mod = init_mod;
}
improvement = true;
} else {
//std::cout << "Break the loop " << std::endl;
break;
}
if (nrIteration)
std::cout << nrIteration << " " << "Modularity " << scur_mod << " --> "
<< snew_mod << " Gain: " << (snew_mod - scur_mod) << std::endl;
/*
if (verbose) {
print_vector(n2c, " n2c (After Swap): ");
print_vector(in, " in (After Swap): ");
print_vector(tot, " tot (After Swap): ");
}
*/
tot_new.clear();
t2 = clock();
float seconds = (float) (t2 - t1) / CLOCKS_PER_SEC;
std::cout<< "iteration "<<(nrIteration+1)<<": "<<seconds<<" sec"<<std::endl;
} while (++nrIteration < 1000);
cardinalityOfComms.clear();
cardinalityOfComms_new.clear();
globalHashTable.clear();
hashTablePtrs.clear();
n2c = n2c_old;
n2c_old.clear();
//std::cout<<"#iteration: "<<nrIteration<<std::endl;
// print_vector(n2c, " n2c (Before contraction)");
/*
thrust::host_vector<int> hn2c = n2c;
std::ofstream ofs ("n2c.txt", std::ofstream::out);
for(int i=0; i< hn2c.size(); i++) {
ofs<<hn2c[i]<<" ";
}
ofs<<"\n";
ofs.close();
std::ofstream outfile ("n2c.txt",std::ofstream::binary);
outfile.write ((char*)&hn2c[0],sizeof(int)*hn2c.size());
char newline= '\n';
outfile.write ((char*)&newline,sizeof(char));
outfile.close();
*/
tot_new.clear();
g_next.indices.clear();
g_next.links.clear();
n2c_new.clear();
wDegs.clear();
//hipEventDestroy(start);
//hipEventDestroy(stop);
return cur_mod;
}
// ==================== f79e4a67f1ffb8d18ef25fc4cf038b4e5109c1ba.cu ====================
// ***********************************************************************
//
// Rundemanen: CUDA C++ parallel program for community detection
// Md Naim ([email protected]), Fredrik Manne ([email protected])
// University of Bergen
//
// ***********************************************************************
//
// Copyright (2016) University of Bergen
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// ************************************************************************
#include <algorithm>
#include <iostream>
#include "communityGPU.h"
#include"hostconstants.h"
#include"fstream"
#include <thrust/gather.h>
double Community::one_levelGaussSeidel(double init_mod, bool isLastRound,
int minSize, double easyThreshold, bool isGauss, cudaStream_t *streams,
int nrStreams, cudaEvent_t &start, cudaEvent_t &stop) {
// NOTE: the cudaStream_t *streams parameter is never used
std::cout << std::endl << " Inside method for modularity optimization ";
if (g.type == WEIGHTED) {
std::cout << "WEIGHTED Graph" << std::endl;
} else {
std::cout << "Unweighted Graph" << std::endl;
}
bool hostPrint = false;
bool verbose = false;
int sc;
sc = 0; //std::cin>>sc;
if (sc > 1)
hostPrint = true;
if (sc > 0)
verbose = true;
/*
if (hostPrint) {
print_vector(g.indices, "indices: ");
std::cout << std::endl << "|indices|:" << g.indices.size() << std::endl;
}
*/
bool improvement = false;
int nb_moves;
double cur_mod = -1.0, new_mod = -1.0;
//cudaEvent_t start, stop;
//cudaEventCreate(&start);
//cudaEventCreate(&stop);
unsigned int nrIteration = 0;
cudaEventRecord(start, 0);
//Compute degree of each node
thrust::device_vector<int> sizesOfNhoods(g.indices.size() - 1, 0);
thrust::transform(g.indices.begin() + 1, g.indices.end(),
g.indices.begin(), sizesOfNhoods.begin(),
thrust::minus<int >());
assert(CAPACITY_FACTOR_DENOMINATOR >= CAPACITY_FACTOR_NUMERATOR);
// Filters for bins
// (-1) to hash the community id itself
int warpLimit = (WARP_TABLE_SIZE_1 * CAPACITY_FACTOR_NUMERATOR / CAPACITY_FACTOR_DENOMINATOR) - 1;
int blkSMemLimit = (SHARED_TABLE_SIZE * CAPACITY_FACTOR_NUMERATOR / CAPACITY_FACTOR_DENOMINATOR) - 1;
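// The tables are deliberately under-filled. Hypothetical example: with
// WARP_TABLE_SIZE_1 = 97 and a 3/4 capacity factor, warpLimit = 97*3/4 - 1
// = 71, so the warp-level path only takes neighborhoods touching at most 71
// distinct communities, keeping the open-addressing load factor below one.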
/*
std::cout << "warpLimit: " << warpLimit << " blkSMemLimit: " << blkSMemLimit << std::endl;
*/
IsGreaterThanLimit<int, int> filterBlkGMem(blkSMemLimit);
IsInRange<int, int> filterBlkSMem(warpLimit + 1, blkSMemLimit);
IsInRange<int, int> filterForWrp(33, warpLimit);
assert(warpLimit > 32);
IsInRange<int, int> filter_N_leq32(17, 32);
IsInRange<int, int> filter_N_leq16(9, 16);
IsInRange<int, int> filter_N_leq8(5, 8);
IsInRange<int, int> filter_N_leq4(1, 4);
IsInRange<int, int> filterForNone(0, 0); // node with no neighbors
//count #work for each bin
int nrCforBlkGMem = thrust::count_if(thrust::device, sizesOfNhoods.begin(), sizesOfNhoods.end(), filterBlkGMem);
int nrCforBlkSMem = thrust::count_if(thrust::device, sizesOfNhoods.begin(), sizesOfNhoods.end(), filterBlkSMem);
int nrCforWrp = thrust::count_if(thrust::device, sizesOfNhoods.begin(), sizesOfNhoods.end(), filterForWrp);
int nrC_N_leq32 = thrust::count_if(thrust::device, sizesOfNhoods.begin(), sizesOfNhoods.end(), filter_N_leq32);
int nrC_N_leq16 = thrust::count_if(thrust::device, sizesOfNhoods.begin(), sizesOfNhoods.end(), filter_N_leq16);
int nrC_N_leq8 = thrust::count_if(thrust::device, sizesOfNhoods.begin(), sizesOfNhoods.end(), filter_N_leq8);
int nrC_N_leq4 = thrust::count_if(thrust::device, sizesOfNhoods.begin(), sizesOfNhoods.end(), filter_N_leq4);
int nrCforNone = thrust::count_if(thrust::device, sizesOfNhoods.begin(), sizesOfNhoods.end(), filterForNone);
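// Each community is binned by neighborhood size so it can be processed at a
// matching granularity. Assuming PHY_WRP_SZ = 32: a vertex with 20 neighbors
// is counted by filter_N_leq32 (range 17..32) and later gets a full 32-lane
// warp, while a 6-neighbor vertex lands in the 8-lane (QUARTER_WARP) bin.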
/*
std::cout << "distribution: "<< nrC_N_leq4 <<" : "<< nrC_N_leq8<<" : "<< nrC_N_leq16<<" : "<<
nrC_N_leq32<<" : "<<nrCforWrp<<" : "<<nrCforBlkSMem<<" : "<<nrCforBlkGMem<<std::endl;
std::cout << "distribution: "<< (100*nrC_N_leq4)/community_size <<" : "<< (100*nrC_N_leq8)/community_size<<" : "<< (100*nrC_N_leq16)/community_size <<" : "<<
(100*nrC_N_leq32)/community_size<<" : "<<(100*nrCforWrp)/community_size<<" : "<<(100*nrCforBlkSMem)/community_size<<" : "<<(100*nrCforBlkGMem)/community_size <<std::endl;
*/
// Just for statistics
IsInRange<int, int> filter_N_leq64(33, 64);
IsInRange<int, int> filter_N_leq96(65, 96);
int nrC_N_leq64 = thrust::count_if(thrust::device, sizesOfNhoods.begin(), sizesOfNhoods.end(), filter_N_leq64);
int nrC_N_leq96 = thrust::count_if(thrust::device, sizesOfNhoods.begin(), sizesOfNhoods.end(), filter_N_leq96);
int maxNrWrp = thrust::max(thrust::max(thrust::max(nrCforWrp, nrC_N_leq32), thrust::max(nrC_N_leq16, nrC_N_leq8)), nrC_N_leq4);
/*
if (1) {
std::cout << "-------> nrCforBlk[" << blkSMemLimit + 1 << ", -] : " << nrCforBlkGMem << std::endl;
std::cout << "-------> nrCforBlk[" << warpLimit + 1 << "," << blkSMemLimit << "] : " << nrCforBlkSMem << std::endl;
std::cout << "----------> nrCforWrp[ 33, " << warpLimit << "] : " << nrCforWrp << std::endl;
std::cout << "nrC_N_leq32 :" << nrC_N_leq32 << std::endl;
std::cout << "nrC_N_leq16 :" << nrC_N_leq16 << std::endl;
std::cout << "nrC_N_leq8 :" << nrC_N_leq8 << std::endl;
std::cout << "nrC_N_leq4 :" << nrC_N_leq4 << std::endl;
std::cout << "----------> nrCforNone[0,0] : " << nrCforNone << std::endl;
std::cout << "maxNrWrp :" << maxNrWrp << std::endl;
std::cout << "----------Statistics----------------" << std::endl;
std::cout << "nrC_N_leq64 :" << nrC_N_leq64 << std::endl;
std::cout << "nrC_N_leq96 :" << nrC_N_leq96 << std::endl;
}
*/
assert((nrCforBlkGMem + nrCforBlkSMem + nrCforWrp + nrC_N_leq32 + nrC_N_leq16 + nrC_N_leq8 + nrC_N_leq4 + nrCforNone) == community_size);
thrust::device_vector<int> movement_counters(maxNrWrp, 0);
// Copy the identities of all communities into g_next.links
g_next.links.resize(community_size, 0);
thrust::sequence(g_next.links.begin(), g_next.links.end(), 0);
// Use g_next.indices to hold community ids ordered by decreasing neighborhood size
g_next.indices.resize(community_size, -1);
// First: community ids with the largest neighborhoods
thrust::copy_if(thrust::device, g_next.links.begin(), g_next.links.end(),
sizesOfNhoods.begin(), g_next.indices.begin(), filterBlkGMem);
// Then community ids with medium sized neighborhoods
thrust::copy_if(thrust::device, g_next.links.begin(), g_next.links.end(), sizesOfNhoods.begin(),
g_next.indices.begin() + nrCforBlkGMem, filterBlkSMem);
// Community ids with smaller neighborhoods
thrust::copy_if(thrust::device, g_next.links.begin(), g_next.links.end(), sizesOfNhoods.begin(),
g_next.indices.begin() + nrCforBlkGMem + nrCforBlkSMem, filterForWrp);
thrust::copy_if(thrust::device, g_next.links.begin(), g_next.links.end(), sizesOfNhoods.begin(),
g_next.indices.begin() + nrCforBlkGMem + nrCforBlkSMem + nrCforWrp, filter_N_leq32);
thrust::copy_if(thrust::device, g_next.links.begin(), g_next.links.end(), sizesOfNhoods.begin(),
g_next.indices.begin() + nrCforBlkGMem + nrCforBlkSMem + nrCforWrp + nrC_N_leq32, filter_N_leq16);
thrust::copy_if(thrust::device, g_next.links.begin(), g_next.links.end(), sizesOfNhoods.begin(),
g_next.indices.begin() + nrCforBlkGMem + nrCforBlkSMem + nrCforWrp + nrC_N_leq32 + nrC_N_leq16, filter_N_leq8);
thrust::copy_if(thrust::device, g_next.links.begin(), g_next.links.end(), sizesOfNhoods.begin(),
g_next.indices.begin() + nrCforBlkGMem + nrCforBlkSMem + nrCforWrp + nrC_N_leq32 + nrC_N_leq16 + nrC_N_leq8, filter_N_leq4);
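// After this copy_if cascade, g_next.indices is partitioned by decreasing
// work granularity:
// [ blkGMem | blkSMem | warp (33..warpLimit) | <=32 | <=16 | <=8 | <=4 ]
// Each kernel launch below addresses its own sub-range via offsets such as
// nrCforBlkGMem + nrCforBlkSMem + nrCforWrp + ...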
///////////////////////////////////////////////////
// Now use g_next.links to gather the neighborhood sizes in the order given by g_next.indices
g_next.links.resize(g_next.indices.size(), 0);
thrust::gather(thrust::device, g_next.indices.begin(), g_next.indices.end(), sizesOfNhoods.begin(), g_next.links.begin());
// Sort by neighborhood size (descending); only the first nrCforBlkGMem entries
thrust::sort_by_key(g_next.links.begin(), g_next.links.begin() + nrCforBlkGMem,
g_next.indices.begin(), thrust::greater<unsigned int>());
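// Sorting descending puts the very largest neighborhoods first, so the
// prefix scan over the leading nrBlockForLargeNhoods sizes (further below)
// carves globalHashTable into per-block segments big enough for any
// neighborhood a block may be assigned.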
//////////Just to debug /////////////////
/*
if (0) {
thrust::host_vector<int> esSizes = sizesOfNhoods;
thrust::host_vector<int> bigCommunites = g_next.indices;
for (int k = 0; k < thrust::min<int>(bigCommunites.size(), 5); k++) {
std::cout << bigCommunites[k] << "::" << esSizes[bigCommunites[k]] << std::endl;
}
esSizes.clear();
bigCommunites.clear();
}
*/
///////////////////////////////Allocate data for Global HashTable////////////////////
int nrBlockForLargeNhoods = 90;
nrBlockForLargeNhoods = thrust::min(thrust::max(nrCforBlkGMem, nrCforBlkSMem), nrBlockForLargeNhoods);
thrust::device_vector<int> hashTablePtrs(nrBlockForLargeNhoods + 1, 0);
// g_next.links holds the sizes of the large neighborhoods
thrust::inclusive_scan(g_next.links.begin(), g_next.links.begin() + nrBlockForLargeNhoods,
hashTablePtrs.begin() + 1, thrust::plus<int>());
thrust::device_vector<HashItem> globalHashTable(2 * hashTablePtrs.back());
int szHTmem = thrust::reduce(g_next.links.begin(), g_next.links.begin() + nrBlockForLargeNhoods, (int) 0);
//std::cout << globalHashTable.size() << ":" << 2 * szHTmem << std::endl;
thrust::device_vector<int> moveCounters(nrBlockForLargeNhoods, 0);
////////////////////////////////////////////////////
unsigned int wrpSz = PHY_WRP_SZ;
int nr_of_block = 0;
//std::cout << " g.weight:(copied from host graph) " << g.total_weight << std::endl;
//////////////////////////////////////////////////////////////
n2c.resize(community_size);
thrust::sequence(n2c.begin(), n2c.end(), 0);
//std::cout << "community_size : " << community_size << " n2c.size : " << n2c.size() << std::endl;
thrust::device_vector< int> n2c_old(n2c.size(), -1);
assert(community_size == n2c.size());
g.total_weight = 0.0;
if (g.type == WEIGHTED) {
g.total_weight = thrust::reduce(thrust::device, g.weights.begin(), g.weights.end(), (double) 0, thrust::plus<double>());
} else {
g.total_weight = (double) g.nb_links;
}
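// For an unweighted graph the weight equals the number of stored links;
// assuming the CSR structure keeps each undirected edge in both directions,
// g.total_weight corresponds to 2m in the modularity formula.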
report_time(start, stop, "FilterCopy&M");
//std::cout << " g.weight(computed in device): " << g.total_weight << std::endl;
thrust::device_vector< int> cardinalityOfComms(community_size, 1); // cardinality of each community
thrust::device_vector< int> cardinalityOfComms_new(community_size, 0); // cardinality of each community
thrust::device_vector<float> tot_new(community_size, 0.0);
thrust::device_vector<float> tot(community_size, 0.0);
thrust::device_vector<float> in(community_size, 0.0);
in.resize(community_size);
tot.resize(community_size);
tot_new.resize(community_size);
// n2c_new.clear();
n2c_new.resize(community_size);
/////////////////////////////////////////////////////////////
wrpSz = PHY_WRP_SZ;
int load_per_blk = CHUNK_PER_WARP * (NR_THREAD_PER_BLOCK / wrpSz);
nr_of_block = (community_size + load_per_blk - 1) / load_per_blk;
thrust::device_vector<float> wDegs(community_size, 0.0);
cudaEventRecord(start, 0);
preComputeWdegs << <nr_of_block, NR_THREAD_PER_BLOCK>>>(thrust::raw_pointer_cast(g.indices.data()),
thrust::raw_pointer_cast(g.weights.data()),
thrust::raw_pointer_cast(wDegs.data()),
g.type, community_size, wrpSz);
report_time(start, stop, "preComputeWdegs");
//////////////////////////////////////////////////////////////
wrpSz = PHY_WRP_SZ;
load_per_blk = CHUNK_PER_WARP * (NR_THREAD_PER_BLOCK / wrpSz);
nr_of_block = (community_size + load_per_blk - 1) / load_per_blk;
int size_of_shared_memory = (2 * CHUNK_PER_WARP + 1)*(NR_THREAD_PER_BLOCK / wrpSz) * sizeof (int);
cudaEventRecord(start, 0);
initialize_in_tot << < nr_of_block, NR_THREAD_PER_BLOCK, size_of_shared_memory >>>(community_size,
thrust::raw_pointer_cast(g.indices.data()), thrust::raw_pointer_cast(g.links.data()),
thrust::raw_pointer_cast(g.weights.data()), thrust::raw_pointer_cast(tot.data()),
NULL, thrust::raw_pointer_cast(n2c.data()), g.type, NULL, wrpSz,
thrust::raw_pointer_cast(wDegs.data()));
report_time(start, stop, "initialize_in_tot");
//////////////////////////////////////
int loopCnt = 0;
double threshold = min_modularity;
if (community_size > minSize && isLastRound == false)
threshold = easyThreshold;
std::cout<<"Status:: community size - "<<community_size<<" threshold - "<<threshold<<std::endl;
// std::cout << "minSize: " << minSize << std::endl;
// WARNING: never set isToUpdate to true; the update-kernel path is known to be broken.
bool isToUpdate = false; // true;
clock_t t1, t2;
do {
t1 = clock();
loopCnt++;
// std::cout << " ---------------------------- do-while ---------------------" << loopCnt << std::endl;
thrust::fill_n(thrust::device, in.begin(), in.size(), 0.0); // initialize in to all zeros '0'
n2c_new = n2c; // REQUIRED assignment: seed the "new" state with the current one
tot_new = tot;
cardinalityOfComms_new = cardinalityOfComms;
//thrust::fill_n(thrust::device, tot_new.begin(), tot_new.size(),0.0);
//thrust::fill_n(thrust::device, cardinalityOfComms_new.begin(), cardinalityOfComms_new.size(),0);
nb_moves = 0;
unsigned int bucketSizePerWarp = 0;
size_t sizeHashMem = 0;
// Initialize counters
movement_counters.clear();
movement_counters.resize(maxNrWrp, 0);
moveCounters.clear();
moveCounters.resize(nrBlockForLargeNhoods, 0);
if (nrCforBlkGMem > 0) {
if (isGauss) {
//thrust::fill_n(thrust::device, in.begin(), in.size(), 0.0); // initialize in to all zeros '0'
//tot_new = tot;
//cardinalityOfComms_new = cardinalityOfComms;
}
wrpSz = PHY_WRP_SZ;
cudaEventRecord(start, 0);
//std::cout<<" nrBlockForLargeNhoods: "<<nrBlockForLargeNhoods<<" nrCforBlkGMem: "<< nrCforBlkGMem<<std::endl;
lookAtNeigboringComms << <nrBlockForLargeNhoods, (NR_THREAD_PER_BLOCK * 2)>>>(
thrust::raw_pointer_cast(g.indices.data()),
thrust::raw_pointer_cast(g.links.data()),
thrust::raw_pointer_cast(g.weights.data()),
thrust::raw_pointer_cast(n2c.data()),
thrust::raw_pointer_cast(in.data()),
thrust::raw_pointer_cast(tot.data()), g.type,
thrust::raw_pointer_cast(n2c_new.data()),
NULL,
thrust::raw_pointer_cast(tot_new.data()),
thrust::raw_pointer_cast(moveCounters.data()), g.total_weight,
thrust::raw_pointer_cast(g_next.indices.data()), nrCforBlkGMem,
thrust::raw_pointer_cast(globalHashTable.data()),
thrust::raw_pointer_cast(hashTablePtrs.data()),
thrust::raw_pointer_cast(devPrimes.data()), nb_prime, wrpSz,
thrust::raw_pointer_cast(cardinalityOfComms.data()),
thrust::raw_pointer_cast(cardinalityOfComms_new.data()),
thrust::raw_pointer_cast(wDegs.data()));
report_time(start, stop, "lookAtNeigboringComms");
//nb_moves = nb_moves + thrust::reduce(moveCounters.begin(), moveCounters.begin() + nrBlockForLargeNhoods, (int) 0);
/*
if (0) {
changeAssignment << < nr_of_block, NR_THREAD_PER_BLOCK>>>(
thrust::raw_pointer_cast(n2c.data()), // change from
thrust::raw_pointer_cast(n2c_new.data()), // change to
thrust::raw_pointer_cast(g_next.indices.data()), nrCforBlkGMem);
}
*/
if (isGauss) {
if (isToUpdate) {
nr_of_block = (community_size + NR_THREAD_PER_BLOCK - 1) / NR_THREAD_PER_BLOCK;
update << <nr_of_block, NR_THREAD_PER_BLOCK>>>(community_size,
thrust::raw_pointer_cast(tot.data()),
thrust::raw_pointer_cast(tot_new.data()),
thrust::raw_pointer_cast(n2c.data()),
thrust::raw_pointer_cast(n2c_new.data()),
thrust::raw_pointer_cast(cardinalityOfComms.data()),
thrust::raw_pointer_cast(cardinalityOfComms_new.data()));
} else {
n2c = n2c_new;
tot = tot_new;
cardinalityOfComms = cardinalityOfComms_new;
}
}
}
/*
if (verbose) {
std::cout << "Before Traversing.............. " << std::endl;
print_vector(tot, "tot:");
}
if (verbose) {
std::cout << " community_size:" << community_size << std::endl;
std::cout << " g.indices:" << g.indices.size() << std::endl;
std::cout << " g.links:" << g.links.size() << std::endl;
std::cout << " g.weights:" << g.weights.size() << std::endl;
std::cout << " n2c:" << n2c.size() << std::endl;
std::cout << " in:" << in.size() << std::endl;
std::cout << " n2c_new:" << n2c_new.size() << std::endl;
std::cout << " tot_new:" << tot_new.size() << std::endl;
std::cout << " movement_counters: " << movement_counters.size() << std::endl;
std::cout << " g.total_weight: " << g.total_weight << std::endl;
}
if (verbose) {
nb_moves = thrust::reduce(movement_counters.begin(), movement_counters.end(), (int) 0);
std::cout << "---------*Now* " << nb_moves << std::endl;
}
*/
sc = 0; //std::cin>>sc;
//////////////////////////////////////////////////
/*
if (nrC_N_leq32) {
if (isGauss) {
thrust::fill_n(thrust::device, in.begin(), in.size(), 0.0); // initialize in to all zeros '0'
//tot_new = tot;
//cardinalityOfComms_new = cardinalityOfComms;
}
wrpSz = PHY_WRP_SZ;
nr_of_block = (nrC_N_leq32 + (NR_THREAD_PER_BLOCK / wrpSz) - 1) / (NR_THREAD_PER_BLOCK / wrpSz);
bucketSizePerWarp = 61; //MUST BE PRIME
sizeHashMem = (NR_THREAD_PER_BLOCK / wrpSz) * bucketSizePerWarp * sizeof (HashItem);
cudaEventRecord(start, 0);
neigh_comm << < nr_of_block, NR_THREAD_PER_BLOCK, sizeHashMem >>>(
community_size,
thrust::raw_pointer_cast(g.indices.data()),
thrust::raw_pointer_cast(g.links.data()),
thrust::raw_pointer_cast(g.weights.data()),
thrust::raw_pointer_cast(n2c.data()),
thrust::raw_pointer_cast(in.data()),
thrust::raw_pointer_cast(tot.data()), g.type,
thrust::raw_pointer_cast(n2c_new.data()),
thrust::raw_pointer_cast(tot_new.data()),
thrust::raw_pointer_cast(movement_counters.data()),
g.total_weight, bucketSizePerWarp,
thrust::raw_pointer_cast(g_next.indices.data()) + nrCforBlkGMem + nrCforBlkSMem + nrCforWrp,
nrC_N_leq32, thrust::raw_pointer_cast(devPrimes.data()), nb_prime,
thrust::raw_pointer_cast(cardinalityOfComms.data()),
thrust::raw_pointer_cast(cardinalityOfComms_new.data()),
wrpSz, thrust::raw_pointer_cast(wDegs.data()));
report_time(start, stop, "neigh_comm(<=32)");
nb_moves = nb_moves + thrust::reduce(movement_counters.begin(), movement_counters.begin() + nrC_N_leq32, (int) 0);
if(0){
changeAssignment<<< nr_of_block, NR_THREAD_PER_BLOCK>>>(
thrust::raw_pointer_cast(n2c.data()), // change from
thrust::raw_pointer_cast(n2c_new.data()), // change to
thrust::raw_pointer_cast(g_next.indices.data()) + nrCforBlkGMem + nrCforBlkSMem + nrCforWrp,
nrC_N_leq32);
}
if (isGauss) {
n2c = n2c_new;
tot = tot_new;
cardinalityOfComms = cardinalityOfComms_new;
}
}
if (nrCforWrp) {
if (isGauss) {
thrust::fill_n(thrust::device, in.begin(), in.size(), 0.0); // initialize in to all zeros '0'
//tot_new = tot;
//cardinalityOfComms_new = cardinalityOfComms;
}
wrpSz = PHY_WRP_SZ;
nr_of_block = (nrCforWrp + (NR_THREAD_PER_BLOCK / wrpSz) - 1) / (NR_THREAD_PER_BLOCK / wrpSz);
bucketSizePerWarp = WARP_TABLE_SIZE_1;
sizeHashMem = (NR_THREAD_PER_BLOCK / wrpSz) * bucketSizePerWarp * sizeof (HashItem);
cudaEventRecord(start, 0);
neigh_comm << < nr_of_block, NR_THREAD_PER_BLOCK, sizeHashMem >>>(
community_size,
thrust::raw_pointer_cast(g.indices.data()),
thrust::raw_pointer_cast(g.links.data()),
thrust::raw_pointer_cast(g.weights.data()),
thrust::raw_pointer_cast(n2c.data()),
thrust::raw_pointer_cast(in.data()),
thrust::raw_pointer_cast(tot.data()), g.type,
thrust::raw_pointer_cast(n2c_new.data()),
thrust::raw_pointer_cast(tot_new.data()),
thrust::raw_pointer_cast(movement_counters.data()),
g.total_weight, bucketSizePerWarp,
thrust::raw_pointer_cast(g_next.indices.data()) + nrCforBlkGMem + nrCforBlkSMem,
nrCforWrp, thrust::raw_pointer_cast(devPrimes.data()), nb_prime,
thrust::raw_pointer_cast(cardinalityOfComms.data()),
thrust::raw_pointer_cast(cardinalityOfComms_new.data()),
wrpSz, thrust::raw_pointer_cast(wDegs.data()));
report_time(start, stop, "neigh_comm");
nb_moves = thrust::reduce(movement_counters.begin(), movement_counters.begin() + nrCforWrp, (int) 0);
// change community assignment of processed vertices
if (0) {
changeAssignment << < nr_of_block, NR_THREAD_PER_BLOCK>>>(
thrust::raw_pointer_cast(n2c.data()), // change from
thrust::raw_pointer_cast(n2c_new.data()), // change to
thrust::raw_pointer_cast(g_next.indices.data()) + nrCforBlkGMem + nrCforBlkSMem, // of these communities
nrCforWrp);
}
if (isGauss) {
n2c = n2c_new;
tot = tot_new;
cardinalityOfComms = cardinalityOfComms_new;
}
}
if (nrCforBlkGMem > 0) {
if (isGauss) {
thrust::fill_n(thrust::device, in.begin(), in.size(), 0.0); // initialize in to all zeros '0'
//tot_new = tot;
//cardinalityOfComms_new = cardinalityOfComms;
}
wrpSz = PHY_WRP_SZ;
cudaEventRecord(start, 0);
lookAtNeigboringComms << <nrBlockForLargeNhoods, (NR_THREAD_PER_BLOCK * 2)>>>(
thrust::raw_pointer_cast(g.indices.data()),
thrust::raw_pointer_cast(g.links.data()),
thrust::raw_pointer_cast(g.weights.data()),
thrust::raw_pointer_cast(n2c.data()),
thrust::raw_pointer_cast(in.data()),
thrust::raw_pointer_cast(tot.data()), g.type,
thrust::raw_pointer_cast(n2c_new.data()),
NULL,
thrust::raw_pointer_cast(tot_new.data()),
thrust::raw_pointer_cast(moveCounters.data()), g.total_weight,
thrust::raw_pointer_cast(g_next.indices.data()), nrCforBlkGMem,
thrust::raw_pointer_cast(globalHashTable.data()),
thrust::raw_pointer_cast(hashTablePtrs.data()),
thrust::raw_pointer_cast(devPrimes.data()), nb_prime, wrpSz,
thrust::raw_pointer_cast(cardinalityOfComms.data()),
thrust::raw_pointer_cast(cardinalityOfComms_new.data()),
thrust::raw_pointer_cast(wDegs.data()));
report_time(start, stop, "lookAtNeigboringComms");
nb_moves = nb_moves + thrust::reduce(moveCounters.begin(), moveCounters.begin() + nrBlockForLargeNhoods, (int) 0);
if (0) {
changeAssignment << < nr_of_block, NR_THREAD_PER_BLOCK>>>(
thrust::raw_pointer_cast(n2c.data()), // change from
thrust::raw_pointer_cast(n2c_new.data()), // change to
thrust::raw_pointer_cast(g_next.indices.data()), nrCforBlkGMem);
}
if (isGauss) {
n2c = n2c_new;
tot = tot_new;
cardinalityOfComms = cardinalityOfComms_new;
}
}
if (nrCforBlkSMem > 0) {
if (isGauss) {
thrust::fill_n(thrust::device, in.begin(), in.size(), 0.0); // initialize in to all zeros '0'
//tot_new = tot;
//cardinalityOfComms_new = cardinalityOfComms;
}
wrpSz = PHY_WRP_SZ;
cudaEventRecord(start, 0);
lookAtNeigboringComms << <nrBlockForLargeNhoods, NR_THREAD_PER_BLOCK>>>(
thrust::raw_pointer_cast(g.indices.data()),
thrust::raw_pointer_cast(g.links.data()),
thrust::raw_pointer_cast(g.weights.data()),
thrust::raw_pointer_cast(n2c.data()),
thrust::raw_pointer_cast(in.data()),
thrust::raw_pointer_cast(tot.data()), g.type,
thrust::raw_pointer_cast(n2c_new.data()),
NULL,
thrust::raw_pointer_cast(tot_new.data()),
thrust::raw_pointer_cast(moveCounters.data()), g.total_weight,
thrust::raw_pointer_cast(g_next.indices.data()) + nrCforBlkGMem, nrCforBlkSMem,
thrust::raw_pointer_cast(globalHashTable.data()),
thrust::raw_pointer_cast(hashTablePtrs.data()),
thrust::raw_pointer_cast(devPrimes.data()), nb_prime, wrpSz,
thrust::raw_pointer_cast(cardinalityOfComms.data()),
thrust::raw_pointer_cast(cardinalityOfComms_new.data()),
thrust::raw_pointer_cast(wDegs.data()));
report_time(start, stop, "lookAtNeigboringComms(sh)");
nb_moves = nb_moves + thrust::reduce(moveCounters.begin(), moveCounters.begin() + nrBlockForLargeNhoods, (int) 0);
if (0) {
changeAssignment << < nr_of_block, NR_THREAD_PER_BLOCK>>>(
thrust::raw_pointer_cast(n2c.data()), // change from
thrust::raw_pointer_cast(n2c_new.data()), // change to
thrust::raw_pointer_cast(g_next.indices.data()) + nrCforBlkGMem, nrCforBlkSMem);
}
if (isGauss) {
n2c = n2c_new;
tot = tot_new;
cardinalityOfComms = cardinalityOfComms_new;
}
}
*/
//////////////////////////////////////////////////
if (nrC_N_leq8) {
if (isGauss) {
//thrust::fill_n(thrust::device, in.begin(), in.size(), 0.0); // initialize in to all zeros '0'
//tot_new = tot;
//cardinalityOfComms_new = cardinalityOfComms;
}
wrpSz = QUARTER_WARP;
nr_of_block = (nrC_N_leq8 + (NR_THREAD_PER_BLOCK / wrpSz) - 1) / (NR_THREAD_PER_BLOCK / wrpSz);
bucketSizePerWarp = 17; // MUST BE PRIME
sizeHashMem = (NR_THREAD_PER_BLOCK / wrpSz) * bucketSizePerWarp * sizeof (HashItem);
/*
if (0) {
std::cin>>sc;
print_vector(g.indices, "g.indices: ");
print_vector(g.links, "g.links: ");
print_vector(n2c, "n2c:");
print_vector(in, "in: ");
print_vector(tot, "tot:");
print_vector(n2c_new, "n2c_new:");
print_vector(tot_new, "tot_new:");
print_vector(movement_counters, "movement_counters:");
print_vector(g_next.indices, "g_next.indices:");
print_vector(devPrimes, "devPrimes:");
print_vector(cardinalityOfComms, "cardinalityOfComms:");
print_vector(cardinalityOfComms_new, "cardinalityOfComms_new:");
}
*/
cudaEventRecord(start, 0);
//print_vector(in, "in (*): ");
neigh_comm << < nr_of_block, NR_THREAD_PER_BLOCK, sizeHashMem >>>(
community_size,
thrust::raw_pointer_cast(g.indices.data()),
thrust::raw_pointer_cast(g.links.data()),
thrust::raw_pointer_cast(g.weights.data()),
thrust::raw_pointer_cast(n2c.data()),
thrust::raw_pointer_cast(in.data()),
thrust::raw_pointer_cast(tot.data()), g.type,
thrust::raw_pointer_cast(n2c_new.data()),
thrust::raw_pointer_cast(tot_new.data()),
thrust::raw_pointer_cast(movement_counters.data()),
g.total_weight, bucketSizePerWarp,
thrust::raw_pointer_cast(g_next.indices.data()) + nrCforBlkGMem + nrCforBlkSMem + nrCforWrp + nrC_N_leq32 + nrC_N_leq16,
nrC_N_leq8, thrust::raw_pointer_cast(devPrimes.data()), nb_prime,
thrust::raw_pointer_cast(cardinalityOfComms.data()),
thrust::raw_pointer_cast(cardinalityOfComms_new.data()),
wrpSz, thrust::raw_pointer_cast(wDegs.data()));
//print_vector(in, "in (*): ");
report_time(start, stop, "neigh_comm ( <=8)");
//nb_moves = nb_moves + thrust::reduce(movement_counters.begin(), movement_counters.begin() + nrC_N_leq8, (int) 0);
/*
changeAssignment<<< nr_of_block, NR_THREAD_PER_BLOCK>>>(
thrust::raw_pointer_cast(n2c.data()), // change from
thrust::raw_pointer_cast(n2c_new.data()), // change to
thrust::raw_pointer_cast(g_next.indices.data()) + nrCforBlkGMem + nrCforBlkSMem + nrCforWrp + nrC_N_leq32 + nrC_N_leq16,
nrC_N_leq8);
*/
if (isGauss) {
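// With isGauss set, this looks like a Gauss-Seidel-style sweep: fold the
// tentative moves back into the working arrays so later vertices in the same
// sweep see them; the else-branch below is the plain bulk swap (Jacobi-style).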
if (isToUpdate) {
assert(community_size == n2c.size());
assert(community_size == n2c_new.size());
assert(community_size == tot.size());
assert(community_size == tot_new.size());
assert(community_size == cardinalityOfComms.size());
assert(community_size == cardinalityOfComms_new.size());
nr_of_block = (community_size + NR_THREAD_PER_BLOCK - 1) / NR_THREAD_PER_BLOCK;
update << <nr_of_block, NR_THREAD_PER_BLOCK>>>(community_size,
thrust::raw_pointer_cast(tot.data()),
thrust::raw_pointer_cast(tot_new.data()),
thrust::raw_pointer_cast(n2c.data()),
thrust::raw_pointer_cast(n2c_new.data()),
thrust::raw_pointer_cast(cardinalityOfComms.data()),
thrust::raw_pointer_cast(cardinalityOfComms_new.data()));
} else {
n2c = n2c_new;
tot = tot_new;
cardinalityOfComms = cardinalityOfComms_new;
}
}
}
if (nrC_N_leq16) {
if (isGauss) {
//thrust::fill_n(thrust::device, in.begin(), in.size(), 0.0); // initialize in to all zeros '0'
//tot_new = tot;
//cardinalityOfComms_new = cardinalityOfComms;
}
wrpSz = HALF_WARP;
nr_of_block = (nrC_N_leq16 + (NR_THREAD_PER_BLOCK / wrpSz) - 1) / (NR_THREAD_PER_BLOCK / wrpSz);
bucketSizePerWarp = 31; // MUST BE PRIME
sizeHashMem = (NR_THREAD_PER_BLOCK / wrpSz) * bucketSizePerWarp * sizeof (HashItem);
cudaEventRecord(start, 0);
neigh_comm << < nr_of_block, NR_THREAD_PER_BLOCK, sizeHashMem >>>(
community_size,
thrust::raw_pointer_cast(g.indices.data()),
thrust::raw_pointer_cast(g.links.data()),
thrust::raw_pointer_cast(g.weights.data()),
thrust::raw_pointer_cast(n2c.data()),
thrust::raw_pointer_cast(in.data()),
thrust::raw_pointer_cast(tot.data()), g.type,
thrust::raw_pointer_cast(n2c_new.data()),
thrust::raw_pointer_cast(tot_new.data()),
thrust::raw_pointer_cast(movement_counters.data()),
g.total_weight, bucketSizePerWarp,
thrust::raw_pointer_cast(g_next.indices.data()) + nrCforBlkGMem + nrCforBlkSMem + nrCforWrp + nrC_N_leq32,
nrC_N_leq16, thrust::raw_pointer_cast(devPrimes.data()), nb_prime,
thrust::raw_pointer_cast(cardinalityOfComms.data()),
thrust::raw_pointer_cast(cardinalityOfComms_new.data()),
wrpSz, thrust::raw_pointer_cast(wDegs.data()));
report_time(start, stop, "neigh_comm ( <=16)");
//nb_moves = nb_moves + thrust::reduce(movement_counters.begin(), movement_counters.begin() + nrC_N_leq16, (int) 0);
/*
changeAssignment<<< nr_of_block, NR_THREAD_PER_BLOCK>>>(
thrust::raw_pointer_cast(n2c.data()), // change from
thrust::raw_pointer_cast(n2c_new.data()), // change to
thrust::raw_pointer_cast(g_next.indices.data()) + nrCforBlkGMem + nrCforBlkSMem + nrCforWrp + nrC_N_leq32,
nrC_N_leq16);
*/
if (isGauss) {
if (isToUpdate) {
nr_of_block = (community_size + NR_THREAD_PER_BLOCK - 1) / NR_THREAD_PER_BLOCK;
update << <nr_of_block, NR_THREAD_PER_BLOCK>>>(community_size,
thrust::raw_pointer_cast(tot.data()),
thrust::raw_pointer_cast(tot_new.data()),
thrust::raw_pointer_cast(n2c.data()),
thrust::raw_pointer_cast(n2c_new.data()),
thrust::raw_pointer_cast(cardinalityOfComms.data()),
thrust::raw_pointer_cast(cardinalityOfComms_new.data()));
} else {
n2c = n2c_new;
tot = tot_new;
cardinalityOfComms = cardinalityOfComms_new;
}
}
}
if (nrC_N_leq4) {
if (isGauss) {
//thrust::fill_n(thrust::device, in.begin(), in.size(), 0.0); // initialize in to all zeros '0'
//tot_new = tot;
//cardinalityOfComms_new = cardinalityOfComms;
}
wrpSz = QUARTER_WARP / 2;
nr_of_block = (nrC_N_leq4 + (NR_THREAD_PER_BLOCK / wrpSz) - 1) / (NR_THREAD_PER_BLOCK / wrpSz);
bucketSizePerWarp = 7; // MUST BE PRIME
sizeHashMem = (NR_THREAD_PER_BLOCK / wrpSz) * bucketSizePerWarp * sizeof (HashItem);
/*
if (0) {
std::cin>>sc;
print_vector(g.indices, "g.indices: ");
print_vector(g.links, "g.links: ");
print_vector(n2c, "n2c:");
print_vector(in, "in: ");
print_vector(tot, "tot:");
print_vector(n2c_new, "n2c_new:");
print_vector(tot_new, "tot_new:");
print_vector(movement_counters, "movement_counters:");
print_vector(g_next.indices, "g_next.indices:");
print_vector(devPrimes, "devPrimes:");
print_vector(cardinalityOfComms, "cardinalityOfComms:");
print_vector(cardinalityOfComms_new, "cardinalityOfComms_new:");
}
*/
cudaEventRecord(start, 0);
//print_vector(in, "in (*): ");
neigh_comm << < nr_of_block, NR_THREAD_PER_BLOCK, sizeHashMem >>>(
community_size,
thrust::raw_pointer_cast(g.indices.data()),
thrust::raw_pointer_cast(g.links.data()),
thrust::raw_pointer_cast(g.weights.data()),
thrust::raw_pointer_cast(n2c.data()),
thrust::raw_pointer_cast(in.data()),
thrust::raw_pointer_cast(tot.data()), g.type,
thrust::raw_pointer_cast(n2c_new.data()),
thrust::raw_pointer_cast(tot_new.data()),
thrust::raw_pointer_cast(movement_counters.data()),
g.total_weight, bucketSizePerWarp,
thrust::raw_pointer_cast(g_next.indices.data()) + nrCforBlkGMem + nrCforBlkSMem + nrCforWrp + nrC_N_leq32 + nrC_N_leq16 + nrC_N_leq8,
nrC_N_leq4, thrust::raw_pointer_cast(devPrimes.data()), nb_prime,
thrust::raw_pointer_cast(cardinalityOfComms.data()),
thrust::raw_pointer_cast(cardinalityOfComms_new.data()),
wrpSz, thrust::raw_pointer_cast(wDegs.data()));
//print_vector(in, "in (*): ");
report_time(start, stop, "neigh_comm ( <=4)");
//nb_moves = nb_moves + thrust::reduce(movement_counters.begin(), movement_counters.begin() + nrC_N_leq4, (int) 0);
/*
changeAssignment<<< nr_of_block, NR_THREAD_PER_BLOCK>>>(
thrust::raw_pointer_cast(n2c.data()), // change from
thrust::raw_pointer_cast(n2c_new.data()), // change to
thrust::raw_pointer_cast(g_next.indices.data()) + nrCforBlkGMem + nrCforBlkSMem + nrCforWrp + nrC_N_leq32 + nrC_N_leq16 + nrC_N_leq8 ,
nrC_N_leq4);
*/
if (isGauss) {
if (isToUpdate) {
nr_of_block = (community_size + NR_THREAD_PER_BLOCK - 1) / NR_THREAD_PER_BLOCK;
update << <nr_of_block, NR_THREAD_PER_BLOCK>>>(community_size,
thrust::raw_pointer_cast(tot.data()),
thrust::raw_pointer_cast(tot_new.data()),
thrust::raw_pointer_cast(n2c.data()),
thrust::raw_pointer_cast(n2c_new.data()),
thrust::raw_pointer_cast(cardinalityOfComms.data()),
thrust::raw_pointer_cast(cardinalityOfComms_new.data()));
} else {
n2c = n2c_new;
tot = tot_new;
cardinalityOfComms = cardinalityOfComms_new;
}
}
}
if (nrC_N_leq32) {
if (isGauss) {
//thrust::fill_n(thrust::device, in.begin(), in.size(), 0.0); // initialize in to all zeros '0'
//tot_new = tot;
//cardinalityOfComms_new = cardinalityOfComms;
}
wrpSz = PHY_WRP_SZ;
nr_of_block = (nrC_N_leq32 + (NR_THREAD_PER_BLOCK / wrpSz) - 1) / (NR_THREAD_PER_BLOCK / wrpSz);
bucketSizePerWarp = 61; //MUST BE PRIME
sizeHashMem = (NR_THREAD_PER_BLOCK / wrpSz) * bucketSizePerWarp * sizeof (HashItem);
cudaEventRecord(start, 0);
neigh_comm << < nr_of_block, NR_THREAD_PER_BLOCK, sizeHashMem >>>(
community_size,
thrust::raw_pointer_cast(g.indices.data()),
thrust::raw_pointer_cast(g.links.data()),
thrust::raw_pointer_cast(g.weights.data()),
thrust::raw_pointer_cast(n2c.data()),
thrust::raw_pointer_cast(in.data()),
thrust::raw_pointer_cast(tot.data()), g.type,
thrust::raw_pointer_cast(n2c_new.data()),
thrust::raw_pointer_cast(tot_new.data()),
thrust::raw_pointer_cast(movement_counters.data()),
g.total_weight, bucketSizePerWarp,
thrust::raw_pointer_cast(g_next.indices.data()) + nrCforBlkGMem + nrCforBlkSMem + nrCforWrp,
nrC_N_leq32, thrust::raw_pointer_cast(devPrimes.data()), nb_prime,
thrust::raw_pointer_cast(cardinalityOfComms.data()),
thrust::raw_pointer_cast(cardinalityOfComms_new.data()),
wrpSz, thrust::raw_pointer_cast(wDegs.data()));
report_time(start, stop, "neigh_comm(<=32)");
//nb_moves = nb_moves + thrust::reduce(movement_counters.begin(), movement_counters.begin() + nrC_N_leq32, (int) 0);
if (0) {
changeAssignment << < nr_of_block, NR_THREAD_PER_BLOCK>>>(
thrust::raw_pointer_cast(n2c.data()), // change from
thrust::raw_pointer_cast(n2c_new.data()), // change to
thrust::raw_pointer_cast(g_next.indices.data()) + nrCforBlkGMem + nrCforBlkSMem + nrCforWrp,
nrC_N_leq32);
}
if (isGauss) {
if (isToUpdate) {
nr_of_block = (community_size + NR_THREAD_PER_BLOCK - 1) / NR_THREAD_PER_BLOCK;
update << <nr_of_block, NR_THREAD_PER_BLOCK>>>(community_size,
thrust::raw_pointer_cast(tot.data()),
thrust::raw_pointer_cast(tot_new.data()),
thrust::raw_pointer_cast(n2c.data()),
thrust::raw_pointer_cast(n2c_new.data()),
thrust::raw_pointer_cast(cardinalityOfComms.data()),
thrust::raw_pointer_cast(cardinalityOfComms_new.data()));
} else {
n2c = n2c_new;
tot = tot_new;
cardinalityOfComms = cardinalityOfComms_new;
}
}
}
if (nrCforWrp) {
if (isGauss) {
//thrust::fill_n(thrust::device, in.begin(), in.size(), 0.0); // initialize in to all zeros '0'
//tot_new = tot;
//cardinalityOfComms_new = cardinalityOfComms;
}
wrpSz = PHY_WRP_SZ;
nr_of_block = (nrCforWrp + (NR_THREAD_PER_BLOCK / wrpSz) - 1) / (NR_THREAD_PER_BLOCK / wrpSz);
bucketSizePerWarp = WARP_TABLE_SIZE_1;
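// WARP_TABLE_SIZE_1 should follow the same rule as the literal bucket sizes
// used above (7, 17, 31, 61): presumably a prime, for the same hashing reason.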
sizeHashMem = (NR_THREAD_PER_BLOCK / wrpSz) * bucketSizePerWarp * sizeof (HashItem);
cudaEventRecord(start, 0);
neigh_comm << < nr_of_block, NR_THREAD_PER_BLOCK, sizeHashMem >>>(
community_size,
thrust::raw_pointer_cast(g.indices.data()),
thrust::raw_pointer_cast(g.links.data()),
thrust::raw_pointer_cast(g.weights.data()),
thrust::raw_pointer_cast(n2c.data()),
thrust::raw_pointer_cast(in.data()),
thrust::raw_pointer_cast(tot.data()), g.type,
thrust::raw_pointer_cast(n2c_new.data()),
thrust::raw_pointer_cast(tot_new.data()),
thrust::raw_pointer_cast(movement_counters.data()),
g.total_weight, bucketSizePerWarp,
thrust::raw_pointer_cast(g_next.indices.data()) + nrCforBlkGMem + nrCforBlkSMem,
nrCforWrp, thrust::raw_pointer_cast(devPrimes.data()), nb_prime,
thrust::raw_pointer_cast(cardinalityOfComms.data()),
thrust::raw_pointer_cast(cardinalityOfComms_new.data()),
wrpSz, thrust::raw_pointer_cast(wDegs.data()));
report_time(start, stop, "neigh_comm");
//nb_moves = thrust::reduce(movement_counters.begin(), movement_counters.begin() + nrCforWrp, (int) 0);
// change community assignment of processed vertices
if (0) {
changeAssignment << < nr_of_block, NR_THREAD_PER_BLOCK>>>(
thrust::raw_pointer_cast(n2c.data()), // change from
thrust::raw_pointer_cast(n2c_new.data()), // change to
thrust::raw_pointer_cast(g_next.indices.data()) + nrCforBlkGMem + nrCforBlkSMem, // of these communities
nrCforWrp);
}
if (isGauss) {
if (isToUpdate) {
nr_of_block = (community_size + NR_THREAD_PER_BLOCK - 1) / NR_THREAD_PER_BLOCK;
update << <nr_of_block, NR_THREAD_PER_BLOCK>>>(community_size,
thrust::raw_pointer_cast(tot.data()),
thrust::raw_pointer_cast(tot_new.data()),
thrust::raw_pointer_cast(n2c.data()),
thrust::raw_pointer_cast(n2c_new.data()),
thrust::raw_pointer_cast(cardinalityOfComms.data()),
thrust::raw_pointer_cast(cardinalityOfComms_new.data()));
} else {
n2c = n2c_new;
tot = tot_new;
cardinalityOfComms = cardinalityOfComms_new;
}
}
}
if (nrCforBlkSMem > 0) {
if (isGauss) {
//thrust::fill_n(thrust::device, in.begin(), in.size(), 0.0); // initialize in to all zeros '0'
//tot_new = tot;
//cardinalityOfComms_new = cardinalityOfComms;
}
wrpSz = PHY_WRP_SZ;
cudaEventRecord(start, 0);
//std::cout<<" nrBlockForLargeNhoods :"<<nrBlockForLargeNhoods<<" nrCforBlkSMem: "<< nrCforBlkSMem<<std::endl;
lookAtNeigboringComms << <nrBlockForLargeNhoods, NR_THREAD_PER_BLOCK>>>(
thrust::raw_pointer_cast(g.indices.data()),
thrust::raw_pointer_cast(g.links.data()),
thrust::raw_pointer_cast(g.weights.data()),
thrust::raw_pointer_cast(n2c.data()),
thrust::raw_pointer_cast(in.data()),
thrust::raw_pointer_cast(tot.data()), g.type,
thrust::raw_pointer_cast(n2c_new.data()),
NULL,
thrust::raw_pointer_cast(tot_new.data()),
thrust::raw_pointer_cast(moveCounters.data()), g.total_weight,
thrust::raw_pointer_cast(g_next.indices.data()) + nrCforBlkGMem, nrCforBlkSMem,
thrust::raw_pointer_cast(globalHashTable.data()),
thrust::raw_pointer_cast(hashTablePtrs.data()),
thrust::raw_pointer_cast(devPrimes.data()), nb_prime, wrpSz,
thrust::raw_pointer_cast(cardinalityOfComms.data()),
thrust::raw_pointer_cast(cardinalityOfComms_new.data()),
thrust::raw_pointer_cast(wDegs.data()));
report_time(start, stop, "lookAtNeigboringComms(sh)");
//nb_moves = nb_moves + thrust::reduce(moveCounters.begin(), moveCounters.begin() + nrBlockForLargeNhoods, (int) 0);
/*
if (0) {
changeAssignment << < nr_of_block, NR_THREAD_PER_BLOCK>>>(
thrust::raw_pointer_cast(n2c.data()), // change from
thrust::raw_pointer_cast(n2c_new.data()), // change to
thrust::raw_pointer_cast(g_next.indices.data()) + nrCforBlkGMem, nrCforBlkSMem);
}
*/
if (isGauss) {
if (isToUpdate) {
nr_of_block = (community_size + NR_THREAD_PER_BLOCK - 1) / NR_THREAD_PER_BLOCK;
update << <nr_of_block, NR_THREAD_PER_BLOCK>>>(community_size,
thrust::raw_pointer_cast(tot.data()),
thrust::raw_pointer_cast(tot_new.data()),
thrust::raw_pointer_cast(n2c.data()),
thrust::raw_pointer_cast(n2c_new.data()),
thrust::raw_pointer_cast(cardinalityOfComms.data()),
thrust::raw_pointer_cast(cardinalityOfComms_new.data()));
} else {
n2c = n2c_new;
tot = tot_new;
cardinalityOfComms = cardinalityOfComms_new;
}
}
}
#ifdef LARGE_LATER
if (nrCforBlkGMem > 0) {
if (isGauss) {
//thrust::fill_n(thrust::device, in.begin(), in.size(), 0.0); // initialize in to all zeros '0'
//tot_new = tot;
//cardinalityOfComms_new = cardinalityOfComms;
}
wrpSz = PHY_WRP_SZ;
cudaEventRecord(start, 0);
//std::cout<<" nrBlockForLargeNhoods: "<<nrBlockForLargeNhoods<<" nrCforBlkGMem: "<< nrCforBlkGMem<<std::endl;
lookAtNeigboringComms << <nrBlockForLargeNhoods, (NR_THREAD_PER_BLOCK * 2)>>>(
thrust::raw_pointer_cast(g.indices.data()),
thrust::raw_pointer_cast(g.links.data()),
thrust::raw_pointer_cast(g.weights.data()),
thrust::raw_pointer_cast(n2c.data()),
thrust::raw_pointer_cast(in.data()),
thrust::raw_pointer_cast(tot.data()), g.type,
thrust::raw_pointer_cast(n2c_new.data()),
NULL,
thrust::raw_pointer_cast(tot_new.data()),
thrust::raw_pointer_cast(moveCounters.data()), g.total_weight,
thrust::raw_pointer_cast(g_next.indices.data()), nrCforBlkGMem,
thrust::raw_pointer_cast(globalHashTable.data()),
thrust::raw_pointer_cast(hashTablePtrs.data()),
thrust::raw_pointer_cast(devPrimes.data()), nb_prime, wrpSz,
thrust::raw_pointer_cast(cardinalityOfComms.data()),
thrust::raw_pointer_cast(cardinalityOfComms_new.data()),
thrust::raw_pointer_cast(wDegs.data()));
report_time(start, stop, "lookAtNeigboringComms");
//nb_moves = nb_moves + thrust::reduce(moveCounters.begin(), moveCounters.begin() + nrBlockForLargeNhoods, (int) 0);
/*
if (0) {
changeAssignment << < nr_of_block, NR_THREAD_PER_BLOCK>>>(
thrust::raw_pointer_cast(n2c.data()), // change from
thrust::raw_pointer_cast(n2c_new.data()), // change to
thrust::raw_pointer_cast(g_next.indices.data()), nrCforBlkGMem);
}
*/
if (isGauss) {
if (isToUpdate) {
nr_of_block = (community_size + NR_THREAD_PER_BLOCK - 1) / NR_THREAD_PER_BLOCK;
update << <nr_of_block, NR_THREAD_PER_BLOCK>>>(community_size,
thrust::raw_pointer_cast(tot.data()),
thrust::raw_pointer_cast(tot_new.data()),
thrust::raw_pointer_cast(n2c.data()),
thrust::raw_pointer_cast(n2c_new.data()),
thrust::raw_pointer_cast(cardinalityOfComms.data()),
thrust::raw_pointer_cast(cardinalityOfComms_new.data()));
} else {
n2c = n2c_new;
tot = tot_new;
cardinalityOfComms = cardinalityOfComms_new;
}
}
}
#endif
/*
if (isGauss) {
//thrust::fill_n(thrust::device, in.begin(), in.size(), 0.0); // initialize in to all zeros '0'
wrpSz = PHY_WRP_SZ;
load_per_blk = CHUNK_PER_WARP * (NR_THREAD_PER_BLOCK / wrpSz);
nr_of_block = (community_size + load_per_blk - 1) / load_per_blk;
//void computeInternals(int *indices, unsigned int *links, float *weights, int *n2c, float *in, unsigned int nrComms);
computeInternals << <nr_of_block, NR_THREAD_PER_BLOCK>>>(thrust::raw_pointer_cast(g.indices.data()),
thrust::raw_pointer_cast(g.links.data()),
thrust::raw_pointer_cast(g.weights.data()),
thrust::raw_pointer_cast(n2c.data()),
thrust::raw_pointer_cast(in.data()), community_size, g.type);
}*/
//std::cout << "---------Now " << nb_moves << std::endl;
/*
std::cout << "#Moves (Total):" << nb_moves << " nrCforBlkSMem : " << nrCforBlkSMem << " nrCforBlkGMem :" << nrCforBlkGMem << std::endl;
if (0) {
std::cout << "After Traversing.............. " << std::endl;
print_vector(in, "IN:");
print_vector(tot, "TOT:");
print_vector(n2c_new, " n2c_new : ");
print_vector(tot_new, " tot_new : ");
}
*/
/*
if (0) {
float sum_in = thrust::reduce(in.begin(), in.end(), 0.0);
//float sum_tot = thrust::reduce(tot.begin(), tot.end(), 0.0);
thrust::host_vector<float> hvec = tot;
thrust::host_vector<float> hIN = in;
double stot = 0;
for (int i = 0; i < hvec.size(); i++)
stot += hvec[i] * hvec[i];
std::cout << "sin:" << sum_in << " stot: " << stot << std::endl;
//std::cout << " IN[0]: "<< hIN[0]<< " IN[1]: "<< hIN[1] << std::endl;
//std::cout << "sum_in = " << sum_in << " sum_tot = " << sum_tot << std::endl;
}
*/
new_mod = modularity(tot, in);
double scur_mod = cur_mod;
double snew_mod = new_mod;
/*
std::cout << nrIteration << " " << "Modularity " << cur_mod << " --> " << new_mod <<
" Gain: " << (new_mod - cur_mod) << std::endl;
*/
if ((new_mod - cur_mod) >= threshold) { // accept the sweep only if the modularity gain reaches the threshold
n2c_old = n2c;
n2c = n2c_new;
tot = tot_new;
cardinalityOfComms = cardinalityOfComms_new;
cur_mod = new_mod;
if (cur_mod < init_mod) {
cur_mod = init_mod;
}
improvement = true;
} else {
//std::cout << "Break the loop " << std::endl;
break;
}
if (nrIteration)
std::cout << nrIteration << " " << "Modularity " << scur_mod << " --> "
<< snew_mod << " Gain: " << (snew_mod - scur_mod) << std::endl;
/*
if (verbose) {
print_vector(n2c, " n2c (After Swap): ");
print_vector(in, " in (After Swap): ");
print_vector(tot, " tot (After Swap): ");
}
*/
tot_new.clear();
t2 = clock();
float diff = (float)t2 - (float) t1;
float seconds = diff / CLOCKS_PER_SEC;
std::cout<< "iteration "<<(nrIteration+1)<<": "<<seconds<<" sec"<<std::endl;
} while (++nrIteration < 1000);
cardinalityOfComms.clear();
cardinalityOfComms_new.clear();
globalHashTable.clear();
hashTablePtrs.clear();
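// Roll back to the assignment saved before the last accepted move; the caller
// presumably contracts the graph with this n2c mapping.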
n2c = n2c_old;
n2c_old.clear();
//std::cout<<"#iteration: "<<nrIteration<<std::endl;
// print_vector(n2c, " n2c (Before contraction)");
/*
thrust::host_vector<int> hn2c = n2c;
std::ofstream ofs ("n2c.txt", std::ofstream::out);
for(int i=0; i< hn2c.size(); i++) {
ofs<<hn2c[i]<<" ";
}
ofs<<"\n";
ofs.close();
std::ofstream outfile ("n2c.txt",std::ofstream::binary);
outfile.write ((char*)&hn2c[0],sizeof(int)*hn2c.size());
char newline= '\n';
outfile.write ((char*)&newline,sizeof(char));
outfile.close();
*/
tot_new.clear();
g_next.indices.clear();
g_next.links.clear();
n2c_new.clear(); // <-----------
wDegs.clear();
//cudaEventDestroy(start);
//cudaEventDestroy(stop);
return cur_mod;
}
|
b7145fdd24105be3843c3d5960fad211f809c59b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
extern "C" {
#include "crop_layer.h"
#include "utils.h"
#include "hip/hip_runtime.h"
#include "image.h"
}
__device__ float get_pixel_kernel(float *image, int w, int h, int x, int y, int c) {
if(x < 0 || x >= w || y < 0 || y >= h) return 0;
return image[x + w*(y + c*h)];
}
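// Per-pixel RGB -> HSV with hue in sector units [0, 6) rather than degrees;
// hsv_to_rgb_kernel below inverts it under the same convention.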
__device__ float3 rgb_to_hsv_kernel(float3 rgb) {
float r = rgb.x;
float g = rgb.y;
float b = rgb.z;
float h, s, v;
float max = (r > g) ? ( (r > b) ? r : b) : ( (g > b) ? g : b);
float min = (r < g) ? ( (r < b) ? r : b) : ( (g < b) ? g : b);
float delta = max - min;
v = max;
if(max == 0) {
s = 0;
h = -1;
} else {
s = delta/max;
if(r == max) {
h = (g - b) / delta;
} else if (g == max) {
h = 2 + (b - r) / delta;
} else {
h = 4 + (r - g) / delta;
}
if (h < 0) h += 6;
}
return make_float3(h, s, v);
}
__device__ float3 hsv_to_rgb_kernel(float3 hsv) {
float h = hsv.x;
float s = hsv.y;
float v = hsv.z;
float r, g, b;
float f, p, q, t;
if (s == 0) {
r = g = b = v;
} else {
int index = (int) floorf(h);
f = h - index;
p = v*(1-s);
q = v*(1-s*f);
t = v*(1-s*(1-f));
if(index == 0) {
r = v; g = t; b = p;
} else if(index == 1) {
r = q; g = v; b = p;
} else if(index == 2) {
r = p; g = v; b = t;
} else if(index == 3) {
r = p; g = q; b = v;
} else if(index == 4) {
r = t; g = p; b = v;
} else {
r = v; g = p; b = q;
}
}
r = (r < 0) ? 0 : ((r > 1) ? 1 : r);
g = (g < 0) ? 0 : ((g > 1) ? 1 : g);
b = (b < 0) ? 0 : ((b > 1) ? 1 : b);
return make_float3(r, g, b);
}
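// Standard bilinear interpolation: blend the four surrounding texels with
// weights from the fractional offsets; get_pixel_kernel zero-pads reads that
// fall outside the image.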
__device__ float bilinear_interpolate_kernel(float *image, int w, int h, float x, float y, int c) {
int ix = (int) floorf(x);
int iy = (int) floorf(y);
float dx = x - ix;
float dy = y - iy;
float val = (1-dy) * (1-dx) * get_pixel_kernel(image, w, h, ix, iy, c) +
dy * (1-dx) * get_pixel_kernel(image, w, h, ix, iy+1, c) +
(1-dy) * dx * get_pixel_kernel(image, w, h, ix+1, iy, c) +
dy * dx * get_pixel_kernel(image, w, h, ix+1, iy+1, c);
return val;
}
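// Photometric augmentation: random per-image saturation/exposure jitter in HSV
// space when training, then an affine rescale and a per-channel shift; the
// shift offsets are drawn once per batch (rand[0..2]) and zeroed at eval time.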
__global__ void levels_image_kernel(float *image, float *rand, int batch, int w, int h, int train, float saturation, float exposure, float translate, float scale, float shift) {
int size = batch * w * h;
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= size) return;
int x = id % w;
id /= w;
int y = id % h;
id /= h;
float rshift = rand[0];
float gshift = rand[1];
float bshift = rand[2];
float r0 = rand[8*id + 0];
float r1 = rand[8*id + 1];
float r2 = rand[8*id + 2];
float r3 = rand[8*id + 3];
saturation = r0*(saturation - 1) + 1;
saturation = (r1 > .5f) ? 1.f/saturation : saturation;
exposure = r2*(exposure - 1) + 1;
exposure = (r3 > .5f) ? 1.f/exposure : exposure;
size_t offset = id * h * w * 3;
image += offset;
float r = image[x + w*(y + h*0)];
float g = image[x + w*(y + h*1)];
float b = image[x + w*(y + h*2)];
float3 rgb = make_float3(r,g,b);
if(train) {
float3 hsv = rgb_to_hsv_kernel(rgb);
hsv.y *= saturation;
hsv.z *= exposure;
rgb = hsv_to_rgb_kernel(hsv);
} else {
shift = 0;
}
image[x + w*(y + h*0)] = rgb.x*scale + translate + (rshift - .5f)*shift;
image[x + w*(y + h*1)] = rgb.y*scale + translate + (gshift - .5f)*shift;
image[x + w*(y + h*2)] = rgb.z*scale + translate + (bshift - .5f)*shift;
}
__global__ void forward_crop_layer_kernel(float *input, float *rand, int size, int c, int h, int w, int crop_height, int crop_width, int train, int flip, float angle, float *output) {
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= size) return;
float cx = w/2.f;
float cy = h/2.f;
int count = id;
int j = id % crop_width;
id /= crop_width;
int i = id % crop_height;
id /= crop_height;
int k = id % c;
id /= c;
int b = id;
float r4 = rand[8*b + 4];
float r5 = rand[8*b + 5];
float r6 = rand[8*b + 6];
float r7 = rand[8*b + 7];
float dw = (w - crop_width)*r4;
float dh = (h - crop_height)*r5;
flip = (flip && (r6 > .5f));
angle = 2*angle*r7 - angle;
if(!train) {
dw = (w - crop_width)/2.f;
dh = (h - crop_height)/2.f;
flip = 0;
angle = 0;
}
input += w*h*c*b;
float x = (flip) ? w - dw - j - 1 : j + dw;
float y = i + dh;
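// Rotate the sample position by `angle` around the image center (cx, cy),
// then bilinearly sample the source at the rotated coordinates.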
float rx = cosf(angle)*(x-cx) - sinf(angle)*(y-cy) + cx;
float ry = sinf(angle)*(x-cx) + cosf(angle)*(y-cy) + cy;
output[count] = bilinear_interpolate_kernel(input, w, h, rx, ry, k);
}
extern "C" void forward_crop_layer_gpu(crop_layer layer, network net) {
cuda_random(layer.rand_gpu, layer.batch*8);
float radians = layer.angle*3.14159265f/180.f;
float scale = 2;
float translate = -1;
if(layer.noadjust) {
scale = 1;
translate = 0;
}
int size = layer.batch * layer.w * layer.h;
hipLaunchKernelGGL(( levels_image_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, net.input_gpu, layer.rand_gpu, layer.batch, layer.w, layer.h, net.train, layer.saturation, layer.exposure, translate, scale, layer.shift);
check_error(hipPeekAtLastError());
size = layer.batch*layer.c*layer.out_w*layer.out_h;
hipLaunchKernelGGL(( forward_crop_layer_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, net.input_gpu, layer.rand_gpu, size, layer.c, layer.h, layer.w, layer.out_h, layer.out_w, net.train, layer.flip, radians, layer.output_gpu);
check_error(hipPeekAtLastError());
/*
cuda_pull_array(layer.output_gpu, layer.output, size);
image im = float_to_image(layer.crop_width, layer.crop_height, layer.c, layer.output + 0*(size/layer.batch));
image im2 = float_to_image(layer.crop_width, layer.crop_height, layer.c, layer.output + 1*(size/layer.batch));
image im3 = float_to_image(layer.crop_width, layer.crop_height, layer.c, layer.output + 2*(size/layer.batch));
translate_image(im, -translate);
scale_image(im, 1/scale);
translate_image(im2, -translate);
scale_image(im2, 1/scale);
translate_image(im3, -translate);
scale_image(im3, 1/scale);
show_image(im, "cropped");
show_image(im2, "cropped2");
show_image(im3, "cropped3");
cvWaitKey(0);
*/
}
| b7145fdd24105be3843c3d5960fad211f809c59b.cu | #include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
extern "C" {
#include "crop_layer.h"
#include "utils.h"
#include "cuda.h"
#include "image.h"
}
__device__ float get_pixel_kernel(float *image, int w, int h, int x, int y, int c) {
if(x < 0 || x >= w || y < 0 || y >= h) return 0;
return image[x + w*(y + c*h)];
}
__device__ float3 rgb_to_hsv_kernel(float3 rgb) {
float r = rgb.x;
float g = rgb.y;
float b = rgb.z;
float h, s, v;
float max = (r > g) ? ( (r > b) ? r : b) : ( (g > b) ? g : b);
float min = (r < g) ? ( (r < b) ? r : b) : ( (g < b) ? g : b);
float delta = max - min;
v = max;
if(max == 0) {
s = 0;
h = -1;
} else {
s = delta/max;
if(r == max) {
h = (g - b) / delta;
} else if (g == max) {
h = 2 + (b - r) / delta;
} else {
h = 4 + (r - g) / delta;
}
if (h < 0) h += 6;
}
return make_float3(h, s, v);
}
__device__ float3 hsv_to_rgb_kernel(float3 hsv) {
float h = hsv.x;
float s = hsv.y;
float v = hsv.z;
float r, g, b;
float f, p, q, t;
if (s == 0) {
r = g = b = v;
} else {
int index = (int) floorf(h);
f = h - index;
p = v*(1-s);
q = v*(1-s*f);
t = v*(1-s*(1-f));
if(index == 0) {
r = v; g = t; b = p;
} else if(index == 1) {
r = q; g = v; b = p;
} else if(index == 2) {
r = p; g = v; b = t;
} else if(index == 3) {
r = p; g = q; b = v;
} else if(index == 4) {
r = t; g = p; b = v;
} else {
r = v; g = p; b = q;
}
}
r = (r < 0) ? 0 : ((r > 1) ? 1 : r);
g = (g < 0) ? 0 : ((g > 1) ? 1 : g);
b = (b < 0) ? 0 : ((b > 1) ? 1 : b);
return make_float3(r, g, b);
}
__device__ float bilinear_interpolate_kernel(float *image, int w, int h, float x, float y, int c) {
int ix = (int) floorf(x);
int iy = (int) floorf(y);
float dx = x - ix;
float dy = y - iy;
float val = (1-dy) * (1-dx) * get_pixel_kernel(image, w, h, ix, iy, c) +
dy * (1-dx) * get_pixel_kernel(image, w, h, ix, iy+1, c) +
(1-dy) * dx * get_pixel_kernel(image, w, h, ix+1, iy, c) +
dy * dx * get_pixel_kernel(image, w, h, ix+1, iy+1, c);
return val;
}
__global__ void levels_image_kernel(float *image, float *rand, int batch, int w, int h, int train, float saturation, float exposure, float translate, float scale, float shift) {
int size = batch * w * h;
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= size) return;
int x = id % w;
id /= w;
int y = id % h;
id /= h;
float rshift = rand[0];
float gshift = rand[1];
float bshift = rand[2];
float r0 = rand[8*id + 0];
float r1 = rand[8*id + 1];
float r2 = rand[8*id + 2];
float r3 = rand[8*id + 3];
saturation = r0*(saturation - 1) + 1;
saturation = (r1 > .5f) ? 1.f/saturation : saturation;
exposure = r2*(exposure - 1) + 1;
exposure = (r3 > .5f) ? 1.f/exposure : exposure;
size_t offset = id * h * w * 3;
image += offset;
float r = image[x + w*(y + h*0)];
float g = image[x + w*(y + h*1)];
float b = image[x + w*(y + h*2)];
float3 rgb = make_float3(r,g,b);
if(train) {
float3 hsv = rgb_to_hsv_kernel(rgb);
hsv.y *= saturation;
hsv.z *= exposure;
rgb = hsv_to_rgb_kernel(hsv);
} else {
shift = 0;
}
image[x + w*(y + h*0)] = rgb.x*scale + translate + (rshift - .5f)*shift;
image[x + w*(y + h*1)] = rgb.y*scale + translate + (gshift - .5f)*shift;
image[x + w*(y + h*2)] = rgb.z*scale + translate + (bshift - .5f)*shift;
}
__global__ void forward_crop_layer_kernel(float *input, float *rand, int size, int c, int h, int w, int crop_height, int crop_width, int train, int flip, float angle, float *output) {
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= size) return;
float cx = w/2.f;
float cy = h/2.f;
int count = id;
int j = id % crop_width;
id /= crop_width;
int i = id % crop_height;
id /= crop_height;
int k = id % c;
id /= c;
int b = id;
float r4 = rand[8*b + 4];
float r5 = rand[8*b + 5];
float r6 = rand[8*b + 6];
float r7 = rand[8*b + 7];
float dw = (w - crop_width)*r4;
float dh = (h - crop_height)*r5;
flip = (flip && (r6 > .5f));
angle = 2*angle*r7 - angle;
if(!train) {
dw = (w - crop_width)/2.f;
dh = (h - crop_height)/2.f;
flip = 0;
angle = 0;
}
input += w*h*c*b;
float x = (flip) ? w - dw - j - 1 : j + dw;
float y = i + dh;
float rx = cosf(angle)*(x-cx) - sinf(angle)*(y-cy) + cx;
float ry = sinf(angle)*(x-cx) + cosf(angle)*(y-cy) + cy;
output[count] = bilinear_interpolate_kernel(input, w, h, rx, ry, k);
}
extern "C" void forward_crop_layer_gpu(crop_layer layer, network net) {
cuda_random(layer.rand_gpu, layer.batch*8);
float radians = layer.angle*3.14159265f/180.f;
float scale = 2;
float translate = -1;
if(layer.noadjust) {
scale = 1;
translate = 0;
}
int size = layer.batch * layer.w * layer.h;
levels_image_kernel<<<cuda_gridsize(size), BLOCK>>>(net.input_gpu, layer.rand_gpu, layer.batch, layer.w, layer.h, net.train, layer.saturation, layer.exposure, translate, scale, layer.shift);
check_error(cudaPeekAtLastError());
size = layer.batch*layer.c*layer.out_w*layer.out_h;
forward_crop_layer_kernel<<<cuda_gridsize(size), BLOCK>>>(net.input_gpu, layer.rand_gpu, size, layer.c, layer.h, layer.w, layer.out_h, layer.out_w, net.train, layer.flip, radians, layer.output_gpu);
check_error(cudaPeekAtLastError());
/*
cuda_pull_array(layer.output_gpu, layer.output, size);
image im = float_to_image(layer.crop_width, layer.crop_height, layer.c, layer.output + 0*(size/layer.batch));
image im2 = float_to_image(layer.crop_width, layer.crop_height, layer.c, layer.output + 1*(size/layer.batch));
image im3 = float_to_image(layer.crop_width, layer.crop_height, layer.c, layer.output + 2*(size/layer.batch));
translate_image(im, -translate);
scale_image(im, 1/scale);
translate_image(im2, -translate);
scale_image(im2, 1/scale);
translate_image(im3, -translate);
scale_image(im3, 1/scale);
show_image(im, "cropped");
show_image(im2, "cropped2");
show_image(im3, "cropped3");
cvWaitKey(0);
*/
}
|
a443e642c4d318b12ffdb6f3f71660d27c264041.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <algorithm>
#include <vector>
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/hostdevice.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/gpu/graph_send_recv_funcs.h"
#include "paddle/phi/kernels/graph_send_recv_kernel.h"
namespace phi {
template <typename Context, typename T, typename IndexT>
void GraphSendRecvOpCUDAKernelLaunchHelper(const Context& ctx,
const DenseTensor& x,
const DenseTensor& src_index,
const DenseTensor& dst_index,
const std::string& pool_type,
int64_t out_size,
DenseTensor* out,
DenseTensor* dst_count = nullptr) {
const int& index_size = src_index.dims()[0];
ctx.template Alloc<T>(out);
T* p_output = out->data<T>();
const auto& src_dims = x.dims();
int64_t memset_size = 1;
if (out_size <= 0) {
for (int i = 0; i < src_dims.size(); ++i) {
memset_size *= src_dims[i];
}
} else {
memset_size = out_size;
for (int i = 1; i < src_dims.size(); ++i) {
memset_size *= src_dims[i];
}
}
const size_t& memset_bytes = memset_size * sizeof(T);
if (pool_type == "SUM" || pool_type == "MEAN") {
#ifdef PADDLE_WITH_HIP
hipMemset(p_output, 0, memset_bytes);
#else
hipMemset(p_output, 0, memset_bytes);
#endif
} else if (pool_type == "MAX") {
thrust::device_ptr<T> p_output_ptr(p_output);
thrust::fill(thrust::device,
p_output_ptr,
p_output_ptr + memset_size,
std::numeric_limits<T>::min());
} else if (pool_type == "MIN") {
thrust::device_ptr<T> p_output_ptr(p_output);
thrust::fill(thrust::device,
p_output_ptr,
p_output_ptr + memset_size,
std::numeric_limits<T>::max());
}
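// SUM/MEAN outputs start at zero so gathers can accumulate; MAX/MIN are
// pre-filled with numeric_limits sentinels, which the InputReset kernels
// below presumably normalize for rows that no index ever wrote to.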
if (index_size == 0) return;
int64_t slice_size = 1;
for (int i = 1; i < src_dims.size(); ++i) {
slice_size *= src_dims[i];
}
const T* p_src = x.data<T>();
const IndexT* s_index = src_index.data<IndexT>();
const IndexT* d_index = dst_index.data<IndexT>();
#ifdef PADDLE_WITH_HIP
int block = 256;
#else
int block = 1024;
#endif
int64_t n = slice_size * index_size;
int64_t max_grid_dimx = ctx.GetCUDAMaxGridDimSize()[0];
int64_t grid_tmp = (n + block - 1) / block;
int64_t grid = grid_tmp < max_grid_dimx ? grid_tmp : max_grid_dimx;
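// Clamp the launch to the device's max grid dimension; the kernel is
// presumably grid-stride, so the clamped grid still covers all n work items.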
int64_t input_size = src_dims[0];
if (pool_type == "SUM") {
GraphSendRecvSumCUDAFunctor<T, IndexT> functor;
hipLaunchKernelGGL(( GraphSendRecvCUDAKernel<T, IndexT, GraphSendRecvSumCUDAFunctor<T, IndexT>>)
, dim3(grid), dim3(block), 0, ctx.stream(),
p_src, s_index, d_index, p_output, index_size, slice_size, functor);
} else if (pool_type == "MAX") {
GraphSendRecvMaxCUDAFunctor<T, IndexT> functor;
hipLaunchKernelGGL(( GraphSendRecvCUDAKernel<T, IndexT, GraphSendRecvMaxCUDAFunctor<T, IndexT>>)
, dim3(grid), dim3(block), 0, ctx.stream(),
p_src, s_index, d_index, p_output, index_size, slice_size, functor);
if (out_size > 0) {
input_size = out_size;
}
int64_t grid_max_tmp = (input_size * slice_size + block - 1) / block;
int64_t grid_max =
grid_max_tmp < max_grid_dimx ? grid_max_tmp : max_grid_dimx;
hipLaunchKernelGGL(( InputResetMaxCUDAKernel<T>), dim3(grid_max), dim3(block), 0, ctx.stream(),
p_output, input_size, slice_size);
} else if (pool_type == "MIN") {
GraphSendRecvMinCUDAFunctor<T, IndexT> functor;
hipLaunchKernelGGL(( GraphSendRecvCUDAKernel<T, IndexT, GraphSendRecvMinCUDAFunctor<T, IndexT>>)
, dim3(grid), dim3(block), 0, ctx.stream(),
p_src, s_index, d_index, p_output, index_size, slice_size, functor);
if (out_size > 0) {
input_size = out_size;
}
int64_t grid_min_tmp = (input_size * slice_size + block - 1) / block;
int64_t grid_min =
grid_min_tmp < max_grid_dimx ? grid_min_tmp : max_grid_dimx;
hipLaunchKernelGGL(( InputResetMinCUDAKernel<T>), dim3(grid_min), dim3(block), 0, ctx.stream(),
p_output, input_size, slice_size);
} else if (pool_type == "MEAN") {
GraphSendRecvSumCUDAFunctor<T, IndexT> functor;
hipLaunchKernelGGL(( GraphSendRecvCUDAKernel<T, IndexT, GraphSendRecvSumCUDAFunctor<T, IndexT>>)
, dim3(grid), dim3(block), 0, ctx.stream(),
p_src, s_index, d_index, p_output, index_size, slice_size, functor);
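// MEAN = the SUM above, then a per-destination count, then an element-wise divide.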
ctx.template Alloc<int32_t>(dst_count);
int32_t* p_dst_count = dst_count->data<int32_t>();
if (out_size > 0) {
input_size = out_size;
}
#ifdef PADDLE_WITH_HIP
hipMemset(p_dst_count, 0, input_size * sizeof(int));
#else
hipMemset(p_dst_count, 0, input_size * sizeof(int));
#endif
int64_t grid_count = (index_size + block - 1) / block;
hipLaunchKernelGGL(( ComputeCountCUDAKernel<T, IndexT>), dim3(grid_count), dim3(block), 0, ctx.stream(),
p_dst_count, d_index, index_size);
int64_t grid_mean_tmp = (input_size * slice_size + block - 1) / block;
int64_t grid_mean =
grid_mean_tmp < max_grid_dimx ? grid_mean_tmp : max_grid_dimx;
hipLaunchKernelGGL(( ManipulateMeanCUDAKernel<T>), dim3(grid_mean), dim3(block), 0, ctx.stream(),
p_output, p_dst_count, input_size, slice_size);
}
}
template <typename T, typename Context>
void GraphSendRecvKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& src_index,
const DenseTensor& dst_index,
const std::string& pool_type,
int64_t out_size,
DenseTensor* out,
DenseTensor* dst_count) {
auto index_type = src_index.dtype();
if (index_type == phi::DataType::INT32) {
GraphSendRecvOpCUDAKernelLaunchHelper<Context, T, int32_t>(
ctx, x, src_index, dst_index, pool_type, out_size, out, dst_count);
} else if (index_type == phi::DataType::INT64) {
GraphSendRecvOpCUDAKernelLaunchHelper<Context, T, int64_t>(
ctx, x, src_index, dst_index, pool_type, out_size, out, dst_count);
}
}
} // namespace phi
PD_REGISTER_KERNEL(graph_send_recv,
GPU,
ALL_LAYOUT,
phi::GraphSendRecvKernel,
float,
double,
int,
int64_t) {}
| a443e642c4d318b12ffdb6f3f71660d27c264041.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <algorithm>
#include <vector>
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/hostdevice.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/gpu/graph_send_recv_funcs.h"
#include "paddle/phi/kernels/graph_send_recv_kernel.h"
namespace phi {
template <typename Context, typename T, typename IndexT>
void GraphSendRecvOpCUDAKernelLaunchHelper(const Context& ctx,
const DenseTensor& x,
const DenseTensor& src_index,
const DenseTensor& dst_index,
const std::string& pool_type,
int64_t out_size,
DenseTensor* out,
DenseTensor* dst_count = nullptr) {
const int& index_size = src_index.dims()[0];
ctx.template Alloc<T>(out);
T* p_output = out->data<T>();
const auto& src_dims = x.dims();
int64_t memset_size = 1;
if (out_size <= 0) {
for (int i = 0; i < src_dims.size(); ++i) {
memset_size *= src_dims[i];
}
} else {
memset_size = out_size;
for (int i = 1; i < src_dims.size(); ++i) {
memset_size *= src_dims[i];
}
}
const size_t& memset_bytes = memset_size * sizeof(T);
if (pool_type == "SUM" || pool_type == "MEAN") {
#ifdef PADDLE_WITH_HIP
hipMemset(p_output, 0, memset_bytes);
#else
cudaMemset(p_output, 0, memset_bytes);
#endif
} else if (pool_type == "MAX") {
thrust::device_ptr<T> p_output_ptr(p_output);
thrust::fill(thrust::device,
p_output_ptr,
p_output_ptr + memset_size,
std::numeric_limits<T>::min());
} else if (pool_type == "MIN") {
thrust::device_ptr<T> p_output_ptr(p_output);
thrust::fill(thrust::device,
p_output_ptr,
p_output_ptr + memset_size,
std::numeric_limits<T>::max());
}
if (index_size == 0) return;
int64_t slice_size = 1;
for (int i = 1; i < src_dims.size(); ++i) {
slice_size *= src_dims[i];
}
const T* p_src = x.data<T>();
const IndexT* s_index = src_index.data<IndexT>();
const IndexT* d_index = dst_index.data<IndexT>();
#ifdef PADDLE_WITH_HIP
int block = 256;
#else
int block = 1024;
#endif
int64_t n = slice_size * index_size;
int64_t max_grid_dimx = ctx.GetCUDAMaxGridDimSize()[0];
int64_t grid_tmp = (n + block - 1) / block;
int64_t grid = grid_tmp < max_grid_dimx ? grid_tmp : max_grid_dimx;
int64_t input_size = src_dims[0];
if (pool_type == "SUM") {
GraphSendRecvSumCUDAFunctor<T, IndexT> functor;
GraphSendRecvCUDAKernel<T, IndexT, GraphSendRecvSumCUDAFunctor<T, IndexT>>
<<<grid, block, 0, ctx.stream()>>>(
p_src, s_index, d_index, p_output, index_size, slice_size, functor);
} else if (pool_type == "MAX") {
GraphSendRecvMaxCUDAFunctor<T, IndexT> functor;
GraphSendRecvCUDAKernel<T, IndexT, GraphSendRecvMaxCUDAFunctor<T, IndexT>>
<<<grid, block, 0, ctx.stream()>>>(
p_src, s_index, d_index, p_output, index_size, slice_size, functor);
if (out_size > 0) {
input_size = out_size;
}
int64_t grid_max_tmp = (input_size * slice_size + block - 1) / block;
int64_t grid_max =
grid_max_tmp < max_grid_dimx ? grid_max_tmp : max_grid_dimx;
InputResetMaxCUDAKernel<T><<<grid_max, block, 0, ctx.stream()>>>(
p_output, input_size, slice_size);
} else if (pool_type == "MIN") {
GraphSendRecvMinCUDAFunctor<T, IndexT> functor;
GraphSendRecvCUDAKernel<T, IndexT, GraphSendRecvMinCUDAFunctor<T, IndexT>>
<<<grid, block, 0, ctx.stream()>>>(
p_src, s_index, d_index, p_output, index_size, slice_size, functor);
if (out_size > 0) {
input_size = out_size;
}
int64_t grid_min_tmp = (input_size * slice_size + block - 1) / block;
int64_t grid_min =
grid_min_tmp < max_grid_dimx ? grid_min_tmp : max_grid_dimx;
InputResetMinCUDAKernel<T><<<grid_min, block, 0, ctx.stream()>>>(
p_output, input_size, slice_size);
} else if (pool_type == "MEAN") {
GraphSendRecvSumCUDAFunctor<T, IndexT> functor;
GraphSendRecvCUDAKernel<T, IndexT, GraphSendRecvSumCUDAFunctor<T, IndexT>>
<<<grid, block, 0, ctx.stream()>>>(
p_src, s_index, d_index, p_output, index_size, slice_size, functor);
ctx.template Alloc<int32_t>(dst_count);
int32_t* p_dst_count = dst_count->data<int32_t>();
if (out_size > 0) {
input_size = out_size;
}
#ifdef PADDLE_WITH_HIP
hipMemset(p_dst_count, 0, input_size * sizeof(int));
#else
cudaMemset(p_dst_count, 0, input_size * sizeof(int));
#endif
int64_t grid_count = (index_size + block - 1) / block;
ComputeCountCUDAKernel<T, IndexT><<<grid_count, block, 0, ctx.stream()>>>(
p_dst_count, d_index, index_size);
int64_t grid_mean_tmp = (input_size * slice_size + block - 1) / block;
int64_t grid_mean =
grid_mean_tmp < max_grid_dimx ? grid_mean_tmp : max_grid_dimx;
ManipulateMeanCUDAKernel<T><<<grid_mean, block, 0, ctx.stream()>>>(
p_output, p_dst_count, input_size, slice_size);
}
}
template <typename T, typename Context>
void GraphSendRecvKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& src_index,
const DenseTensor& dst_index,
const std::string& pool_type,
int64_t out_size,
DenseTensor* out,
DenseTensor* dst_count) {
auto index_type = src_index.dtype();
if (index_type == phi::DataType::INT32) {
GraphSendRecvOpCUDAKernelLaunchHelper<Context, T, int32_t>(
ctx, x, src_index, dst_index, pool_type, out_size, out, dst_count);
} else if (index_type == phi::DataType::INT64) {
GraphSendRecvOpCUDAKernelLaunchHelper<Context, T, int64_t>(
ctx, x, src_index, dst_index, pool_type, out_size, out, dst_count);
}
}
} // namespace phi
PD_REGISTER_KERNEL(graph_send_recv,
GPU,
ALL_LAYOUT,
phi::GraphSendRecvKernel,
float,
double,
int,
int64_t) {}
|
11defeabc08da3c8ef91d4a5fef427f2945bcbf7.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vec_computePSF_signalNsqrt.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;
if (argc < 2) { fprintf(stderr, "usage: %s <matrix_len>\n", argv[0]); return 1; }
int matrix_len = strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
double *result = NULL;
hipMalloc(&result, XSIZE*YSIZE);
double *fft = NULL;
hipMalloc(&fft, XSIZE*YSIZE);
double divide = 1;
int *sparseIndexEvenShift2D = NULL;
hipMalloc(&sparseIndexEvenShift2D, XSIZE*YSIZE);
int *sparseIndexOddShift2D = NULL;
hipMalloc(&sparseIndexOddShift2D, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((vec_computePSF_signalNsqrt), dim3(gridBlock), dim3(threadBlock), 0, 0, n, result, fft, divide, sparseIndexEvenShift2D, sparseIndexOddShift2D);
hipDeviceSynchronize();
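// Warm-up launches so device clocks and caches settle before timing.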
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((vec_computePSF_signalNsqrt), dim3(gridBlock), dim3(threadBlock), 0, 0, n, result, fft, divide, sparseIndexEvenShift2D, sparseIndexOddShift2D);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((vec_computePSF_signalNsqrt), dim3(gridBlock), dim3(threadBlock), 0, 0, n, result, fft, divide, sparseIndexEvenShift2D, sparseIndexOddShift2D);
}
hipDeviceSynchronize(); // drain the stream so the timer covers kernel execution, not just launch overhead
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 11defeabc08da3c8ef91d4a5fef427f2945bcbf7.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vec_computePSF_signalNsqrt.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
double *result = NULL;
cudaMalloc(&result, XSIZE*YSIZE);
double *fft = NULL;
cudaMalloc(&fft, XSIZE*YSIZE);
double divide = 1;
int *sparseIndexEvenShift2D = NULL;
cudaMalloc(&sparseIndexEvenShift2D, XSIZE*YSIZE);
int *sparseIndexOddShift2D = NULL;
cudaMalloc(&sparseIndexOddShift2D, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
vec_computePSF_signalNsqrt<<<gridBlock,threadBlock>>>(n,result,fft,divide,sparseIndexEvenShift2D,sparseIndexOddShift2D);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
vec_computePSF_signalNsqrt<<<gridBlock,threadBlock>>>(n,result,fft,divide,sparseIndexEvenShift2D,sparseIndexOddShift2D);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
vec_computePSF_signalNsqrt<<<gridBlock,threadBlock>>>(n,result,fft,divide,sparseIndexEvenShift2D,sparseIndexOddShift2D);
}
cudaDeviceSynchronize(); // drain the stream so the timer covers kernel execution, not just launch overhead
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
ad3b7df54d7b6c10aec1b9259a946a643cb30678.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* multNoShare.c
*
*/
#include "matrix_hip.cuh"
#include <stdio.h>
// Chain matrix multiplication - device code.
// Each single-thread block computes one element of one of the selected
// products; the fixed 256 x 256 grid in y/z caps supported matrix sides at 256.
__global__ void ChainMatMulKernel(Matrix* Chain, int* Muls, Matrix* IntRes) {
int threadId = blockIdx.x;
int row = blockIdx.y;
int col = blockIdx.z;
int Cvalue = 0;
int mulNum = Muls[threadId];
if(row >= Chain[mulNum].height || col >= Chain[mulNum + 1].width) return;
for (int e = 0; e < Chain[mulNum].width; ++e){
Cvalue += ((Chain[mulNum].elements[row * Chain[mulNum].width + e]) * (Chain[mulNum + 1].elements[e * Chain[mulNum + 1].width + col]));
Cvalue = Cvalue % 256;
}
IntRes[threadId].elements[row * IntRes[threadId].width + col] = Cvalue;
}
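// Selection sort in descending order that carries a companion index array, so
// the original positions of the sorted dimensions survive the shuffle.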
void SequentialSelectionSortDouble(int* array, int* arrayOrder, int n) {
int position, swap, swapOrder, d, c;
for(c = 0 ; c < ( n - 1 ) ; c++) {
position = c;
for ( d = c + 1 ; d < n ; d++ ) {
if ( array[position] < array[d] )
position = d;
}
if(position != c) {
swap = array[c];
swapOrder = arrayOrder[c];
array[c] = array[position];
arrayOrder[c] = arrayOrder[position];
array[position] = swap;
arrayOrder[position] = swapOrder;
}
}
}
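// Plain ascending selection sort (used to order the chosen multiplication
// indices left to right before launching).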
void SequentialSelectionSort(int* array, int n) {
int position, swap, d, c;
for(c = 0 ; c < ( n - 1 ) ; c++) {
position = c;
for ( d = c + 1 ; d < n ; d++ ) {
if ( array[position] > array[d] )
position = d;
}
if(position != c) {
swap = array[c];
array[c] = array[position];
array[position] = swap;
}
}
}
Matrix ChainMatMul(Matrix* Chain, int numMats) {
int n = numMats;
Matrix Result;
Matrix* h_Chain; // Only elements on device
Matrix* d_Chain; // Array fully on device
Matrix* h_IntRes; // Only elements on device
Matrix* d_IntRes; // Array fully on device
int* ChainDims;
int* ChainDimOrder;
int numDims;
int* h_muls; // Array on host
int* d_muls; // Array on device
int numMuls = 0;
h_Chain = (Matrix*)malloc(n*sizeof(Matrix));
size_t size;
hipError_t err;
// Transfer from Chain to h_Chain
for(int i = 0; i < n;++i) {
h_Chain[i].width = Chain[i].width;
h_Chain[i].height = Chain[i].height;
size = h_Chain[i].width * h_Chain[i].height * sizeof(int);
err = hipMalloc(&h_Chain[i].elements, size);
//printf("CUDA malloc Chain[%d].elements: %s\n", i, hipGetErrorString(err));
err = hipMemcpy(h_Chain[i].elements, Chain[i].elements, size, hipMemcpyHostToDevice);
//printf("Copy Chain[%d].elements to device: %s\n", i, hipGetErrorString(err));
}
// Trasfer from h_Chain to d_Chain
size = n * sizeof(Matrix);
err = hipMalloc(&d_Chain, size);
//printf("CUDA malloc Chain: %s\n", hipGetErrorString(err));
err = hipMemcpy(d_Chain, h_Chain, size, hipMemcpyHostToDevice);
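// Greedy parallel chain contraction: each pass picks a maximal set of
// pairwise non-adjacent products, preferring the largest inner dimensions
// first, multiplies them all in one kernel launch, then splices the results
// back into a shorter chain until a single matrix remains.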
while (n > 1) {
// ************************** Find optimal multiplications ******************
// Fill up ChainDims
numDims = n - 1;
numMuls = 0;
ChainDims = (int*)malloc(numDims * sizeof(int));
ChainDimOrder = (int*)malloc(numDims * sizeof(int));
h_muls = (int*)malloc(numDims * sizeof(int));
for(int i = 0; i < numDims; ++i) {
ChainDims[i] = h_Chain[i].width;
ChainDimOrder[i] = i;
}
// Sort ChainDims
SequentialSelectionSortDouble(ChainDims, ChainDimOrder, numDims);
// Select muls
for(int i = 0, j = 0;i < numDims; ++i) {
if(ChainDims[i] != 0 && (numMuls < 1024)) {
h_muls[j] = ChainDimOrder[i];
numMuls++;
j++;
for(int k = 0; k < numDims; k++){
if(ChainDimOrder[k] == (ChainDimOrder[i] + 1) || ChainDimOrder[k] == (ChainDimOrder[i] - 1)) {
ChainDims[k] = 0;
}
}
}
}
free(ChainDims);
free(ChainDimOrder);
SequentialSelectionSort(h_muls, numMuls);
printf("\nMultiplication choices : ");
for(int i = 0; i < numMuls; ++i) {
printf("Mat%d x Mat%d\t", h_muls[i], (h_muls[i]+1));
}
printf("\n");
// **************************************************************************
// ********************** Transfer stuff to Device **************************
// Transfer muls on device
err = hipMalloc(&d_muls, numMuls * sizeof(int));
//printf("CUDA malloc Muls: %s\n", hipGetErrorString(err));
err = hipMemcpy(d_muls, h_muls, numMuls * sizeof(int), hipMemcpyHostToDevice);
//printf("Copy Muls to device: %s\n", hipGetErrorString(err));
// Hold intermediate results on host with elements on device
h_IntRes = (Matrix*)malloc(numMuls * sizeof(Matrix));
// Allocate memory on device for the elements of h_IntRes
for(int i = 0; i < numMuls; ++i) {
h_IntRes[i].height = h_Chain[h_muls[i]].height;
h_IntRes[i].width = h_Chain[h_muls[i] + 1].width;
size_t size = h_IntRes[i].width * h_IntRes[i].height * sizeof(int);
err = hipMalloc(&h_IntRes[i].elements, size);
//printf("CUDA malloc IntRes[%d]: %s\n", i, hipGetErrorString(err));
}
// IntRes Fully on device
size = numMuls * sizeof(Matrix);
err = hipMalloc(&d_IntRes, size);
//printf("CUDA malloc Chain: %s\n", hipGetErrorString(err));
err = hipMemcpy(d_IntRes, h_IntRes, size, hipMemcpyHostToDevice);
//printf("Copy Chain to device: %s\n", hipGetErrorString(err));
// **************************************************************************
// *************************** Actual Multiplication ************************
dim3 dimGrid(numMuls, 256, 256);
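// One single-thread block per output element: grid.x picks the product,
// grid.y/grid.z the row/column; this launch therefore only covers matrices
// up to 256 on a side.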
// Call to the kernel
hipLaunchKernelGGL(( ChainMatMulKernel), dim3(dimGrid), dim3(1), 0, 0, d_Chain, d_muls, d_IntRes);
err = hipDeviceSynchronize();
//printf("Run kernel: %s\n", hipGetErrorString(err));
// **************************************************************************
// ************************** Readying for next cycle ***********************
// Update chain
for(int i = 0; i < numMuls;++i) {
// Free device memory
hipFree(h_Chain[h_muls[i]].elements);
hipFree(h_Chain[h_muls[i] + 1].elements);
// Update the chain
h_Chain[h_muls[i]].height = h_IntRes[i].height;
h_Chain[h_muls[i]].width = h_IntRes[i].width;
h_Chain[h_muls[i]].elements = h_IntRes[i].elements;
}
// Reduce the size of the h_Chain array
for(int i = 0; i < numMuls; ++i){
h_Chain[h_muls[i]+1].width = 0;
h_Chain[h_muls[i]+1].height = 0;
}
for(int i = 0, j =0; i < n; ++i) {
if(h_Chain[i+j].width == 0) {
j++;
n--;
}
h_Chain[i].width = h_Chain[i + j].width;
h_Chain[i].height = h_Chain[i + j].height;
h_Chain[i].elements = h_Chain[i + j].elements;
}
// Small memory leak here - (but removing this is difficult)
// Refresh d_Chain
hipFree(d_Chain);
size = n * sizeof(Matrix);
err = hipMalloc(&d_Chain, size);
//printf("CUDA malloc Chain: %s\n", hipGetErrorString(err));
err = hipMemcpy(d_Chain, h_Chain, size, hipMemcpyHostToDevice);
//printf("Copy Chain to device: %s\n", hipGetErrorString(err));
// Free stuff
free(h_muls);
hipFree(d_muls);
free(h_IntRes);
hipFree(d_IntRes);
// **************************************************************************
}
// Read Result from device memory
Result.width = h_Chain[0].width;
Result.height = h_Chain[0].height;
size = Result.width * Result.height * sizeof(int);
Result.elements = (int*)malloc(size);
err = hipMemcpy(Result.elements, h_Chain[0].elements, size, hipMemcpyDeviceToHost);
//printf("Copy Result off of device: %s\n",hipGetErrorString(err));
hipFree(h_Chain[0].elements);
hipFree(d_Chain);
free(h_Chain);
return Result;
}
// Usage: multNoShare.out <inputFile>
int main(int argc, char* argv[]){
Matrix* Chain;
Matrix Result;
int* dims;
if(argc != 2) {
printf("Please input in the following format\n multNoShare.out [#FileName] \n");
return 0;
}
char const* const fileName = argv[1]; /* should check that argc > 1 */
FILE* file = fopen(fileName, "r"); /* should check the result */
char nLine[10];
char line[256 * 5];
int lineNums[256];
// Read values from file
int n = atoi(fgets(nLine, sizeof(nLine), file)) - 1;
int sizeDim = sizeof(char) * (n+1) * 5;
char* dimLine = (char *)malloc(sizeDim);
fgets(dimLine, sizeof(char) * (n+1) * 5, file);
char oneNum[5];
dims = (int *) malloc((n+1)*sizeof(int));
for(int i = 0, k = 0; dimLine[i] != '\0' ;++i) {
int j = 0;
for(; dimLine[i] != ' ' && dimLine[i]!= '\0'; ++j,++i){
oneNum[j] = dimLine[i];
}
oneNum[j] = '\0';
dims[k++] = atoi(oneNum);
}
Chain = (Matrix *) malloc(n*sizeof(Matrix));
for(int i = 0; i < n; ++i) {
Chain[i].height = dims[i];
Chain[i].width = dims[i+1];
Chain[i].elements = (int*)malloc(Chain[i].width * Chain[i].height * sizeof(int));
}
for(int k = 0; k < n; ++k)
for(int i = 0; i < Chain[k].height; i++){
fgets(line, sizeof(line), file);
for(int p = 0, q = 0; line[p] != '\0' ;++p) {
int r = 0;
for(; line[p] != ' ' && line[p]!= '\0'; ++r,++p){
oneNum[r] = line[p];
}
oneNum[r] = '\0';
lineNums[q++] = atoi(oneNum);
}
for(int j = 0; j < Chain[k].width; j++) {
Chain[k].elements[i*Chain[k].width + j] = lineNums[j];
}
}
fclose(file);
printf("Print up to a 10x10 portion of the matrices - to avoid clutter");
for(int k = 0; k < n; ++k) {
printf("\n Chain[%d] : %d x %d\n", k, Chain[k].height, Chain[k].width);
for(int i = 0; i < min(10, Chain[k].height); i++){
for(int j = 0; j < min(10, Chain[k].width); j++)
printf("%d ", Chain[k].elements[i*Chain[k].width + j]);
printf("\n");
}
}
printf("\n");
Result = ChainMatMul(Chain, n);
// Print up to a 10x10 portion of the Result
printf("\n Result : %d x %d\n", Result.height, Result.width);
for(int i = 0; i < min(10, Result.height); i++){
for(int j = 0; j < min(10, Result.width); j++)
printf("%d ", Result.elements[i*Result.width + j]);
printf("\n");
}
}
| ad3b7df54d7b6c10aec1b9259a946a643cb30678.cu | /*
* multNoShare.c
*
*/
#include "matrix.cuh"
#include <stdio.h>
// Matrix chain multiplication - Host code
// Parallel multiplication of matrix pairs selected from the chain
__global__ void ChainMatMulKernel(Matrix* Chain, int* Muls, Matrix* IntRes) {
int threadId = blockIdx.x;
int row = blockIdx.y;
int col = blockIdx.z;
int Cvalue = 0;
int mulNum = Muls[threadId];
if(row >= Chain[mulNum].height || col >= Chain[mulNum + 1].width) return;
for (int e = 0; e < Chain[mulNum].width; ++e){
Cvalue += ((Chain[mulNum].elements[row * Chain[mulNum].width + e]) * (Chain[mulNum + 1].elements[e * Chain[mulNum + 1].width + col]));
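		// All arithmetic is kept reduced mod 256, so entries always stay in [0, 256)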
Cvalue = Cvalue % 256;
}
IntRes[threadId].elements[row * IntRes[threadId].width + col] = Cvalue;
}
void SequentialSelectionSortDouble(int* array, int* arrayOrder, int n) {
int position, swap, swapOrder, d, c;
for(c = 0 ; c < ( n - 1 ) ; c++) {
position = c;
for ( d = c + 1 ; d < n ; d++ ) {
if ( array[position] < array[d] )
position = d;
}
if(position != c) {
swap = array[c];
swapOrder = arrayOrder[c];
array[c] = array[position];
arrayOrder[c] = arrayOrder[position];
array[position] = swap;
arrayOrder[position] = swapOrder;
}
}
}
void SequentialSelectionSort(int* array, int n) {
int position, swap, d, c;
for(c = 0 ; c < ( n - 1 ) ; c++) {
position = c;
for ( d = c + 1 ; d < n ; d++ ) {
if ( array[position] > array[d] )
position = d;
}
if(position != c) {
swap = array[c];
array[c] = array[position];
array[position] = swap;
}
}
}
Matrix ChainMatMul(Matrix* Chain, int numMats) {
int n = numMats;
Matrix Result;
Matrix* h_Chain; // Only elements on device
Matrix* d_Chain; // Array fully on device
Matrix* h_IntRes; // Only elements on device
Matrix* d_IntRes; // Array fully on device
int* ChainDims;
int* ChainDimOrder;
int numDims;
int* h_muls; // Array on host
int* d_muls; // Array on device
int numMuls = 0;
h_Chain = (Matrix*)malloc(n*sizeof(Matrix));
size_t size;
cudaError_t err;
// Transfer from Chain to h_Chain
for(int i = 0; i < n;++i) {
h_Chain[i].width = Chain[i].width;
h_Chain[i].height = Chain[i].height;
size = h_Chain[i].width * h_Chain[i].height * sizeof(int);
err = cudaMalloc(&h_Chain[i].elements, size);
//printf("CUDA malloc Chain[%d].elements: %s\n", i, cudaGetErrorString(err));
err = cudaMemcpy(h_Chain[i].elements, Chain[i].elements, size, cudaMemcpyHostToDevice);
//printf("Copy Chain[%d].elements to device: %s\n", i, cudaGetErrorString(err));
}
// Trasfer from h_Chain to d_Chain
size = n * sizeof(Matrix);
err = cudaMalloc(&d_Chain, size);
//printf("CUDA malloc Chain: %s\n", cudaGetErrorString(err));
err = cudaMemcpy(d_Chain, h_Chain, size, cudaMemcpyHostToDevice);
while (n > 1) {
// ************************** Find optimal multiplications ******************
// Fill up ChainDims
numDims = n - 1;
numMuls = 0;
ChainDims = (int*)malloc(numDims * sizeof(int));
ChainDimOrder = (int*)malloc(numDims * sizeof(int));
h_muls = (int*)malloc(numDims * sizeof(int));
for(int i = 0; i < numDims; ++i) {
ChainDims[i] = h_Chain[i].width;
ChainDimOrder[i] = i;
}
// Sort ChainDims
SequentialSelectionSortDouble(ChainDims, ChainDimOrder, numDims);
// Select muls
for(int i = 0, j = 0;i < numDims; ++i) {
if(ChainDims[i] != 0 && (numMuls < 1024)) {
h_muls[j] = ChainDimOrder[i];
numMuls++;
j++;
for(int k = 0; k < numDims; k++){
if(ChainDimOrder[k] == (ChainDimOrder[i] + 1) || ChainDimOrder[k] == (ChainDimOrder[i] - 1)) {
ChainDims[k] = 0;
}
}
}
}
free(ChainDims);
free(ChainDimOrder);
SequentialSelectionSort(h_muls, numMuls);
printf("\nMultiplication choices : ");
for(int i = 0; i < numMuls; ++i) {
printf("Mat%d x Mat%d\t", h_muls[i], (h_muls[i]+1));
}
printf("\n");
// **************************************************************************
// ********************** Transfer stuff to Device **************************
// Transfer muls on device
err = cudaMalloc(&d_muls, numMuls * sizeof(int));
//printf("CUDA malloc Muls: %s\n", cudaGetErrorString(err));
err = cudaMemcpy(d_muls, h_muls, numMuls * sizeof(int), cudaMemcpyHostToDevice);
//printf("Copy Muls to device: %s\n", cudaGetErrorString(err));
// Hold intermediate results on host with elements on device
h_IntRes = (Matrix*)malloc(numMuls * sizeof(Matrix));
// Allocate memory on device for the elements of h_IntRes
for(int i = 0; i < numMuls; ++i) {
h_IntRes[i].height = h_Chain[h_muls[i]].height;
h_IntRes[i].width = h_Chain[h_muls[i] + 1].width;
size_t size = h_IntRes[i].width * h_IntRes[i].height * sizeof(int);
err = cudaMalloc(&h_IntRes[i].elements, size);
//printf("CUDA malloc IntRes[%d]: %s\n", i, cudaGetErrorString(err));
}
// IntRes Fully on device
size = numMuls * sizeof(Matrix);
err = cudaMalloc(&d_IntRes, size);
//printf("CUDA malloc Chain: %s\n", cudaGetErrorString(err));
err = cudaMemcpy(d_IntRes, h_IntRes, size, cudaMemcpyHostToDevice);
//printf("Copy Chain to device: %s\n", cudaGetErrorString(err));
// **************************************************************************
// *************************** Actual Multiplication ************************
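	// One single-thread block per output element: blockIdx.x selects which
	// pending multiplication, (blockIdx.y, blockIdx.z) the output (row, col).
	// Note this caps each intermediate result at 256 x 256 outputs.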
dim3 dimGrid(numMuls, 256, 256);
// Call to the kernel
ChainMatMulKernel<<<dimGrid, 1>>>(d_Chain, d_muls, d_IntRes);
	err = cudaDeviceSynchronize();
//printf("Run kernel: %s\n", cudaGetErrorString(err));
// **************************************************************************
// ************************** Readying for next cycle ***********************
// Update chain
for(int i = 0; i < numMuls;++i) {
// Free device memory
cudaFree(h_Chain[h_muls[i]].elements);
cudaFree(h_Chain[h_muls[i] + 1].elements);
// Update the chain
h_Chain[h_muls[i]].height = h_IntRes[i].height;
h_Chain[h_muls[i]].width = h_IntRes[i].width;
h_Chain[h_muls[i]].elements = h_IntRes[i].elements;
}
// Reduce the size of the h_Chain array
for(int i = 0; i < numMuls; ++i){
h_Chain[h_muls[i]+1].width = 0;
h_Chain[h_muls[i]+1].height = 0;
}
for(int i = 0, j =0; i < n; ++i) {
if(h_Chain[i+j].width == 0) {
j++;
n--;
}
h_Chain[i].width = h_Chain[i + j].width;
h_Chain[i].height = h_Chain[i + j].height;
h_Chain[i].elements = h_Chain[i + j].elements;
}
// Small memory leak here - (but removing this is difficult)
// Refresh d_Chain
cudaFree(d_Chain);
size = n * sizeof(Matrix);
err = cudaMalloc(&d_Chain, size);
//printf("CUDA malloc Chain: %s\n", cudaGetErrorString(err));
err = cudaMemcpy(d_Chain, h_Chain, size, cudaMemcpyHostToDevice);
//printf("Copy Chain to device: %s\n", cudaGetErrorString(err));
// Free stuff
free(h_muls);
cudaFree(d_muls);
free(h_IntRes);
cudaFree(d_IntRes);
// **************************************************************************
}
// Read Result from device memory
Result.width = h_Chain[0].width;
Result.height = h_Chain[0].height;
size = Result.width * Result.height * sizeof(int);
Result.elements = (int*)malloc(size);
err = cudaMemcpy(Result.elements, h_Chain[0].elements, size, cudaMemcpyDeviceToHost);
//printf("Copy Result off of device: %s\n",cudaGetErrorString(err));
cudaFree(h_Chain[0].elements);
cudaFree(d_Chain);
free(h_Chain);
return Result;
}
// Usage: multNoShare.out <input file>
int main(int argc, char* argv[]){
Matrix* Chain;
Matrix Result;
int* dims;
if(argc != 2) {
printf("Please input in the following format\n multNoShare.out [#FileName] \n");
return 0;
}
	char const* const fileName = argv[1]; /* argc validated above */
FILE* file = fopen(fileName, "r"); /* should check the result */
char nLine[10];
char line[256 * 5];
int lineNums[256];
// Read values from file
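	// Assumed input layout (inferred from the parsing below):
	//   line 1: the number of dimensions d (so the chain holds n = d - 1 matrices)
	//   line 2: d space-separated dimensions; Chain[i] is dims[i] x dims[i+1]
	//   then one line per matrix row, entries space-separated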
int n = atoi(fgets(nLine, sizeof(nLine), file)) - 1;
int sizeDim = sizeof(char) * (n+1) * 5;
char* dimLine = (char *)malloc(sizeDim);
fgets(dimLine, sizeof(char) * (n+1) * 5, file);
char oneNum[5];
dims = (int *) malloc((n+1)*sizeof(int));
for(int i = 0, k = 0; dimLine[i] != '\0' ;++i) {
int j = 0;
for(; dimLine[i] != ' ' && dimLine[i]!= '\0'; ++j,++i){
oneNum[j] = dimLine[i];
}
oneNum[j] = '\0';
dims[k++] = atoi(oneNum);
}
Chain = (Matrix *) malloc(n*sizeof(Matrix));
for(int i = 0; i < n; ++i) {
Chain[i].height = dims[i];
Chain[i].width = dims[i+1];
Chain[i].elements = (int*)malloc(Chain[i].width * Chain[i].height * sizeof(int));
}
for(int k = 0; k < n; ++k)
for(int i = 0; i < Chain[k].height; i++){
fgets(line, sizeof(line), file);
for(int p = 0, q = 0; line[p] != '\0' ;++p) {
int r = 0;
for(; line[p] != ' ' && line[p]!= '\0'; ++r,++p){
oneNum[r] = line[p];
}
oneNum[r] = '\0';
lineNums[q++] = atoi(oneNum);
}
for(int j = 0; j < Chain[k].width; j++) {
Chain[k].elements[i*Chain[k].width + j] = lineNums[j];
}
}
fclose(file);
printf("Print up to a 10x10 portion of the matrices - to avoid clutter");
for(int k = 0; k < n; ++k) {
printf("\n Chain[%d] : %d x %d\n", k, Chain[k].height, Chain[k].width);
for(int i = 0; i < min(10, Chain[k].height); i++){
for(int j = 0; j < min(10, Chain[k].width); j++)
printf("%d ", Chain[k].elements[i*Chain[k].width + j]);
printf("\n");
}
}
printf("\n");
Result = ChainMatMul(Chain, n);
// Print up to a 10x10 portion of the Result
printf("\n Result : %d x %d\n", Result.height, Result.width);
for(int i = 0; i < min(10, Result.height); i++){
for(int j = 0; j < min(10, Result.width); j++)
printf("%d ", Result.elements[i*Result.width + j]);
printf("\n");
}
}
|
545b8015f6b379f39af5d6c7c7b36b2218607427.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <math.h>
#include <iostream>
using namespace std;
__global__
void convStandard(uint32_t* d_MATDIM, uint32_t* d_KERDIM, double* mat, double* ker, double* res) {
uint32_t MATDIM = d_MATDIM[0];
uint32_t KERDIM = d_KERDIM[0];
uint32_t threadID = blockIdx.x * blockDim.x + threadIdx.x;
if (threadID < (MATDIM-KERDIM+1)*(MATDIM-KERDIM+1)) {
uint32_t index = KERDIM/2 + (KERDIM/2) * MATDIM + (threadID / (MATDIM-KERDIM+1)) * MATDIM + threadID % (MATDIM-KERDIM+1);
double sum = 0.0;
for(int i = -((int32_t) KERDIM)/2; i < ((int32_t) KERDIM)/2+1; i++){
for(int j = -((int32_t) KERDIM)/2; j < ((int32_t) KERDIM)/2+1; j++){
sum += mat[index + i*MATDIM + j] * ker[(i+KERDIM/2) * KERDIM + (j+KERDIM/2)];
}
}
res[threadID] = sum;
}
}
__global__
void convBinW(uint32_t* d_MATDIM, uint32_t* d_KERDIM, double* mat, unsigned char* ker, double* res) {
uint32_t MATDIM = d_MATDIM[0];
uint32_t KERDIM = d_KERDIM[0];
uint32_t threadID = blockIdx.x * blockDim.x + threadIdx.x;
if (threadID < (MATDIM-KERDIM+1)*(MATDIM-KERDIM+1)) {
uint32_t index_mat = KERDIM/2 + (KERDIM/2) * MATDIM + (threadID / (MATDIM-KERDIM+1)) * MATDIM + threadID % (MATDIM-KERDIM+1);
double sum = 0.0;
for(int i = -((int32_t) KERDIM)/2; i < ((int32_t) KERDIM)/2+1; i++){
for(int j = -((int32_t) KERDIM)/2; j < ((int32_t) KERDIM)/2+1; j++){
uint32_t index_ker = (i+KERDIM/2) * KERDIM + (j+KERDIM/2);
if ((unsigned char)((unsigned char)(ker[index_ker/8] << (index_ker % 8)) >> 7) == 1)
sum += mat[index_mat + i*MATDIM + j];
else
sum -= mat[index_mat + i*MATDIM + j];
}
}
res[threadID] = sum;
}
}
__global__
void convBinWBinI(uint32_t* d_MATDIM, uint32_t* d_KERDIM, unsigned char* mat, unsigned char* ker, unsigned char* res) {
uint32_t MATDIM = d_MATDIM[0];
uint32_t KERDIM = d_KERDIM[0];
uint32_t xnor_number = 0;
unsigned char bit_counter = 0;
unsigned char pop_count = 0;
uint32_t threadID = blockIdx.x * blockDim.x + threadIdx.x;
if (threadID < (MATDIM-KERDIM+1)*(MATDIM-KERDIM+1)) {
uint32_t index_mat = KERDIM/2 + (KERDIM/2) * MATDIM + (threadID / (MATDIM-KERDIM+1)) * MATDIM + threadID % (MATDIM-KERDIM+1);
for(int i = -((int32_t) KERDIM)/2; i < ((int32_t) KERDIM)/2+1; i++){
for(int j = -((int32_t) KERDIM)/2; j < ((int32_t) KERDIM)/2+1; j++){
uint32_t index_ker = (i+(KERDIM >> 1)) * KERDIM + (j+(KERDIM >> 1));
uint32_t index_mat_bin = index_mat + i*MATDIM + j;
			if (bit_counter == 32) {
				// Flush the filled 32-bit XNOR word into the popcount before reuse
				pop_count += (unsigned char) __popc((unsigned int) xnor_number);
				bit_counter = 0;
				xnor_number = 0;
			}
			// XNOR of the two MSB-first packed bits: record a 1 whenever they match
			if (((ker[index_ker >> 3] >> (7 - (index_ker % 8))) & 1) == ((mat[index_mat_bin >> 3] >> (7 - (index_mat_bin % 8))) & 1))
				xnor_number |= 1 << bit_counter;
			bit_counter++;
}
}
pop_count += (unsigned char) __popc((unsigned int) xnor_number);
res[threadID] = pop_count;
}
}
void initMat(uint32_t dim, double* mat) {
for (uint32_t i = 0; i < dim; i++) {
for (uint32_t j = 0; j < dim; j++) {
mat[i*dim+j] = (double) rand() / RAND_MAX * 2.0 - 1.0;
}
}
}
void convertToBinary(uint32_t dim, double* mat, uint32_t size_bin, unsigned char* mat_bin) {
uint32_t pos_bit = 0;
uint32_t pos_byte = 0;
unsigned char sum = 0;
for (uint32_t i = 0; i < dim; i++) {
for (uint32_t j = 0; j < dim; j++) {
if (mat[i*dim+j] >= 0) {
sum += pow(2, 7-pos_bit);
}
if (pos_bit == 7) {
mat_bin[pos_byte] = sum;
pos_byte++;
sum = 0;
pos_bit = 0;
} else {
pos_bit++;
}
}
}
if (dim*dim % 8 != 0) {
mat_bin[pos_byte] = sum;
}
}
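// Example (assumed 3x3 kernel): row-major signs +,-,+,-,+,+,+,-,+ binarize to
// bits 1,0,1,0,1,1,1,0,1 and pack MSB-first into the bytes {0xAE, 0x80}
// (the seven trailing bits of the last byte are unused padding).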
void printMatrix(uint32_t dim, double* mat) {
cout << "dim: " << dim << "x" << dim << "\n{\n";
for (uint32_t i = 0; i < dim; i++) {
for (uint32_t j = 0; j < dim; j++) {
cout << mat[i*dim+j] << ", ";
}
cout << '\n';
}
cout << "}\n";
}
void printBinary(uint32_t dim, uint32_t size_bin, unsigned char* mat) {
unsigned char rest;
for (uint32_t i = 0; i < size_bin; i++) {
rest = mat[i];
for (uint32_t j = 0; j < 8; j++) {
if (i * 8 + j == dim*dim) {
cout << "\n";
break;
}
if(rest - pow(2,7-j) >= 0) {
rest = rest - pow(2,7-j);
cout << "1 ";
} else {
cout << "0 ";
}
if((i * 8 + j + 1) % dim == 0) {
cout << "\n";
}
}
}
}
int main(int argc, char* argv[]) {
if (argc != 4) {
cout << "Usage: srun out <int: dimension of input matrix> <int: dimension of kernel> <blocksize>\n";
return 0;
}
uint32_t MATDIM = strtol(argv[1], NULL, 10);
uint32_t KERDIM = strtol(argv[2], NULL, 10);
uint32_t N = strtol(argv[3], NULL, 10);
uint32_t h_MATDIM[1];
h_MATDIM[0] = MATDIM;
uint32_t h_KERDIM[1];
h_KERDIM[0] = KERDIM;
struct timespec tstart={0,0}, tend={0,0};
double elapsed;
// Matrix (double)
double h_mat[MATDIM*MATDIM];
// Kernel (double)
double h_ker[KERDIM*KERDIM];
// Matrix (bits)
unsigned char h_mat_bin[(uint32_t) ceil(MATDIM*MATDIM/8.0)];
// Kernel (bits)
unsigned char h_ker_bin[(uint32_t) ceil(KERDIM*KERDIM/8.0)];
// Result of standard convolution
double h_res_standard[(MATDIM-KERDIM+1)*(MATDIM-KERDIM+1)];
// Result of convolution with binary weights
double h_res_binW[(MATDIM-KERDIM+1)*(MATDIM-KERDIM+1)];
// Result of convolution with binary weights and binary inputs
unsigned char h_res_binWbinI[(MATDIM-KERDIM+1)*(MATDIM-KERDIM+1)];
uint32_t mat_size = MATDIM*MATDIM * sizeof(double);
uint32_t ker_size = KERDIM*KERDIM * sizeof(double);
uint32_t mat_bin_size = (uint32_t) ceil(MATDIM*MATDIM/8.0) * sizeof(unsigned char);
uint32_t ker_bin_size = (uint32_t) ceil(KERDIM*KERDIM/8.0) * sizeof(unsigned char);
uint32_t res_standard_size = (MATDIM-KERDIM+1)*(MATDIM-KERDIM+1) * sizeof(double);
uint32_t res_binW_size = (MATDIM-KERDIM+1)*(MATDIM-KERDIM+1) * sizeof(double);
uint32_t res_binWbinI_size = (MATDIM-KERDIM+1)*(MATDIM-KERDIM+1) * sizeof(unsigned char);
// Pointers for allocation on device
uint32_t *d_MATDIM, *d_KERDIM;
double *d_mat, *d_ker, *d_res_standard, *d_res_binW;
unsigned char *d_mat_bin, *d_ker_bin, *d_res_binWbinI;
// Allocate all matrices on device (hipFree later!)
hipMalloc((void**) &d_mat, mat_size);
hipMalloc((void**) &d_ker, ker_size);
hipMalloc((void**) &d_mat_bin, mat_bin_size);
hipMalloc((void**) &d_ker_bin, ker_bin_size);
hipMalloc((void**) &d_res_standard, res_standard_size);
hipMalloc((void**) &d_res_binW, res_binW_size);
hipMalloc((void**) &d_res_binWbinI, res_binWbinI_size);
hipMalloc((void**) &d_MATDIM, sizeof(uint32_t));
hipMalloc((void**) &d_KERDIM, sizeof(uint32_t));
// Seed for random number generation
srand(time(NULL));
// Randomize the values of the double matrix with values -1.0 ... 1.0
initMat(MATDIM, h_mat);
// Convert the double matrix into binary (0 = -1, 1 = 1)
convertToBinary(MATDIM, h_mat, (uint32_t) ceil(MATDIM*MATDIM/8.0), h_mat_bin);
// TODO DEBUG: Print the double matrix.
printMatrix(MATDIM, h_mat);
// TODO DEBUG: Print the binary matrix.
printBinary(MATDIM, (uint32_t) ceil(MATDIM*MATDIM/8.0), h_mat_bin);
initMat(KERDIM, h_ker);
// Convert the double matrix into binary
convertToBinary(KERDIM, h_ker, (uint32_t) ceil(KERDIM*KERDIM/8.0), h_ker_bin);
// TODO DEBUG: Print the double matrix.
printMatrix(KERDIM, h_ker);
// TODO DEBUG: Print the binary matrix.
printBinary(KERDIM, (uint32_t) ceil(KERDIM*KERDIM/8.0), h_ker_bin);
// Copy all the matrices to the device (except the result matrices)
hipMemcpy(d_mat, h_mat, mat_size, hipMemcpyHostToDevice);
hipMemcpy(d_ker, h_ker, ker_size, hipMemcpyHostToDevice);
hipMemcpy(d_mat_bin, h_mat_bin, mat_bin_size, hipMemcpyHostToDevice);
hipMemcpy(d_ker_bin, h_ker_bin, ker_bin_size, hipMemcpyHostToDevice);
hipMemcpy(d_MATDIM, h_MATDIM, sizeof(uint32_t), hipMemcpyHostToDevice);
hipMemcpy(d_KERDIM, h_KERDIM, sizeof(uint32_t), hipMemcpyHostToDevice);
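	// One thread per output element: enough N-thread blocks to cover the
	// (MATDIM-KERDIM+1)^2 outputs of a valid (no-padding) convolution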
uint32_t grid_size = ceil((MATDIM-KERDIM+1) * (MATDIM-KERDIM+1) / ((double) N));
// Compute the different modes of convolution
clock_gettime(CLOCK_MONOTONIC, &tstart);
hipLaunchKernelGGL(( convStandard), dim3(grid_size), dim3(N), 0, 0, d_MATDIM, d_KERDIM, d_mat, d_ker, d_res_standard);
clock_gettime(CLOCK_MONOTONIC, &tend);
elapsed = ((double)tend.tv_sec + 1.0e-9*tend.tv_nsec) - ((double)tstart.tv_sec + 1.0e-9*tstart.tv_nsec);
cout << "Standard convolution took " << elapsed << " seconds.\n";
clock_gettime(CLOCK_MONOTONIC, &tstart);
hipLaunchKernelGGL(( convBinW), dim3(grid_size), dim3(N), 0, 0, d_MATDIM, d_KERDIM, d_mat, d_ker_bin, d_res_binW);
clock_gettime(CLOCK_MONOTONIC, &tend);
elapsed = ((double)tend.tv_sec + 1.0e-9*tend.tv_nsec) - ((double)tstart.tv_sec + 1.0e-9*tstart.tv_nsec);
cout << "Binary weights took " << elapsed << " nanoseconds.\n";
clock_gettime(CLOCK_MONOTONIC, &tstart);
hipLaunchKernelGGL(( convBinWBinI), dim3(grid_size), dim3(N), 0, 0, d_MATDIM, d_KERDIM, d_mat_bin, d_ker_bin, d_res_binWbinI);
clock_gettime(CLOCK_MONOTONIC, &tend);
elapsed = ((double)tend.tv_sec + 1.0e-9*tend.tv_nsec) - ((double)tstart.tv_sec + 1.0e-9*tstart.tv_nsec);
cout << "Binary inputs and binary weights took " << elapsed << " nanoseconds.\n";
cout << elapsed << "\n";
// Fetch the results from device
hipMemcpy(h_res_standard, d_res_standard, res_standard_size, hipMemcpyDeviceToHost);
hipMemcpy(h_res_binW, d_res_binW, res_binW_size, hipMemcpyDeviceToHost);
hipMemcpy(h_res_binWbinI, d_res_binWbinI, res_binWbinI_size, hipMemcpyDeviceToHost);
// TODO DEBUG: Print the results
cout << "Standard convolution DOUBLExDOUBLE\n";
printMatrix(MATDIM-KERDIM+1, h_res_standard);
cout << "Binary weight convolution DOUBLExBITS\n";
printMatrix(MATDIM-KERDIM+1, h_res_binW);
cout << "Binary weights and binary inputs BITSxBITS\n";
	// h_res_binWbinI holds popcount bytes; print it with the integer loop below
	// (reinterpreting the buffer as double* would read far past its end)
cout << "dim: " << MATDIM-KERDIM+1 << "x" << MATDIM-KERDIM+1 << "\n{\n";
for (uint32_t i = 0; i < MATDIM-KERDIM+1; i++) {
for (uint32_t j = 0; j < MATDIM-KERDIM+1; j++) {
cout << (uint32_t) h_res_binWbinI[i*(MATDIM-KERDIM+1)+j] << ", ";
}
cout << '\n';
}
cout << "}\n";
hipFree(d_mat);
hipFree(d_ker);
hipFree(d_mat_bin);
hipFree(d_ker_bin);
hipFree(d_res_standard);
hipFree(d_res_binW);
hipFree(d_res_binWbinI);
hipFree(d_MATDIM);
hipFree(d_KERDIM);
return 0;
}
| 545b8015f6b379f39af5d6c7c7b36b2218607427.cu | #include <cuda.h>
#include <math.h>
#include <iostream>
using namespace std;
__global__
void convStandard(uint32_t* d_MATDIM, uint32_t* d_KERDIM, double* mat, double* ker, double* res) {
uint32_t MATDIM = d_MATDIM[0];
uint32_t KERDIM = d_KERDIM[0];
uint32_t threadID = blockIdx.x * blockDim.x + threadIdx.x;
if (threadID < (MATDIM-KERDIM+1)*(MATDIM-KERDIM+1)) {
uint32_t index = KERDIM/2 + (KERDIM/2) * MATDIM + (threadID / (MATDIM-KERDIM+1)) * MATDIM + threadID % (MATDIM-KERDIM+1);
double sum = 0.0;
for(int i = -((int32_t) KERDIM)/2; i < ((int32_t) KERDIM)/2+1; i++){
for(int j = -((int32_t) KERDIM)/2; j < ((int32_t) KERDIM)/2+1; j++){
sum += mat[index + i*MATDIM + j] * ker[(i+KERDIM/2) * KERDIM + (j+KERDIM/2)];
}
}
res[threadID] = sum;
}
}
__global__
void convBinW(uint32_t* d_MATDIM, uint32_t* d_KERDIM, double* mat, unsigned char* ker, double* res) {
uint32_t MATDIM = d_MATDIM[0];
uint32_t KERDIM = d_KERDIM[0];
uint32_t threadID = blockIdx.x * blockDim.x + threadIdx.x;
if (threadID < (MATDIM-KERDIM+1)*(MATDIM-KERDIM+1)) {
uint32_t index_mat = KERDIM/2 + (KERDIM/2) * MATDIM + (threadID / (MATDIM-KERDIM+1)) * MATDIM + threadID % (MATDIM-KERDIM+1);
double sum = 0.0;
for(int i = -((int32_t) KERDIM)/2; i < ((int32_t) KERDIM)/2+1; i++){
for(int j = -((int32_t) KERDIM)/2; j < ((int32_t) KERDIM)/2+1; j++){
uint32_t index_ker = (i+KERDIM/2) * KERDIM + (j+KERDIM/2);
if ((unsigned char)((unsigned char)(ker[index_ker/8] << (index_ker % 8)) >> 7) == 1)
sum += mat[index_mat + i*MATDIM + j];
else
sum -= mat[index_mat + i*MATDIM + j];
}
}
res[threadID] = sum;
}
}
__global__
void convBinWBinI(uint32_t* d_MATDIM, uint32_t* d_KERDIM, unsigned char* mat, unsigned char* ker, unsigned char* res) {
uint32_t MATDIM = d_MATDIM[0];
uint32_t KERDIM = d_KERDIM[0];
uint32_t xnor_number = 0;
unsigned char bit_counter = 0;
unsigned char pop_count = 0;
uint32_t threadID = blockIdx.x * blockDim.x + threadIdx.x;
if (threadID < (MATDIM-KERDIM+1)*(MATDIM-KERDIM+1)) {
uint32_t index_mat = KERDIM/2 + (KERDIM/2) * MATDIM + (threadID / (MATDIM-KERDIM+1)) * MATDIM + threadID % (MATDIM-KERDIM+1);
for(int i = -((int32_t) KERDIM)/2; i < ((int32_t) KERDIM)/2+1; i++){
for(int j = -((int32_t) KERDIM)/2; j < ((int32_t) KERDIM)/2+1; j++){
uint32_t index_ker = (i+(KERDIM >> 1)) * KERDIM + (j+(KERDIM >> 1));
uint32_t index_mat_bin = index_mat + i*MATDIM + j;
			if (bit_counter == 32) {
				// Flush the filled 32-bit XNOR word into the popcount before reuse
				pop_count += (unsigned char) __popc((unsigned int) xnor_number);
				bit_counter = 0;
				xnor_number = 0;
			}
			// XNOR of the two MSB-first packed bits: record a 1 whenever they match
			if (((ker[index_ker >> 3] >> (7 - (index_ker % 8))) & 1) == ((mat[index_mat_bin >> 3] >> (7 - (index_mat_bin % 8))) & 1))
				xnor_number |= 1 << bit_counter;
			bit_counter++;
}
}
pop_count += (unsigned char) __popc((unsigned int) xnor_number);
res[threadID] = pop_count;
}
}
void initMat(uint32_t dim, double* mat) {
for (uint32_t i = 0; i < dim; i++) {
for (uint32_t j = 0; j < dim; j++) {
mat[i*dim+j] = (double) rand() / RAND_MAX * 2.0 - 1.0;
}
}
}
void convertToBinary(uint32_t dim, double* mat, uint32_t size_bin, unsigned char* mat_bin) {
uint32_t pos_bit = 0;
uint32_t pos_byte = 0;
unsigned char sum = 0;
for (uint32_t i = 0; i < dim; i++) {
for (uint32_t j = 0; j < dim; j++) {
if (mat[i*dim+j] >= 0) {
sum += pow(2, 7-pos_bit);
}
if (pos_bit == 7) {
mat_bin[pos_byte] = sum;
pos_byte++;
sum = 0;
pos_bit = 0;
} else {
pos_bit++;
}
}
}
if (dim*dim % 8 != 0) {
mat_bin[pos_byte] = sum;
}
}
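// Example (assumed 3x3 kernel): row-major signs +,-,+,-,+,+,+,-,+ binarize to
// bits 1,0,1,0,1,1,1,0,1 and pack MSB-first into the bytes {0xAE, 0x80}
// (the seven trailing bits of the last byte are unused padding).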
void printMatrix(uint32_t dim, double* mat) {
cout << "dim: " << dim << "x" << dim << "\n{\n";
for (uint32_t i = 0; i < dim; i++) {
for (uint32_t j = 0; j < dim; j++) {
cout << mat[i*dim+j] << ", ";
}
cout << '\n';
}
cout << "}\n";
}
void printBinary(uint32_t dim, uint32_t size_bin, unsigned char* mat) {
unsigned char rest;
for (uint32_t i = 0; i < size_bin; i++) {
rest = mat[i];
for (uint32_t j = 0; j < 8; j++) {
if (i * 8 + j == dim*dim) {
cout << "\n";
break;
}
if(rest - pow(2,7-j) >= 0) {
rest = rest - pow(2,7-j);
cout << "1 ";
} else {
cout << "0 ";
}
if((i * 8 + j + 1) % dim == 0) {
cout << "\n";
}
}
}
}
int main(int argc, char* argv[]) {
if (argc != 4) {
cout << "Usage: srun out <int: dimension of input matrix> <int: dimension of kernel> <blocksize>\n";
return 0;
}
uint32_t MATDIM = strtol(argv[1], NULL, 10);
uint32_t KERDIM = strtol(argv[2], NULL, 10);
uint32_t N = strtol(argv[3], NULL, 10);
uint32_t h_MATDIM[1];
h_MATDIM[0] = MATDIM;
uint32_t h_KERDIM[1];
h_KERDIM[0] = KERDIM;
struct timespec tstart={0,0}, tend={0,0};
double elapsed;
// Matrix (double)
double h_mat[MATDIM*MATDIM];
// Kernel (double)
double h_ker[KERDIM*KERDIM];
// Matrix (bits)
unsigned char h_mat_bin[(uint32_t) ceil(MATDIM*MATDIM/8.0)];
// Kernel (bits)
unsigned char h_ker_bin[(uint32_t) ceil(KERDIM*KERDIM/8.0)];
// Result of standard convolution
double h_res_standard[(MATDIM-KERDIM+1)*(MATDIM-KERDIM+1)];
// Result of convolution with binary weights
double h_res_binW[(MATDIM-KERDIM+1)*(MATDIM-KERDIM+1)];
// Result of convolution with binary weights and binary inputs
unsigned char h_res_binWbinI[(MATDIM-KERDIM+1)*(MATDIM-KERDIM+1)];
uint32_t mat_size = MATDIM*MATDIM * sizeof(double);
uint32_t ker_size = KERDIM*KERDIM * sizeof(double);
uint32_t mat_bin_size = (uint32_t) ceil(MATDIM*MATDIM/8.0) * sizeof(unsigned char);
uint32_t ker_bin_size = (uint32_t) ceil(KERDIM*KERDIM/8.0) * sizeof(unsigned char);
uint32_t res_standard_size = (MATDIM-KERDIM+1)*(MATDIM-KERDIM+1) * sizeof(double);
uint32_t res_binW_size = (MATDIM-KERDIM+1)*(MATDIM-KERDIM+1) * sizeof(double);
uint32_t res_binWbinI_size = (MATDIM-KERDIM+1)*(MATDIM-KERDIM+1) * sizeof(unsigned char);
// Pointers for allocation on device
uint32_t *d_MATDIM, *d_KERDIM;
double *d_mat, *d_ker, *d_res_standard, *d_res_binW;
unsigned char *d_mat_bin, *d_ker_bin, *d_res_binWbinI;
// Allocate all matrices on device (cudaFree later!)
cudaMalloc((void**) &d_mat, mat_size);
cudaMalloc((void**) &d_ker, ker_size);
cudaMalloc((void**) &d_mat_bin, mat_bin_size);
cudaMalloc((void**) &d_ker_bin, ker_bin_size);
cudaMalloc((void**) &d_res_standard, res_standard_size);
cudaMalloc((void**) &d_res_binW, res_binW_size);
cudaMalloc((void**) &d_res_binWbinI, res_binWbinI_size);
cudaMalloc((void**) &d_MATDIM, sizeof(uint32_t));
cudaMalloc((void**) &d_KERDIM, sizeof(uint32_t));
// Seed for random number generation
srand(time(NULL));
// Randomize the values of the double matrix with values -1.0 ... 1.0
initMat(MATDIM, h_mat);
// Convert the double matrix into binary (0 = -1, 1 = 1)
convertToBinary(MATDIM, h_mat, (uint32_t) ceil(MATDIM*MATDIM/8.0), h_mat_bin);
// TODO DEBUG: Print the double matrix.
printMatrix(MATDIM, h_mat);
// TODO DEBUG: Print the binary matrix.
printBinary(MATDIM, (uint32_t) ceil(MATDIM*MATDIM/8.0), h_mat_bin);
initMat(KERDIM, h_ker);
// Convert the double matrix into binary
convertToBinary(KERDIM, h_ker, (uint32_t) ceil(KERDIM*KERDIM/8.0), h_ker_bin);
// TODO DEBUG: Print the double matrix.
printMatrix(KERDIM, h_ker);
// TODO DEBUG: Print the binary matrix.
printBinary(KERDIM, (uint32_t) ceil(KERDIM*KERDIM/8.0), h_ker_bin);
// Copy all the matrices to the device (except the result matrices)
cudaMemcpy(d_mat, h_mat, mat_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_ker, h_ker, ker_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_mat_bin, h_mat_bin, mat_bin_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_ker_bin, h_ker_bin, ker_bin_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_MATDIM, h_MATDIM, sizeof(uint32_t), cudaMemcpyHostToDevice);
cudaMemcpy(d_KERDIM, h_KERDIM, sizeof(uint32_t), cudaMemcpyHostToDevice);
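	// One thread per output element: enough N-thread blocks to cover the
	// (MATDIM-KERDIM+1)^2 outputs of a valid (no-padding) convolution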
uint32_t grid_size = ceil((MATDIM-KERDIM+1) * (MATDIM-KERDIM+1) / ((double) N));
// Compute the different modes of convolution
clock_gettime(CLOCK_MONOTONIC, &tstart);
convStandard<<<grid_size, N>>>(d_MATDIM, d_KERDIM, d_mat, d_ker, d_res_standard);
clock_gettime(CLOCK_MONOTONIC, &tend);
elapsed = ((double)tend.tv_sec + 1.0e-9*tend.tv_nsec) - ((double)tstart.tv_sec + 1.0e-9*tstart.tv_nsec);
cout << "Standard convolution took " << elapsed << " seconds.\n";
clock_gettime(CLOCK_MONOTONIC, &tstart);
convBinW<<<grid_size, N>>>(d_MATDIM, d_KERDIM, d_mat, d_ker_bin, d_res_binW);
clock_gettime(CLOCK_MONOTONIC, &tend);
elapsed = ((double)tend.tv_sec + 1.0e-9*tend.tv_nsec) - ((double)tstart.tv_sec + 1.0e-9*tstart.tv_nsec);
cout << "Binary weights took " << elapsed << " nanoseconds.\n";
clock_gettime(CLOCK_MONOTONIC, &tstart);
convBinWBinI<<<grid_size, N>>>(d_MATDIM, d_KERDIM, d_mat_bin, d_ker_bin, d_res_binWbinI);
clock_gettime(CLOCK_MONOTONIC, &tend);
elapsed = ((double)tend.tv_sec + 1.0e-9*tend.tv_nsec) - ((double)tstart.tv_sec + 1.0e-9*tstart.tv_nsec);
cout << "Binary inputs and binary weights took " << elapsed << " nanoseconds.\n";
cout << elapsed << "\n";
// Fetch the results from device
cudaMemcpy(h_res_standard, d_res_standard, res_standard_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_res_binW, d_res_binW, res_binW_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_res_binWbinI, d_res_binWbinI, res_binWbinI_size, cudaMemcpyDeviceToHost);
// TODO DEBUG: Print the results
cout << "Standard convolution DOUBLExDOUBLE\n";
printMatrix(MATDIM-KERDIM+1, h_res_standard);
cout << "Binary weight convolution DOUBLExBITS\n";
printMatrix(MATDIM-KERDIM+1, h_res_binW);
cout << "Binary weights and binary inputs BITSxBITS\n";
	// h_res_binWbinI holds popcount bytes; print it with the integer loop below
	// (reinterpreting the buffer as double* would read far past its end)
cout << "dim: " << MATDIM-KERDIM+1 << "x" << MATDIM-KERDIM+1 << "\n{\n";
for (uint32_t i = 0; i < MATDIM-KERDIM+1; i++) {
for (uint32_t j = 0; j < MATDIM-KERDIM+1; j++) {
cout << (uint32_t) h_res_binWbinI[i*(MATDIM-KERDIM+1)+j] << ", ";
}
cout << '\n';
}
cout << "}\n";
cudaFree(d_mat);
cudaFree(d_ker);
cudaFree(d_mat_bin);
cudaFree(d_ker_bin);
cudaFree(d_res_standard);
cudaFree(d_res_binW);
cudaFree(d_res_binWbinI);
cudaFree(d_MATDIM);
cudaFree(d_KERDIM);
return 0;
}
|
36120f2b08c1917fbb2ef4f17162466d1e5e1c02.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include <stdio.h>
#include <math.h>
#include <float.h>
#include "corr_cuda_kernel.h"
#define ROUND_OFF 50000
#define CUDA_NUM_THREADS 1024
#define WARPS_PER_BLOCK 1
#define THREADS_PER_WARP 32
#define CUDA_KERNEL_LOOP(i, n) for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x)
#define GET_BLOCKS(n, t) (n+t-1) / t
// == Dimension rearrangement Kernel
__global__ void blob_rearrange_kernel2(const float *in, float *out, int num, int channels, int width, int height, int widthheight, int padding, int pwidthheight)
{
int xy = blockIdx.x*blockDim.x + threadIdx.x;
if(xy>=widthheight)
return;
int ch = blockIdx.y;
int n = blockIdx.z;
float value=in[(n*channels+ch)*widthheight+xy];
__syncthreads();
int xpad = (xy % width + padding);
int ypad = (xy / width + padding);
int xypad = ypad * (width+2*padding) + xpad;
out[(n*pwidthheight+xypad)*channels + ch] = value;
}
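// Net effect: repack the NCHW input into a padded NHWC layout for the
// correlation kernels below. The pad border itself is never written, so the
// output buffer is assumed to be zero-initialized by the caller.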
void blob_rearrange_ongpu(const float *in, float *out, int num, int channels, int width, int height, int widthheight, int padding, int pwidthheight, hipStream_t stream)
{
int threads_per_block=16;
dim3 totalBlocksRearr((widthheight-1)/threads_per_block+1, channels, num);
hipError_t err;
hipLaunchKernelGGL(( blob_rearrange_kernel2), dim3(totalBlocksRearr), dim3(threads_per_block), 0, stream,
in, out, num, channels, width, height, widthheight, padding, pwidthheight);
err = hipGetLastError();
if(hipSuccess != err)
{
fprintf(stderr, "cudaCheckError() failed: %s\n", hipGetErrorString(err));
exit(-1);
}
}
// == Correlation Kernel
__global__ void CorrelateData(const int nthreads, int num, int topwidth, int topheight, int topchannels, int topcount,
int max_displacement, int neighborhood_grid_radius, int neighborhood_grid_width, int kernel_radius, int kernel_size, int stride1, int stride2,
int bottomwidth, int bottomheight, int bottomchannels,
const float *bottom0, const float *bottom1, float *top)
{
extern __shared__ char patch_data_char[];
float *patch_data = (float *)patch_data_char;
  // Upper-left corner of the kernel patch for the current neighborhood center in image 1
int x1 = blockIdx.x*stride1 + max_displacement;
int y1 = blockIdx.y*stride1 + max_displacement;
int item = blockIdx.z;
int ch_off = threadIdx.x;
  // Load the 3D patch into shared memory
for(int j = 0; j < kernel_size; j++) { // HEIGHT
for(int i = 0; i < kernel_size; i++) { // WIDTH
int ji_off = ((j * kernel_size) + i) * bottomchannels;
for(int ch = ch_off; ch < bottomchannels; ch += (WARPS_PER_BLOCK*THREADS_PER_WARP)) { // CHANNELS
int idx1 = ((item * bottomheight + y1+j) * bottomwidth + x1+i) * bottomchannels + ch;
int idxPatchData = ji_off + ch;
patch_data[idxPatchData] = bottom0[idx1];
}
}
}
__syncthreads();
__shared__ float sum[WARPS_PER_BLOCK*THREADS_PER_WARP];
// Compute correlation
for(int top_channel = 0; top_channel < topchannels; top_channel++) {
sum[ch_off] = 0;
int s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2;
int s2p = (top_channel / neighborhood_grid_width - neighborhood_grid_radius) * stride2;
for(int j = 0; j < kernel_size; j++) { // HEIGHT
for(int i = 0; i < kernel_size; i++) { // WIDTH
int ji_off = ((j * kernel_size) + i) * bottomchannels;
for(int ch = ch_off; ch < bottomchannels; ch += (WARPS_PER_BLOCK*THREADS_PER_WARP)) { // CHANNELS
int x2 = x1 + s2o;
int y2 = y1 + s2p;
int idxPatchData = ji_off + ch;
int idx2 = ((item * bottomheight + y2+j) * bottomwidth + x2+i) * bottomchannels + ch;
sum[ch_off] += patch_data[idxPatchData] * bottom1[idx2];
}
}
}
__syncthreads();
if(ch_off == 0) {
float total_sum = 0;
for(int idx = 0; idx < WARPS_PER_BLOCK*THREADS_PER_WARP; idx++) {
total_sum += sum[idx];
}
const int sumelems = kernel_size*kernel_size*bottomchannels;
const int index = ((top_channel*topheight + blockIdx.y)*topwidth)+blockIdx.x;
top[index + item*topcount] = total_sum / (float)sumelems;
}
}
// Aggregate
}
__global__ void CorrelateDataSubtract(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels, int topcount,
int max_displacement, int neighborhood_grid_radius, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2,
int bottomwidth, int bottomheight, int bottomchannels,
const float *bottom0, const float *bottom1, float *top)
{
CUDA_KERNEL_LOOP(index, nthreads) {
int x = index % topwidth; //w-pos
int y = (index / topwidth) % topheight; //h-pos
int c = (index / topwidth / topheight) % topchannels; //channels
// Offset of patch in image 2
int s2o = (c % neighborhood_grid_width - neighborhood_grid_radius) * stride2;
int s2p = (c / neighborhood_grid_width - neighborhood_grid_radius) * stride2;
// First (upper left) position of kernel center in current neighborhood in image 1
int x1 = x*stride1 + kernel_radius + max_displacement;
int y1 = y*stride1 + kernel_radius + max_displacement;
// Iterate through 3D patch
float sum = 0;
for(int j = -kernel_radius; j <= kernel_radius; j++) { // HEIGHT
for(int i = -kernel_radius; i <= kernel_radius; i++) { // WIDTH
for(int l = 0; l < bottomchannels; l++) { // CHANNELS
// Calculate position in image 2
int x2 = x1 + s2o;
int y2 = y1 + s2p;
// Indices in bottom data: (CH=l,W=x2,H=y2,N)
int idx1 = ((item * bottomheight + y1+j) * bottomwidth + x1+i) * bottomchannels + l;
int idx2 = ((item * bottomheight + y2+j) * bottomwidth + x2+i) * bottomchannels + l;
// Do the correlation:
sum += fabsf(bottom0[idx1] - bottom1[idx2]);
}
}
}
const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels;
top[index + item*topcount] = sum / (float)sumelems;
}
}
void CorrelateData_ongpu(const float *rbot1, const float *rbot2, float *output, int batchSize, int nOutputCols, int nOutputRows, int nOutputPlane, int max_displacement, int neighborhood_grid_radius_, int neighborhood_grid_width_, int kernel_radius_, int kernel_size, int stride1, int stride2, int paddedbottomwidth, int paddedbottomheight, int nInputPlane, int corr_type_multiply, hipStream_t stream)
{
dim3 threadsPerBlock(THREADS_PER_WARP * WARPS_PER_BLOCK);
int shared_memory_per_block = (kernel_size*kernel_size)*nInputPlane;
int outputCount = nOutputCols * nOutputRows * nOutputPlane;
int outputThreadCount = outputCount;
if (corr_type_multiply == 1) {
dim3 totalBlocksCorr(nOutputCols, nOutputRows, batchSize);
hipLaunchKernelGGL(( CorrelateData), dim3(totalBlocksCorr), dim3(threadsPerBlock), shared_memory_per_block * sizeof(float), stream,
outputThreadCount,
batchSize, nOutputCols, nOutputRows, nOutputPlane, outputCount,
max_displacement, neighborhood_grid_radius_,
neighborhood_grid_width_, kernel_radius_, kernel_size,
stride1, stride2,
paddedbottomwidth, paddedbottomheight, nInputPlane,
rbot1, rbot2, output
);
} else {
for (int n = 0; n < batchSize; n++) {
hipLaunchKernelGGL(( CorrelateDataSubtract), dim3(GET_BLOCKS(outputThreadCount, CUDA_NUM_THREADS)), dim3(CUDA_NUM_THREADS), 0, stream,
outputThreadCount,
batchSize, n, nOutputCols, nOutputRows, nOutputPlane, outputCount,
max_displacement, neighborhood_grid_radius_, neighborhood_grid_width_,
kernel_radius_, stride1, stride2,
paddedbottomwidth, paddedbottomheight, nInputPlane,
rbot1, rbot2, output
);
}
}
}
// == Correlation Backward Pass Kernel (For Blob 0)
__global__ void CorrelateDataBackward0(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels,
int max_displacement, int neighborhood_grid_radius, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2,
int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight, int bottomchannels, int bottomcount, int pad_size,
float *bottom0diff, const float *bottom1, const float *topdiff)
{
CUDA_KERNEL_LOOP(index, nthreads) {
int n = index % bottomchannels; //channels
int l = (index / bottomchannels) % bottomwidth + pad_size; //w-pos
int m = (index / bottomchannels / bottomwidth) % bottomheight + pad_size; //h-pos
//Get X,Y ranges and clamp
// round_off is a trick to enable integer division with ceil, even for negative numbers
// We use a large offset, for the inner part not to become negative.
const int round_off = ROUND_OFF;
const int round_off_s1 = stride1 * round_off;
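    // Worked example (assumed values): stride1 = 2 and numerator v = -4, so
    // ceil(v/stride1) = -2. The usual (v - 1)/stride1 + 1 trick truncates toward
    // zero for negative v and yields -1; adding round_off_s1 first and
    // subtracting round_off afterwards gives the correct -2.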
    // We add round_off before the int division and subtract it afterwards, so the formula matches ceil behavior:
    int xmin = (l - 2*kernel_radius - max_displacement + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1
    int ymin = (m - 2*kernel_radius - max_displacement + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (m - 2*kernel_radius - max_displacement) / stride1
// Same here:
int xmax = (l - max_displacement + round_off_s1) / stride1 - round_off; // floor (l - max_displacement) / stride1
int ymax = (m - max_displacement + round_off_s1) / stride1 - round_off; // floor (m - max_displacement) / stride1
float sum = 0;
if(xmax>=0 && ymax>=0 && (xmin<=topwidth-1) && (ymin<=topheight-1))
{
xmin = max(0,xmin);
xmax = min(topwidth-1,xmax);
ymin = max(0,ymin);
ymax = min(topheight-1,ymax);
for(int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) {
for(int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) {
// Get bottom1 data:
int s2o = stride2 * o;
int s2p = stride2 * p;
int idxbot1 = ((item * pbottomheight + (m+s2p)) * pbottomwidth + (l+s2o)) * bottomchannels + n;
float bot1tmp = bottom1[idxbot1]; // bottom1[l+s2o,m+s2p,n]
// Index offset for topdiff in following loops:
int op = (p+neighborhood_grid_radius) * neighborhood_grid_width + (o+neighborhood_grid_radius); // index [o,p]
int idxopoffset = (item * topchannels + op);
for(int y = ymin; y <= ymax; y++) {
for(int x = xmin; x <= xmax; x++) {
int idxtopdiff = (idxopoffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p]
sum += topdiff[idxtopdiff] * bot1tmp;
}
}
}
}
}
const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels;
const int bot0index = ((n * bottomheight) + (m-pad_size)) * bottomwidth + (l-pad_size);
bottom0diff[bot0index + item*bottomcount] = sum / (float)sumelems;
}
}
// == Correlation Backward Pass Kernel (For Blob 1)
__global__ void CorrelateDataBackward1(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels,
int max_displacement, int neighborhood_grid_radius, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2,
int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight, int bottomchannels, int bottomcount, int pad_size,
const float *bottom0, float *bottom1diff, const float *topdiff)
{
CUDA_KERNEL_LOOP(index, nthreads) {
//int l = index % bottomwidth + pad_size; //w-pos
//int m = (index / bottomwidth) % bottomheight + pad_size; //h-pos
//int n = (index / bottomwidth / bottomheight) % bottomchannels; //channels
int n = index % bottomchannels; //channels
int l = (index / bottomchannels) % bottomwidth + pad_size; //w-pos
int m = (index / bottomchannels / bottomwidth) % bottomheight + pad_size; //h-pos
// round_off is a trick to enable integer division with ceil, even for negative numbers
// We use a large offset, for the inner part not to become negative.
const int round_off = ROUND_OFF;
const int round_off_s1 = stride1 * round_off;
float sum = 0;
for(int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) {
for(int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) {
int s2o = stride2 * o;
int s2p = stride2 * p;
//Get X,Y ranges and clamp
        // We add round_off before the int division and subtract it afterwards, so the formula matches ceil behavior:
        int xmin = (l - 2*kernel_radius - max_displacement - s2o + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1
        int ymin = (m - 2*kernel_radius - max_displacement - s2p + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (m - 2*kernel_radius - max_displacement - s2p) / stride1
// Same here:
int xmax = (l - max_displacement - s2o + round_off_s1) / stride1 - round_off; // floor (l - max_displacement - s2o) / stride1
int ymax = (m - max_displacement - s2p + round_off_s1) / stride1 - round_off; // floor (m - max_displacement - s2p) / stride1
if(xmax>=0 && ymax>=0 && (xmin<=topwidth-1) && (ymin<=topheight-1))
{
xmin = max(0,xmin);
xmax = min(topwidth-1,xmax);
ymin = max(0,ymin);
ymax = min(topheight-1,ymax);
// Get bottom0 data:
int idxbot0 = ((item * pbottomheight + (m-s2p)) * pbottomwidth + (l-s2o)) * bottomchannels + n;
          float bot0tmp = bottom0[idxbot0]; // bottom0[l-s2o,m-s2p,n]
// Index offset for topdiff in following loops:
int op = (p+neighborhood_grid_radius) * neighborhood_grid_width + (o+neighborhood_grid_radius); // index [o,p]
int idxOpOffset = (item * topchannels + op);
for(int y = ymin; y <= ymax; y++) {
for(int x = xmin; x <= xmax; x++) {
int idxtopdiff = (idxOpOffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p]
sum += topdiff[idxtopdiff] * bot0tmp;
}
}
}
}
}
const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels;
const int bot1index = ((n * bottomheight) + (m-pad_size)) * bottomwidth + (l-pad_size);
bottom1diff[bot1index + item*bottomcount] = sum / (float)sumelems;
}
}
// == Correlation Backward Pass Kernel, Subtraction Variant (For Blob 0)
__global__ void CorrelateDataBackward0Subtract(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels,
int max_displacement, int neighborhood_grid_radius, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2,
int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight, int bottomchannels, int bottomcount, int pad_size,
float *bottom0diff, const float *bottom0, const float *bottom1, const float *topdiff)
{
CUDA_KERNEL_LOOP(index, nthreads) {
int l = index % bottomwidth + pad_size; //w-pos
int m = (index / bottomwidth) % bottomheight + pad_size; //h-pos
int n = (index / bottomwidth / bottomheight) % bottomchannels; //channels
//Get X,Y ranges and clamp
// round_off is a trick to enable integer division with ceil, even for negative numbers
// We use a large offset, for the inner part not to become negative.
const int round_off = ROUND_OFF;
const int round_off_s1 = stride1 * round_off;
    // We add round_off before the int division and subtract it afterwards, so the formula matches ceil behavior:
    int xmin = (l - 2*kernel_radius - max_displacement + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1
    int ymin = (m - 2*kernel_radius - max_displacement + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (m - 2*kernel_radius - max_displacement) / stride1
// Same here:
int xmax = (l - max_displacement + round_off_s1) / stride1 - round_off; // floor (l - max_displacement) / stride1
int ymax = (m - max_displacement + round_off_s1) / stride1 - round_off; // floor (m - max_displacement) / stride1
float sum = 0;
if(xmax>=0 && ymax>=0 && (xmin<=topwidth-1) && (ymin<=topheight-1))
{
xmin = max(0,xmin);
xmax = min(topwidth-1,xmax);
ymin = max(0,ymin);
ymax = min(topheight-1,ymax);
for(int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) {
for(int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) {
// Get bottom1 data:
int s2o = stride2 * o;
int s2p = stride2 * p;
int idxbot = ((item * pbottomheight + (m+s2p)) * pbottomwidth + (l+s2o)) * bottomchannels + n;
float bot0tmp = bottom0[idxbot]; // bottom0[l+s2o,m+s2p,n]
float bot1tmp = bottom1[idxbot]; // bottom1[l+s2o,m+s2p,n]
float sign = (bot0tmp >= bot1tmp) ? float(1.0) : float(-1.0);
// Index offset for topdiff in following loops:
int op = (p+neighborhood_grid_radius) * neighborhood_grid_width + (o+neighborhood_grid_radius); // index [o,p]
int idxopoffset = (item * topchannels + op);
for(int y = ymin; y <= ymax; y++) {
for(int x = xmin; x <= xmax; x++) {
int idxtopdiff = (idxopoffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p]
sum += topdiff[idxtopdiff] * sign;
}
}
}
}
}
const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels;
bottom0diff[index + item*bottomcount] = sum / (float)sumelems;
}
}
// == Correlation Backward Pass Kernel, Subtraction Variant (For Blob 1)
__global__ void CorrelateDataBackward1Subtract(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels,
int max_displacement, int neighborhood_grid_radius, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2,
int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight, int bottomchannels, int bottomcount, int pad_size,
const float *bottom0, const float *bottom1, float *bottom1diff, const float *topdiff)
{
CUDA_KERNEL_LOOP(index, nthreads) {
int l = index % bottomwidth + pad_size; //w-pos
int m = (index / bottomwidth) % bottomheight + pad_size; //h-pos
int n = (index / bottomwidth / bottomheight) % bottomchannels; //channels
// round_off is a trick to enable integer division with ceil, even for negative numbers
// We use a large offset, for the inner part not to become negative.
const int round_off = ROUND_OFF;
const int round_off_s1 = stride1 * round_off;
float sum = 0;
for(int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) {
for(int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) {
int s2o = stride2 * o;
int s2p = stride2 * p;
//Get X,Y ranges and clamp
        // We add round_off before the int division and subtract it afterwards, so the formula matches ceil behavior:
        int xmin = (l - 2*kernel_radius - max_displacement - s2o + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1
        int ymin = (m - 2*kernel_radius - max_displacement - s2p + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (m - 2*kernel_radius - max_displacement - s2p) / stride1
// Same here:
int xmax = (l - max_displacement - s2o + round_off_s1) / stride1 - round_off; // floor (l - max_displacement - s2o) / stride1
int ymax = (m - max_displacement - s2p + round_off_s1) / stride1 - round_off; // floor (m - max_displacement - s2p) / stride1
if(xmax>=0 && ymax>=0 && (xmin<=topwidth-1) && (ymin<=topheight-1))
{
xmin = max(0,xmin);
xmax = min(topwidth-1,xmax);
ymin = max(0,ymin);
ymax = min(topheight-1,ymax);
// Get bottom0 data:
int idxbot = ((item * pbottomheight + (m-s2p)) * pbottomwidth + (l-s2o)) * bottomchannels + n;
          float bot0tmp = bottom0[idxbot]; // bottom0[l-s2o,m-s2p,n]
          float bot1tmp = bottom1[idxbot]; // bottom1[l-s2o,m-s2p,n]
float sign = (bot0tmp >= bot1tmp) ? float(-1.0) : float(1.0);
// Index offset for topdiff in following loops:
int op = (p+neighborhood_grid_radius) * neighborhood_grid_width + (o+neighborhood_grid_radius); // index [o,p]
int idxOpOffset = (item * topchannels + op);
for(int y = ymin; y <= ymax; y++) {
for(int x = xmin; x <= xmax; x++) {
int idxtopdiff = (idxOpOffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p]
sum += topdiff[idxtopdiff] * sign;
}
}
}
}
}
const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels;
bottom1diff[index + item*bottomcount] = sum / (float)sumelems;
}
}
void CorrelateDataBackward_ongpu(const float *rbot1, const float *rbot2, const float *gradOutput, float *gradInput1, float *gradInput2, int batchSize, int nOutputCols, int nOutputRows, int nOutputPlane, int max_displacement, int neighborhood_grid_radius_, int neighborhood_grid_width_, int kernel_radius_, int stride1, int stride2, int nInputCols, int nInputRows, int paddedbottomwidth, int paddedbottomheight, int nInputPlane, int pad_size, int corr_type_multiply, hipStream_t stream)
{
int inputCount = nInputPlane * nInputRows * nInputCols;
int botThreadCount = inputCount;
if (corr_type_multiply == 1) {
// == Run kernel Backward 0
for (int n = 0; n < batchSize; n++) {
//Bottom0
hipLaunchKernelGGL(( CorrelateDataBackward0), dim3(GET_BLOCKS(botThreadCount, CUDA_NUM_THREADS)), dim3(CUDA_NUM_THREADS), 0, stream,
botThreadCount,
batchSize, n, nOutputCols, nOutputRows, nOutputPlane,
max_displacement, neighborhood_grid_radius_, neighborhood_grid_width_,
kernel_radius_, stride1, stride2, nInputCols, nInputRows,
paddedbottomwidth, paddedbottomheight, nInputPlane, inputCount, pad_size,
gradInput1, rbot2, gradOutput
);
}
// == Run kernel Backward 1
for (int n = 0; n < batchSize; n++) {
hipLaunchKernelGGL(( CorrelateDataBackward1), dim3(GET_BLOCKS(botThreadCount, CUDA_NUM_THREADS)), dim3(CUDA_NUM_THREADS), 0, stream,
botThreadCount, batchSize, n, nOutputCols, nOutputRows, nOutputPlane,
max_displacement, neighborhood_grid_radius_, neighborhood_grid_width_,
kernel_radius_, stride1, stride2, nInputCols, nInputRows,
paddedbottomwidth, paddedbottomheight, nInputPlane, inputCount, pad_size,
rbot1, gradInput2, gradOutput
);
}
} else {
for ( int n = 0; n < batchSize; n++ ) {
//Bottom0
hipLaunchKernelGGL(( CorrelateDataBackward0Subtract), dim3(GET_BLOCKS(botThreadCount, CUDA_NUM_THREADS)), dim3(CUDA_NUM_THREADS), 0, stream,
botThreadCount,
batchSize, n, nOutputCols, nOutputRows, nOutputPlane,
max_displacement, neighborhood_grid_radius_, neighborhood_grid_width_,
kernel_radius_, stride1, stride2, nInputCols, nInputRows,
paddedbottomwidth, paddedbottomheight, nInputPlane, inputCount, pad_size,
gradInput1, rbot1, rbot2, gradOutput
);
}
for (int n = 0; n < batchSize; n++ ) {
      //Bottom1
hipLaunchKernelGGL(( CorrelateDataBackward1Subtract), dim3(GET_BLOCKS(botThreadCount, CUDA_NUM_THREADS)), dim3(CUDA_NUM_THREADS), 0, stream,
botThreadCount,
batchSize, n, nOutputCols, nOutputRows, nOutputPlane,
max_displacement, neighborhood_grid_radius_, neighborhood_grid_width_,
kernel_radius_, stride1, stride2, nInputCols, nInputRows,
paddedbottomwidth, paddedbottomheight, nInputPlane, inputCount, pad_size,
rbot1, rbot2, gradInput2, gradOutput
);
}
}
}
| 36120f2b08c1917fbb2ef4f17162466d1e5e1c02.cu | #include <vector>
#include <stdio.h>
#include <math.h>
#include <float.h>
#include "corr_cuda_kernel.h"
#define ROUND_OFF 50000
#define CUDA_NUM_THREADS 1024
#define WARPS_PER_BLOCK 1
#define THREADS_PER_WARP 32
#define CUDA_KERNEL_LOOP(i, n) for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x)
#define GET_BLOCKS(n, t) (n+t-1) / t
// == Dimension rearrangement Kernel
__global__ void blob_rearrange_kernel2(const float *in, float *out, int num, int channels, int width, int height, int widthheight, int padding, int pwidthheight)
{
int xy = blockIdx.x*blockDim.x + threadIdx.x;
if(xy>=widthheight)
return;
int ch = blockIdx.y;
int n = blockIdx.z;
float value=in[(n*channels+ch)*widthheight+xy];
__syncthreads();
int xpad = (xy % width + padding);
int ypad = (xy / width + padding);
int xypad = ypad * (width+2*padding) + xpad;
out[(n*pwidthheight+xypad)*channels + ch] = value;
}
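// Net effect: repack the NCHW input into a padded NHWC layout for the
// correlation kernels below. The pad border itself is never written, so the
// output buffer is assumed to be zero-initialized by the caller.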
void blob_rearrange_ongpu(const float *in, float *out, int num, int channels, int width, int height, int widthheight, int padding, int pwidthheight, cudaStream_t stream)
{
int threads_per_block=16;
dim3 totalBlocksRearr((widthheight-1)/threads_per_block+1, channels, num);
cudaError_t err;
blob_rearrange_kernel2<<<totalBlocksRearr, threads_per_block, 0, stream>>>
(in, out, num, channels, width, height, widthheight, padding, pwidthheight);
err = cudaGetLastError();
if(cudaSuccess != err)
{
fprintf(stderr, "cudaCheckError() failed: %s\n", cudaGetErrorString(err));
exit(-1);
}
}
// == Correlation Kernel
__global__ void CorrelateData(const int nthreads, int num, int topwidth, int topheight, int topchannels, int topcount,
int max_displacement, int neighborhood_grid_radius, int neighborhood_grid_width, int kernel_radius, int kernel_size, int stride1, int stride2,
int bottomwidth, int bottomheight, int bottomchannels,
const float *bottom0, const float *bottom1, float *top)
{
extern __shared__ char patch_data_char[];
float *patch_data = (float *)patch_data_char;
  // Upper-left corner of the kernel patch for the current neighborhood center in image 1
int x1 = blockIdx.x*stride1 + max_displacement;
int y1 = blockIdx.y*stride1 + max_displacement;
int item = blockIdx.z;
int ch_off = threadIdx.x;
  // Load the 3D patch into shared memory
for(int j = 0; j < kernel_size; j++) { // HEIGHT
for(int i = 0; i < kernel_size; i++) { // WIDTH
int ji_off = ((j * kernel_size) + i) * bottomchannels;
for(int ch = ch_off; ch < bottomchannels; ch += (WARPS_PER_BLOCK*THREADS_PER_WARP)) { // CHANNELS
int idx1 = ((item * bottomheight + y1+j) * bottomwidth + x1+i) * bottomchannels + ch;
int idxPatchData = ji_off + ch;
patch_data[idxPatchData] = bottom0[idx1];
}
}
}
__syncthreads();
__shared__ float sum[WARPS_PER_BLOCK*THREADS_PER_WARP];
// Compute correlation
for(int top_channel = 0; top_channel < topchannels; top_channel++) {
sum[ch_off] = 0;
int s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2;
int s2p = (top_channel / neighborhood_grid_width - neighborhood_grid_radius) * stride2;
for(int j = 0; j < kernel_size; j++) { // HEIGHT
for(int i = 0; i < kernel_size; i++) { // WIDTH
int ji_off = ((j * kernel_size) + i) * bottomchannels;
for(int ch = ch_off; ch < bottomchannels; ch += (WARPS_PER_BLOCK*THREADS_PER_WARP)) { // CHANNELS
int x2 = x1 + s2o;
int y2 = y1 + s2p;
int idxPatchData = ji_off + ch;
int idx2 = ((item * bottomheight + y2+j) * bottomwidth + x2+i) * bottomchannels + ch;
sum[ch_off] += patch_data[idxPatchData] * bottom1[idx2];
}
}
}
__syncthreads();
if(ch_off == 0) {
float total_sum = 0;
for(int idx = 0; idx < WARPS_PER_BLOCK*THREADS_PER_WARP; idx++) {
total_sum += sum[idx];
}
const int sumelems = kernel_size*kernel_size*bottomchannels;
const int index = ((top_channel*topheight + blockIdx.y)*topwidth)+blockIdx.x;
top[index + item*topcount] = total_sum / (float)sumelems;
}
}
// Aggregate
}
__global__ void CorrelateDataSubtract(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels, int topcount,
int max_displacement, int neighborhood_grid_radius, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2,
int bottomwidth, int bottomheight, int bottomchannels,
const float *bottom0, const float *bottom1, float *top)
{
CUDA_KERNEL_LOOP(index, nthreads) {
int x = index % topwidth; //w-pos
int y = (index / topwidth) % topheight; //h-pos
int c = (index / topwidth / topheight) % topchannels; //channels
// Offset of patch in image 2
int s2o = (c % neighborhood_grid_width - neighborhood_grid_radius) * stride2;
int s2p = (c / neighborhood_grid_width - neighborhood_grid_radius) * stride2;
// First (upper left) position of kernel center in current neighborhood in image 1
int x1 = x*stride1 + kernel_radius + max_displacement;
int y1 = y*stride1 + kernel_radius + max_displacement;
// Iterate through 3D patch
float sum = 0;
for(int j = -kernel_radius; j <= kernel_radius; j++) { // HEIGHT
for(int i = -kernel_radius; i <= kernel_radius; i++) { // WIDTH
for(int l = 0; l < bottomchannels; l++) { // CHANNELS
// Calculate position in image 2
int x2 = x1 + s2o;
int y2 = y1 + s2p;
// Indices in bottom data: (CH=l,W=x2,H=y2,N)
int idx1 = ((item * bottomheight + y1+j) * bottomwidth + x1+i) * bottomchannels + l;
int idx2 = ((item * bottomheight + y2+j) * bottomwidth + x2+i) * bottomchannels + l;
// Do the correlation:
sum += fabsf(bottom0[idx1] - bottom1[idx2]);
}
}
}
const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels;
top[index + item*topcount] = sum / (float)sumelems;
}
}
void CorrelateData_ongpu(const float *rbot1, const float *rbot2, float *output, int batchSize, int nOutputCols, int nOutputRows, int nOutputPlane, int max_displacement, int neighborhood_grid_radius_, int neighborhood_grid_width_, int kernel_radius_, int kernel_size, int stride1, int stride2, int paddedbottomwidth, int paddedbottomheight, int nInputPlane, int corr_type_multiply, cudaStream_t stream)
{
dim3 threadsPerBlock(THREADS_PER_WARP * WARPS_PER_BLOCK);
int shared_memory_per_block = (kernel_size*kernel_size)*nInputPlane;
int outputCount = nOutputCols * nOutputRows * nOutputPlane;
int outputThreadCount = outputCount;
if (corr_type_multiply == 1) {
dim3 totalBlocksCorr(nOutputCols, nOutputRows, batchSize);
CorrelateData<<<totalBlocksCorr, threadsPerBlock, shared_memory_per_block * sizeof(float), stream>>>(
outputThreadCount,
batchSize, nOutputCols, nOutputRows, nOutputPlane, outputCount,
max_displacement, neighborhood_grid_radius_,
neighborhood_grid_width_, kernel_radius_, kernel_size,
stride1, stride2,
paddedbottomwidth, paddedbottomheight, nInputPlane,
rbot1, rbot2, output
);
} else {
for (int n = 0; n < batchSize; n++) {
CorrelateDataSubtract<<<GET_BLOCKS(outputThreadCount, CUDA_NUM_THREADS), CUDA_NUM_THREADS, 0, stream>>>(
outputThreadCount,
batchSize, n, nOutputCols, nOutputRows, nOutputPlane, outputCount,
max_displacement, neighborhood_grid_radius_, neighborhood_grid_width_,
kernel_radius_, stride1, stride2,
paddedbottomwidth, paddedbottomheight, nInputPlane,
rbot1, rbot2, output
);
}
}
}
// == Correlation Backward Pass Kernel (For Blob 0)
__global__ void CorrelateDataBackward0(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels,
int max_displacement, int neighborhood_grid_radius, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2,
int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight, int bottomchannels, int bottomcount, int pad_size,
float *bottom0diff, const float *bottom1, const float *topdiff)
{
CUDA_KERNEL_LOOP(index, nthreads) {
int n = index % bottomchannels; //channels
int l = (index / bottomchannels) % bottomwidth + pad_size; //w-pos
int m = (index / bottomchannels / bottomwidth) % bottomheight + pad_size; //h-pos
//Get X,Y ranges and clamp
// round_off is a trick to enable integer division with ceil, even for negative numbers
// We use a large offset, for the inner part not to become negative.
const int round_off = ROUND_OFF;
const int round_off_s1 = stride1 * round_off;
// We add round_off before the int division and subtract it afterwards, so the formula matches ceil behavior:
int xmin = (l - 2*kernel_radius - max_displacement + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1
int ymin = (m - 2*kernel_radius - max_displacement + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (m - 2*kernel_radius - max_displacement) / stride1
// Same here:
int xmax = (l - max_displacement + round_off_s1) / stride1 - round_off; // floor (l - max_displacement) / stride1
int ymax = (m - max_displacement + round_off_s1) / stride1 - round_off; // floor (m - max_displacement) / stride1
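// Worked example (added, illustrative): with stride1 = 2 and
// l - 2*kernel_radius - max_displacement = -3, plain integer division would
// truncate toward zero, but the offset form gives
//   (-3 + 2*ROUND_OFF - 1)/2 + 1 - ROUND_OFF = (2*ROUND_OFF - 4)/2 + 1 - ROUND_OFF = -1,
// which matches ceil(-3/2) = -1.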
float sum = 0;
if(xmax>=0 && ymax>=0 && (xmin<=topwidth-1) && (ymin<=topheight-1))
{
xmin = max(0,xmin);
xmax = min(topwidth-1,xmax);
ymin = max(0,ymin);
ymax = min(topheight-1,ymax);
for(int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) {
for(int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) {
// Get bottom1 data:
int s2o = stride2 * o;
int s2p = stride2 * p;
int idxbot1 = ((item * pbottomheight + (m+s2p)) * pbottomwidth + (l+s2o)) * bottomchannels + n;
float bot1tmp = bottom1[idxbot1]; // bottom1[l+s2o,m+s2p,n]
// Index offset for topdiff in following loops:
int op = (p+neighborhood_grid_radius) * neighborhood_grid_width + (o+neighborhood_grid_radius); // index [o,p]
int idxopoffset = (item * topchannels + op);
for(int y = ymin; y <= ymax; y++) {
for(int x = xmin; x <= xmax; x++) {
int idxtopdiff = (idxopoffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p]
sum += topdiff[idxtopdiff] * bot1tmp;
}
}
}
}
}
const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels;
const int bot0index = ((n * bottomheight) + (m-pad_size)) * bottomwidth + (l-pad_size);
bottom0diff[bot0index + item*bottomcount] = sum / (float)sumelems;
}
}
// == Correlation Backward Pass Kernel (For Blob 1)
__global__ void CorrelateDataBackward1(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels,
int max_displacement, int neighborhood_grid_radius, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2,
int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight, int bottomchannels, int bottomcount, int pad_size,
const float *bottom0, float *bottom1diff, const float *topdiff)
{
CUDA_KERNEL_LOOP(index, nthreads) {
//int l = index % bottomwidth + pad_size; //w-pos
//int m = (index / bottomwidth) % bottomheight + pad_size; //h-pos
//int n = (index / bottomwidth / bottomheight) % bottomchannels; //channels
int n = index % bottomchannels; //channels
int l = (index / bottomchannels) % bottomwidth + pad_size; //w-pos
int m = (index / bottomchannels / bottomwidth) % bottomheight + pad_size; //h-pos
// round_off is a trick to enable integer division with ceil, even for negative numbers
// We use a large offset, for the inner part not to become negative.
const int round_off = ROUND_OFF;
const int round_off_s1 = stride1 * round_off;
float sum = 0;
for(int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) {
for(int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) {
int s2o = stride2 * o;
int s2p = stride2 * p;
//Get X,Y ranges and clamp
// We add round_off before the int division and subtract it afterwards, so the formula matches ceil behavior:
int xmin = (l - 2*kernel_radius - max_displacement - s2o + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1
int ymin = (m - 2*kernel_radius - max_displacement - s2p + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (m - 2*kernel_radius - max_displacement - s2p) / stride1
// Same here:
int xmax = (l - max_displacement - s2o + round_off_s1) / stride1 - round_off; // floor (l - max_displacement - s2o) / stride1
int ymax = (m - max_displacement - s2p + round_off_s1) / stride1 - round_off; // floor (m - max_displacement - s2p) / stride1
if(xmax>=0 && ymax>=0 && (xmin<=topwidth-1) && (ymin<=topheight-1))
{
xmin = max(0,xmin);
xmax = min(topwidth-1,xmax);
ymin = max(0,ymin);
ymax = min(topheight-1,ymax);
// Get bottom0 data:
int idxbot0 = ((item * pbottomheight + (m-s2p)) * pbottomwidth + (l-s2o)) * bottomchannels + n;
float bot0tmp = bottom0[idxbot0]; // bottom0[l-s2o,m-s2p,n]
// Index offset for topdiff in following loops:
int op = (p+neighborhood_grid_radius) * neighborhood_grid_width + (o+neighborhood_grid_radius); // index [o,p]
int idxOpOffset = (item * topchannels + op);
for(int y = ymin; y <= ymax; y++) {
for(int x = xmin; x <= xmax; x++) {
int idxtopdiff = (idxOpOffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p]
sum += topdiff[idxtopdiff] * bot0tmp;
}
}
}
}
}
const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels;
const int bot1index = ((n * bottomheight) + (m-pad_size)) * bottomwidth + (l-pad_size);
bottom1diff[bot1index + item*bottomcount] = sum / (float)sumelems;
}
}
// == Correlation Kernel Subtraction
// == Correlation Backward Pass Kernel (For Blob 0)
__global__ void CorrelateDataBackward0Subtract(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels,
int max_displacement, int neighborhood_grid_radius, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2,
int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight, int bottomchannels, int bottomcount, int pad_size,
float *bottom0diff, const float *bottom0, const float *bottom1, const float *topdiff)
{
CUDA_KERNEL_LOOP(index, nthreads) {
int l = index % bottomwidth + pad_size; //w-pos
int m = (index / bottomwidth) % bottomheight + pad_size; //h-pos
int n = (index / bottomwidth / bottomheight) % bottomchannels; //channels
//Get X,Y ranges and clamp
// round_off is a trick to enable integer division with ceil, even for negative numbers
// We use a large offset, for the inner part not to become negative.
const int round_off = ROUND_OFF;
const int round_off_s1 = stride1 * round_off;
// We add round_off before the int division and subtract it afterwards, so the formula matches ceil behavior:
int xmin = (l - 2*kernel_radius - max_displacement + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1
int ymin = (m - 2*kernel_radius - max_displacement + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (m - 2*kernel_radius - max_displacement) / stride1
// Same here:
int xmax = (l - max_displacement + round_off_s1) / stride1 - round_off; // floor (l - max_displacement) / stride1
int ymax = (m - max_displacement + round_off_s1) / stride1 - round_off; // floor (m - max_displacement) / stride1
float sum = 0;
if(xmax>=0 && ymax>=0 && (xmin<=topwidth-1) && (ymin<=topheight-1))
{
xmin = max(0,xmin);
xmax = min(topwidth-1,xmax);
ymin = max(0,ymin);
ymax = min(topheight-1,ymax);
for(int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) {
for(int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) {
// Get bottom1 data:
int s2o = stride2 * o;
int s2p = stride2 * p;
int idxbot = ((item * pbottomheight + (m+s2p)) * pbottomwidth + (l+s2o)) * bottomchannels + n;
float bot0tmp = bottom0[idxbot]; // bottom0[l+s2o,m+s2p,n]
float bot1tmp = bottom1[idxbot]; // bottom1[l+s2o,m+s2p,n]
float sign = (bot0tmp >= bot1tmp) ? float(1.0) : float(-1.0);
// Index offset for topdiff in following loops:
int op = (p+neighborhood_grid_radius) * neighborhood_grid_width + (o+neighborhood_grid_radius); // index [o,p]
int idxopoffset = (item * topchannels + op);
for(int y = ymin; y <= ymax; y++) {
for(int x = xmin; x <= xmax; x++) {
int idxtopdiff = (idxopoffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p]
sum += topdiff[idxtopdiff] * sign;
}
}
}
}
}
const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels;
bottom0diff[index + item*bottomcount] = sum / (float)sumelems;
}
}
// == Correlation Backward Pass Kernel (For Blob 1)
__global__ void CorrelateDataBackward1Subtract(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels,
int max_displacement, int neighborhood_grid_radius, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2,
int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight, int bottomchannels, int bottomcount, int pad_size,
const float *bottom0, const float *bottom1, float *bottom1diff, const float *topdiff)
{
CUDA_KERNEL_LOOP(index, nthreads) {
int l = index % bottomwidth + pad_size; //w-pos
int m = (index / bottomwidth) % bottomheight + pad_size; //h-pos
int n = (index / bottomwidth / bottomheight) % bottomchannels; //channels
// round_off is a trick to enable integer division with ceil, even for negative numbers
// We use a large offset, for the inner part not to become negative.
const int round_off = ROUND_OFF;
const int round_off_s1 = stride1 * round_off;
float sum = 0;
for(int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) {
for(int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) {
int s2o = stride2 * o;
int s2p = stride2 * p;
//Get X,Y ranges and clamp
// We add round_off before the int division and subtract it afterwards, so the formula matches ceil behavior:
int xmin = (l - 2*kernel_radius - max_displacement - s2o + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1
int ymin = (m - 2*kernel_radius - max_displacement - s2p + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (m - 2*kernel_radius - max_displacement - s2p) / stride1
// Same here:
int xmax = (l - max_displacement - s2o + round_off_s1) / stride1 - round_off; // floor (l - max_displacement - s2o) / stride1
int ymax = (m - max_displacement - s2p + round_off_s1) / stride1 - round_off; // floor (m - max_displacement - s2p) / stride1
if(xmax>=0 && ymax>=0 && (xmin<=topwidth-1) && (ymin<=topheight-1))
{
xmin = max(0,xmin);
xmax = min(topwidth-1,xmax);
ymin = max(0,ymin);
ymax = min(topheight-1,ymax);
// Get bottom0 data:
int idxbot = ((item * pbottomheight + (m-s2p)) * pbottomwidth + (l-s2o)) * bottomchannels + n;
float bot0tmp = bottom0[idxbot]; // bottom0[l-s2o,m-s2p,n]
float bot1tmp = bottom1[idxbot]; // bottom1[l-s2o,m-s2p,n]
float sign = (bot0tmp >= bot1tmp) ? float(-1.0) : float(1.0);
// Index offset for topdiff in following loops:
int op = (p+neighborhood_grid_radius) * neighborhood_grid_width + (o+neighborhood_grid_radius); // index [o,p]
int idxOpOffset = (item * topchannels + op);
for(int y = ymin; y <= ymax; y++) {
for(int x = xmin; x <= xmax; x++) {
int idxtopdiff = (idxOpOffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p]
sum += topdiff[idxtopdiff] * sign;
}
}
}
}
}
const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels;
bottom1diff[index + item*bottomcount] = sum / (float)sumelems;
}
}
void CorrelateDataBackward_ongpu(const float *rbot1, const float *rbot2, const float *gradOutput, float *gradInput1, float *gradInput2, int batchSize, int nOutputCols, int nOutputRows, int nOutputPlane, int max_displacement, int neighborhood_grid_radius_, int neighborhood_grid_width_, int kernel_radius_, int stride1, int stride2, int nInputCols, int nInputRows, int paddedbottomwidth, int paddedbottomheight, int nInputPlane, int pad_size, int corr_type_multiply, cudaStream_t stream)
{
int inputCount = nInputPlane * nInputRows * nInputCols;
int botThreadCount = inputCount;
if (corr_type_multiply == 1) {
// == Run kernel Backward 0
for (int n = 0; n < batchSize; n++) {
//Bottom0
CorrelateDataBackward0<<<GET_BLOCKS(botThreadCount, CUDA_NUM_THREADS), CUDA_NUM_THREADS, 0, stream>>>(
botThreadCount,
batchSize, n, nOutputCols, nOutputRows, nOutputPlane,
max_displacement, neighborhood_grid_radius_, neighborhood_grid_width_,
kernel_radius_, stride1, stride2, nInputCols, nInputRows,
paddedbottomwidth, paddedbottomheight, nInputPlane, inputCount, pad_size,
gradInput1, rbot2, gradOutput
);
}
// == Run kernel Backward 1
for (int n = 0; n < batchSize; n++) {
CorrelateDataBackward1<<<GET_BLOCKS(botThreadCount, CUDA_NUM_THREADS), CUDA_NUM_THREADS, 0, stream>>>(
botThreadCount, batchSize, n, nOutputCols, nOutputRows, nOutputPlane,
max_displacement, neighborhood_grid_radius_, neighborhood_grid_width_,
kernel_radius_, stride1, stride2, nInputCols, nInputRows,
paddedbottomwidth, paddedbottomheight, nInputPlane, inputCount, pad_size,
rbot1, gradInput2, gradOutput
);
}
} else {
for ( int n = 0; n < batchSize; n++ ) {
//Bottom0
CorrelateDataBackward0Subtract<<<GET_BLOCKS(botThreadCount, CUDA_NUM_THREADS), CUDA_NUM_THREADS, 0, stream>>> (
botThreadCount,
batchSize, n, nOutputCols, nOutputRows, nOutputPlane,
max_displacement, neighborhood_grid_radius_, neighborhood_grid_width_,
kernel_radius_, stride1, stride2, nInputCols, nInputRows,
paddedbottomwidth, paddedbottomheight, nInputPlane, inputCount, pad_size,
gradInput1, rbot1, rbot2, gradOutput
);
}
for (int n = 0; n < batchSize; n++ ) {
//Bottom0
CorrelateDataBackward1Subtract<<<GET_BLOCKS(botThreadCount, CUDA_NUM_THREADS), CUDA_NUM_THREADS, 0, stream>>>(
botThreadCount,
batchSize, n, nOutputCols, nOutputRows, nOutputPlane,
max_displacement, neighborhood_grid_radius_, neighborhood_grid_width_,
kernel_radius_, stride1, stride2, nInputCols, nInputRows,
paddedbottomwidth, paddedbottomheight, nInputPlane, inputCount, pad_size,
rbot1, rbot2, gradInput2, gradOutput
);
}
}
}
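// Illustrative host-side call (added, not from the original source). For a
// FlowNet-style configuration with max_displacement = 20, stride2 = 2 and
// kernel_size = 1 (so kernel_radius = 0), the derived grid values would be
//   neighborhood_grid_radius_ = 20 / 2 = 10,
//   neighborhood_grid_width_  = 2*10 + 1 = 21,
//   nOutputPlane              = 21 * 21 = 441 top channels,
// and the multiplicative kernel is selected with corr_type_multiply = 1.
// paddedW/paddedH below stand for the padded input sizes:
//
//   CorrelateData_ongpu(rbot1, rbot2, output, batchSize, nOutputCols,
//       nOutputRows, 441, /*max_displacement=*/20, 10, 21,
//       /*kernel_radius=*/0, /*kernel_size=*/1, /*stride1=*/1,
//       /*stride2=*/2, paddedW, paddedH, nInputPlane,
//       /*corr_type_multiply=*/1, stream);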
|
17f0bbbabd01d8250c77c557080e7e6fca073f79.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <device_launch_parameters.h>
#include <hip/hip_runtime.h>
void helloCPU()
{
printf("Hello from the CPU.\n");
}
/*
* Refactor the `helloGPU` definition to be a kernel
* that can be launched on the GPU. Update its message
* to read "Hello from the GPU!"
*/
void helloGPU()
{
printf("Hello also from the CPU.\n");
}
int main()
{
helloCPU();
/*
* Refactor this call to `helloGPU` so that it launches
* as a kernel on the GPU.
*/
helloGPU();
/*
* Add code below to synchronize on the completion of the
* `helloGPU` kernel before continuing the CPU
* thread.
*/
}
| 17f0bbbabd01d8250c77c557080e7e6fca073f79.cu | #include <stdio.h>
#include <device_launch_parameters.h>
#include <cuda_runtime.h>
void helloCPU()
{
printf("Hello from the CPU.\n");
}
/*
* Refactor the `helloGPU` definition to be a kernel
* that can be launched on the GPU. Update its message
* to read "Hello from the GPU!"
*/
void helloGPU()
{
printf("Hello also from the CPU.\n");
}
int main()
{
helloCPU();
/*
* Refactor this call to `helloGPU` so that it launches
* as a kernel on the GPU.
*/
helloGPU();
/*
* Add code below to synchronize on the completion of the
* `helloGPU` kernel before continuing the CPU
* thread.
*/
}
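/*
 * A sketch of the completed exercise (added; not part of the original file,
 * and the kernel name `helloGPUKernel` is made up for illustration): the
 * function gains a __global__ qualifier, is launched with an execution
 * configuration, and the host waits for it before exiting so the device
 * printf output is flushed:
 *
 *   __global__ void helloGPUKernel()
 *   {
 *       printf("Hello from the GPU!\n");
 *   }
 *
 *   // in main():
 *   helloGPUKernel<<<1, 1>>>();
 *   cudaDeviceSynchronize();
 */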
|
3e690fed297b6a7483358ece9e6159f10af80249.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "diff.hpp"
#include "abbrev.hpp"
typedef diff<float> dfloat;
__global__ void dosmt(dfloat* in, dfloat* out, int sz, bool* positive)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < sz)
{
out[i] = SIN(in[i]);
positive[i] = out[i] < in[i];
}
}
__global__ void vec_exp(dfloat* vec, int sz)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < sz)
vec[i] = EXP(vec[i]);
}
int main()
{
dfloat *h_a, *d_a, *h_s, *d_s;
bool *h_b, *d_b;
hipMalloc((void**)&d_a, sizeof(dfloat) * 1024);
hipMalloc((void**)&d_s, sizeof(dfloat) * 1024);
hipMalloc((void**)&d_b, sizeof(bool) * 1024);
h_a = new dfloat[1024];
h_s = new dfloat[1024];
h_b = new bool[1024];
for (int i = 0; i < 1024; ++i)
{
h_a[i] = dfloat((float)i, 1.0f);
}
hipMemcpy(d_a, h_a, sizeof(dfloat)*1024, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( dosmt), dim3(1), dim3(1024), 0, 0, d_a, d_s, 1024, d_b);
hipMemcpy(h_s, d_s, sizeof(dfloat)*1024, hipMemcpyDeviceToHost);
hipMemcpy(h_b, d_b, sizeof(bool)*1024, hipMemcpyDeviceToHost);
for (int i = 0; i < 1024; ++i)
{
std::cout << h_b[i] << h_s[i] << std::endl;
}
hipFree(d_a);
hipFree(d_s);
hipFree(d_b);
delete[] h_a;
delete[] h_s;
delete[] h_b;
return 0;
}
| 3e690fed297b6a7483358ece9e6159f10af80249.cu | #include "diff.hpp"
#include "abbrev.hpp"
typedef diff<float> dfloat;
__global__ void dosmt(dfloat* in, dfloat* out, int sz, bool* positive)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < sz)
{
out[i] = SIN(in[i]);
positive[i] = out[i] < in[i];
}
}
__global__ void vec_exp(dfloat* vec, int sz)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < sz)
vec[i] = EXP(vec[i]);
}
int main()
{
dfloat *h_a, *d_a, *h_s, *d_s;
bool *h_b, *d_b;
cudaMalloc((void**)&d_a, sizeof(dfloat) * 1024);
cudaMalloc((void**)&d_s, sizeof(dfloat) * 1024);
cudaMalloc((void**)&d_b, sizeof(bool) * 1024);
h_a = new dfloat[1024];
h_s = new dfloat[1024];
h_b = new bool[1024];
for (int i = 0; i < 1024; ++i)
{
h_a[i] = dfloat((float)i, 1.0f);
}
cudaMemcpy(d_a, h_a, sizeof(dfloat)*1024, cudaMemcpyHostToDevice);
dosmt<<<1, 1024>>>(d_a, d_s, 1024, d_b);
cudaMemcpy(h_s, d_s, sizeof(dfloat)*1024, cudaMemcpyDeviceToHost);
cudaMemcpy(h_b, d_b, sizeof(bool)*1024, cudaMemcpyDeviceToHost);
for (int i = 0; i < 1024; ++i)
{
std::cout << h_b[i] << h_s[i] << std::endl;
}
cudaFree(d_a);
cudaFree(d_s);
cudaFree(d_b);
delete[] h_a;
delete[] h_s;
delete[] h_b;
return 0;
}
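// Note (added, speculative): diff<float> looks like a forward-mode dual
// number -- dfloat((float)i, 1.0f) seeds value i with derivative 1, so the
// SIN in dosmt should yield sin(i) paired with its derivative cos(i).
// A minimal sketch of such a type, assuming that is what diff.hpp provides:
//
//   template <typename T> struct diff {
//       T v, d; // value and its derivative
//       __host__ __device__ diff(T v = 0, T d = 0) : v(v), d(d) {}
//   };
//   template <typename T> __host__ __device__ diff<T> SIN(const diff<T>& x)
//   { return diff<T>(sin(x.v), cos(x.v) * x.d); } // chain rule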
|
1272b82f1428b0be6d6ac0e775775317e74294e5.hip | // !!! This is a file automatically generated by hipify!!!
#include "common\book.h"
#include "common\cpu_bitmap.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#define DIM 500
struct hipComplex {
float r;
float i;
__device__ hipComplex( float a, float b) : r(a), i(b) {}
__device__ float magnitude2 (void ) {
return r * r + i * i;
}
__device__ hipComplex operator*(const hipComplex& a) {
return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ hipComplex operator+(const hipComplex& a) {
return hipComplex(r+a.r, i+a.i);
}
};
__device__ int julia( int x, int y) {
const float scale = 1.5;
float jx = scale * (float)(DIM/2 - x)/(DIM/2);
float jy = scale * (float)(DIM/2 - y)/(DIM/2);
hipComplex c(-0.8, 0.156);
hipComplex a(jx, jy);
int i = 0;
for(i = 0; i < 200; ++i) {
a = a * a + c;
if(a.magnitude2() > 1000)
return 0;
}
return 1;
}
__global__ void kernel(unsigned char *ptr) {
int x = blockIdx.x;
int y = blockIdx.y;
int offset = x + y * gridDim.x;
int juliaValue = julia ( x, y);
ptr[offset*4 + 0] = 255 * juliaValue;
ptr[offset*4 + 1] = 0;
ptr[offset*4 + 2] = 0;
ptr[offset*4 + 3] = 255;
}
int main(void) {
CPUBitmap bitmap(DIM, DIM);
unsigned char *dev_bitmap;
HANDLE_ERROR(hipMalloc((void**)&dev_bitmap,
bitmap.image_size()));
dim3 grid(DIM,DIM);
hipLaunchKernelGGL(( kernel), dim3(grid),dim3(1), 0, 0, dev_bitmap);
HANDLE_ERROR( hipMemcpy ( bitmap.get_ptr(), dev_bitmap,
bitmap.image_size(),
hipMemcpyDeviceToHost));
bitmap.display_and_exit();
HANDLE_ERROR( hipFree(dev_bitmap));
} | 1272b82f1428b0be6d6ac0e775775317e74294e5.cu | #include "common\book.h"
#include "common\cpu_bitmap.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#define DIM 500
struct cuComplex {
float r;
float i;
__device__ cuComplex( float a, float b) : r(a), i(b) {}
__device__ float magnitude2 (void ) {
return r * r + i * i;
}
__device__ cuComplex operator*(const cuComplex& a) {
return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ cuComplex operator+(const cuComplex& a) {
return cuComplex(r+a.r, i+a.i);
}
};
__device__ int julia( int x, int y) {
const float scale = 1.5;
float jx = scale * (float)(DIM/2 - x)/(DIM/2);
float jy = scale * (float)(DIM/2 - y)/(DIM/2);
cuComplex c(-0.8, 0.156);
cuComplex a(jx, jy);
int i = 0;
for(i = 0; i < 200; ++i) {
a = a * a + c;
if(a.magnitude2() > 1000)
return 0;
}
return 1;
}
__global__ void kernel(unsigned char *ptr) {
int x = blockIdx.x;
int y = blockIdx.y;
int offset = x + y * gridDim.x;
int juliaValue = julia ( x, y);
ptr[offset*4 + 0] = 255 * juliaValue;
ptr[offset*4 + 1] = 0;
ptr[offset*4 + 2] = 0;
ptr[offset*4 + 3] = 255;
}
int main(void) {
CPUBitmap bitmap(DIM, DIM);
unsigned char *dev_bitmap;
HANDLE_ERROR(cudaMalloc((void**)&dev_bitmap,
bitmap.image_size()));
dim3 grid(DIM,DIM);
kernel<<<grid,1>>>(dev_bitmap);
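// Note (added): one thread per block, as launched above, leaves 31 of every
// 32 warp lanes idle. A common refactor (assuming DIM is padded or divisible
// by 16) maps a 16x16 thread block over the image instead:
//   dim3 threads(16, 16);
//   dim3 blocks(DIM / 16, DIM / 16);
//   kernel<<<blocks, threads>>>(dev_bitmap);
// with x = blockIdx.x * blockDim.x + threadIdx.x (and likewise for y and
// offset) inside the kernel.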
HANDLE_ERROR( cudaMemcpy ( bitmap.get_ptr(), dev_bitmap,
bitmap.image_size(),
cudaMemcpyDeviceToHost));
bitmap.display_and_exit();
HANDLE_ERROR( cudaFree(dev_bitmap));
} |
e42e730af693977fdd0fe67dbc288729b7130e71.hip | // !!! This is a file automatically generated by hipify!!!
#include <rocblas.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include "crop_layer.h"
#include "dark_cuda.h"
#include "image.h"
#include "utils.h"
__device__ float get_pixel_kernel(
float* image, int w, int h, int x, int y, int c)
{
if (x < 0 || x >= w || y < 0 || y >= h)
return 0;
return image[x + w * (y + c * h)];
}
__device__ float3 rgb_to_hsv_kernel(float3 rgb)
{
float r = rgb.x;
float g = rgb.y;
float b = rgb.z;
float h, s, v;
float max = (r > g) ? ((r > b) ? r : b) : ((g > b) ? g : b);
float min = (r < g) ? ((r < b) ? r : b) : ((g < b) ? g : b);
float delta = max - min;
v = max;
if (max == 0)
{
s = 0;
h = -1;
}
else
{
s = delta / max;
if (r == max)
{
h = (g - b) / delta;
}
else if (g == max)
{
h = 2 + (b - r) / delta;
}
else
{
h = 4 + (r - g) / delta;
}
if (h < 0)
h += 6;
}
return make_float3(h, s, v);
}
__device__ float3 hsv_to_rgb_kernel(float3 hsv)
{
float h = hsv.x;
float s = hsv.y;
float v = hsv.z;
float r, g, b;
float f, p, q, t;
if (s == 0)
{
r = g = b = v;
}
else
{
int index = (int)floorf(h);
f = h - index;
p = v * (1 - s);
q = v * (1 - s * f);
t = v * (1 - s * (1 - f));
if (index == 0)
{
r = v;
g = t;
b = p;
}
else if (index == 1)
{
r = q;
g = v;
b = p;
}
else if (index == 2)
{
r = p;
g = v;
b = t;
}
else if (index == 3)
{
r = p;
g = q;
b = v;
}
else if (index == 4)
{
r = t;
g = p;
b = v;
}
else
{
r = v;
g = p;
b = q;
}
}
r = (r < 0) ? 0 : ((r > 1) ? 1 : r);
g = (g < 0) ? 0 : ((g > 1) ? 1 : g);
b = (b < 0) ? 0 : ((b > 1) ? 1 : b);
return make_float3(r, g, b);
}
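// Note (added): the two conversion helpers above keep hue in sextant units,
// h in [0,6), rather than degrees; e.g. pure red (1,0,0) maps to
// (h,s,v) = (0,1,1) and back. The saturation/exposure jitter in
// levels_image_kernel relies on round-tripping through this representation.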
__device__ float bilinear_interpolate_kernel(
float* image, int w, int h, float x, float y, int c)
{
int ix = (int)floorf(x);
int iy = (int)floorf(y);
float dx = x - ix;
float dy = y - iy;
float val = (1 - dy) * (1 - dx) * get_pixel_kernel(image, w, h, ix, iy, c) +
dy * (1 - dx) * get_pixel_kernel(image, w, h, ix, iy + 1, c) +
(1 - dy) * dx * get_pixel_kernel(image, w, h, ix + 1, iy, c) +
dy * dx * get_pixel_kernel(image, w, h, ix + 1, iy + 1, c);
return val;
}
__global__ void levels_image_kernel(float* image, float* rand, int batch, int w,
int h, int train, float saturation, float exposure, float translate,
float scale, float shift)
{
int size = batch * w * h;
int id = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (id >= size)
return;
int x = id % w;
id /= w;
int y = id % h;
id /= h;
float rshift = rand[0];
float gshift = rand[1];
float bshift = rand[2];
float r0 = rand[8 * id + 0];
float r1 = rand[8 * id + 1];
float r2 = rand[8 * id + 2];
float r3 = rand[8 * id + 3];
saturation = r0 * (saturation - 1) + 1;
saturation = (r1 > .5) ? 1. / saturation : saturation;
exposure = r2 * (exposure - 1) + 1;
exposure = (r3 > .5) ? 1. / exposure : exposure;
size_t offset = id * h * w * 3;
image += offset;
float r = image[x + w * (y + h * 0)];
float g = image[x + w * (y + h * 1)];
float b = image[x + w * (y + h * 2)];
float3 rgb = make_float3(r, g, b);
if (train)
{
float3 hsv = rgb_to_hsv_kernel(rgb);
hsv.y *= saturation;
hsv.z *= exposure;
rgb = hsv_to_rgb_kernel(hsv);
}
else
{
shift = 0;
}
image[x + w * (y + h * 0)] =
rgb.x * scale + translate + (rshift - .5) * shift;
image[x + w * (y + h * 1)] =
rgb.y * scale + translate + (gshift - .5) * shift;
image[x + w * (y + h * 2)] =
rgb.z * scale + translate + (bshift - .5) * shift;
}
__global__ void forward_crop_layer_kernel(float* input, float* rand, int size,
int c, int h, int w, int crop_height, int crop_width, int train, int flip,
float angle, float* output)
{
int id = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (id >= size)
return;
float cx = w / 2.;
float cy = h / 2.;
int count = id;
int j = id % crop_width;
id /= crop_width;
int i = id % crop_height;
id /= crop_height;
int k = id % c;
id /= c;
int b = id;
float r4 = rand[8 * b + 4];
float r5 = rand[8 * b + 5];
float r6 = rand[8 * b + 6];
float r7 = rand[8 * b + 7];
float dw = (w - crop_width) * r4;
float dh = (h - crop_height) * r5;
flip = (flip && (r6 > .5));
angle = 2 * angle * r7 - angle;
if (!train)
{
dw = (w - crop_width) / 2.;
dh = (h - crop_height) / 2.;
flip = 0;
angle = 0;
}
input += w * h * c * b;
float x = (flip) ? w - dw - j - 1 : j + dw;
float y = i + dh;
float rx = cos(angle) * (x - cx) - sin(angle) * (y - cy) + cx;
float ry = sin(angle) * (x - cx) + cos(angle) * (y - cy) + cy;
output[count] = bilinear_interpolate_kernel(input, w, h, rx, ry, k);
}
void ForwardCropLayerGpu(layer* l, NetworkState state)
{
cuda_random(l->rand_gpu, l->batch * 8);
float radians = l->angle * 3.14159265 / 180.;
float scale = 2;
float translate = -1;
if (l->noadjust)
{
scale = 1;
translate = 0;
}
int size = l->batch * l->w * l->h;
hipLaunchKernelGGL(( levels_image_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, get_cuda_stream(),
state.input, l->rand_gpu, l->batch, l->w, l->h, state.train,
l->saturation, l->exposure, translate, scale, l->shift);
CHECK_CUDA(hipPeekAtLastError());
size = l->batch * l->c * l->out_w * l->out_h;
hipLaunchKernelGGL(( forward_crop_layer_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0,
get_cuda_stream(), state.input, l->rand_gpu, size, l->c, l->h, l->w,
l->out_h, l->out_w, state.train, l->flip, radians, l->output_gpu);
CHECK_CUDA(hipPeekAtLastError());
}
| e42e730af693977fdd0fe67dbc288729b7130e71.cu | #include <cublas_v2.h>
#include <cuda_runtime.h>
#include <curand.h>
#include "crop_layer.h"
#include "dark_cuda.h"
#include "image.h"
#include "utils.h"
__device__ float get_pixel_kernel(
float* image, int w, int h, int x, int y, int c)
{
if (x < 0 || x >= w || y < 0 || y >= h)
return 0;
return image[x + w * (y + c * h)];
}
__device__ float3 rgb_to_hsv_kernel(float3 rgb)
{
float r = rgb.x;
float g = rgb.y;
float b = rgb.z;
float h, s, v;
float max = (r > g) ? ((r > b) ? r : b) : ((g > b) ? g : b);
float min = (r < g) ? ((r < b) ? r : b) : ((g < b) ? g : b);
float delta = max - min;
v = max;
if (max == 0)
{
s = 0;
h = -1;
}
else
{
s = delta / max;
if (r == max)
{
h = (g - b) / delta;
}
else if (g == max)
{
h = 2 + (b - r) / delta;
}
else
{
h = 4 + (r - g) / delta;
}
if (h < 0)
h += 6;
}
return make_float3(h, s, v);
}
__device__ float3 hsv_to_rgb_kernel(float3 hsv)
{
float h = hsv.x;
float s = hsv.y;
float v = hsv.z;
float r, g, b;
float f, p, q, t;
if (s == 0)
{
r = g = b = v;
}
else
{
int index = (int)floorf(h);
f = h - index;
p = v * (1 - s);
q = v * (1 - s * f);
t = v * (1 - s * (1 - f));
if (index == 0)
{
r = v;
g = t;
b = p;
}
else if (index == 1)
{
r = q;
g = v;
b = p;
}
else if (index == 2)
{
r = p;
g = v;
b = t;
}
else if (index == 3)
{
r = p;
g = q;
b = v;
}
else if (index == 4)
{
r = t;
g = p;
b = v;
}
else
{
r = v;
g = p;
b = q;
}
}
r = (r < 0) ? 0 : ((r > 1) ? 1 : r);
g = (g < 0) ? 0 : ((g > 1) ? 1 : g);
b = (b < 0) ? 0 : ((b > 1) ? 1 : b);
return make_float3(r, g, b);
}
__device__ float bilinear_interpolate_kernel(
float* image, int w, int h, float x, float y, int c)
{
int ix = (int)floorf(x);
int iy = (int)floorf(y);
float dx = x - ix;
float dy = y - iy;
float val = (1 - dy) * (1 - dx) * get_pixel_kernel(image, w, h, ix, iy, c) +
dy * (1 - dx) * get_pixel_kernel(image, w, h, ix, iy + 1, c) +
(1 - dy) * dx * get_pixel_kernel(image, w, h, ix + 1, iy, c) +
dy * dx * get_pixel_kernel(image, w, h, ix + 1, iy + 1, c);
return val;
}
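// Worked example (added, illustrative): for x = 1.25, y = 2.5 the four taps
// are (1,2), (2,2), (1,3), (2,3) with dx = 0.25, dy = 0.5, giving weights
// 0.375, 0.125, 0.375, 0.125 respectively (they sum to 1), so the result is
// a distance-weighted blend of the four neighboring pixels.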
__global__ void levels_image_kernel(float* image, float* rand, int batch, int w,
int h, int train, float saturation, float exposure, float translate,
float scale, float shift)
{
int size = batch * w * h;
int id = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (id >= size)
return;
int x = id % w;
id /= w;
int y = id % h;
id /= h;
float rshift = rand[0];
float gshift = rand[1];
float bshift = rand[2];
float r0 = rand[8 * id + 0];
float r1 = rand[8 * id + 1];
float r2 = rand[8 * id + 2];
float r3 = rand[8 * id + 3];
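// Layout note (added): ForwardCropLayerGpu fills rand with batch*8 uniform
// floats; entries 8*id+0..3 drive the color jitter here and 8*id+4..7 drive
// crop/flip/rotation in forward_crop_layer_kernel below. rshift/gshift/bshift
// above reuse slots 0..2, i.e. the first image's draws.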
saturation = r0 * (saturation - 1) + 1;
saturation = (r1 > .5) ? 1. / saturation : saturation;
exposure = r2 * (exposure - 1) + 1;
exposure = (r3 > .5) ? 1. / exposure : exposure;
size_t offset = id * h * w * 3;
image += offset;
float r = image[x + w * (y + h * 0)];
float g = image[x + w * (y + h * 1)];
float b = image[x + w * (y + h * 2)];
float3 rgb = make_float3(r, g, b);
if (train)
{
float3 hsv = rgb_to_hsv_kernel(rgb);
hsv.y *= saturation;
hsv.z *= exposure;
rgb = hsv_to_rgb_kernel(hsv);
}
else
{
shift = 0;
}
image[x + w * (y + h * 0)] =
rgb.x * scale + translate + (rshift - .5) * shift;
image[x + w * (y + h * 1)] =
rgb.y * scale + translate + (gshift - .5) * shift;
image[x + w * (y + h * 2)] =
rgb.z * scale + translate + (bshift - .5) * shift;
}
__global__ void forward_crop_layer_kernel(float* input, float* rand, int size,
int c, int h, int w, int crop_height, int crop_width, int train, int flip,
float angle, float* output)
{
int id = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (id >= size)
return;
float cx = w / 2.;
float cy = h / 2.;
int count = id;
int j = id % crop_width;
id /= crop_width;
int i = id % crop_height;
id /= crop_height;
int k = id % c;
id /= c;
int b = id;
float r4 = rand[8 * b + 4];
float r5 = rand[8 * b + 5];
float r6 = rand[8 * b + 6];
float r7 = rand[8 * b + 7];
float dw = (w - crop_width) * r4;
float dh = (h - crop_height) * r5;
flip = (flip && (r6 > .5));
angle = 2 * angle * r7 - angle;
if (!train)
{
dw = (w - crop_width) / 2.;
dh = (h - crop_height) / 2.;
flip = 0;
angle = 0;
}
input += w * h * c * b;
float x = (flip) ? w - dw - j - 1 : j + dw;
float y = i + dh;
float rx = cos(angle) * (x - cx) - sin(angle) * (y - cy) + cx;
float ry = sin(angle) * (x - cx) + cos(angle) * (y - cy) + cy;
output[count] = bilinear_interpolate_kernel(input, w, h, rx, ry, k);
}
void ForwardCropLayerGpu(layer* l, NetworkState state)
{
cuda_random(l->rand_gpu, l->batch * 8);
float radians = l->angle * 3.14159265 / 180.;
float scale = 2;
float translate = -1;
if (l->noadjust)
{
scale = 1;
translate = 0;
}
int size = l->batch * l->w * l->h;
levels_image_kernel<<<cuda_gridsize(size), BLOCK, 0, get_cuda_stream()>>>(
state.input, l->rand_gpu, l->batch, l->w, l->h, state.train,
l->saturation, l->exposure, translate, scale, l->shift);
CHECK_CUDA(cudaPeekAtLastError());
size = l->batch * l->c * l->out_w * l->out_h;
forward_crop_layer_kernel<<<cuda_gridsize(size), BLOCK, 0,
get_cuda_stream()>>>(state.input, l->rand_gpu, size, l->c, l->h, l->w,
l->out_h, l->out_w, state.train, l->flip, radians, l->output_gpu);
CHECK_CUDA(cudaPeekAtLastError());
}
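// Note (added): cuda_gridsize(n) is darknet's helper that returns a grid of
// BLOCK-thread blocks covering at least n threads (spilling into the grid's
// y dimension for very large n) -- hence the (blockIdx.x + blockIdx.y *
// gridDim.x) index computation and the explicit `if (id >= size) return;`
// guard in both kernels above.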
|
59310e55448cbd75e1785c0c2c45be6f84d96cf1.hip | // !!! This is a file automatically generated by hipify!!!
// Includes, system
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>
#include <hip/hip_runtime.h>
#include <gloop/initialize.cuh>
#include <gloop/statistics.h>
#include <hip/hip_vector_types.h>
// includes, kernels
#include <common.cu>
#include <mummergpu.h>
#include <mummergpu_kernel.cu>
int USE_PRINT_KERNEL = 1;
#define BREATHING_ROOM (16 * 1024 * 1024)
#define BASES_PER_TREE_PAGE 8388608
//#define BASES_PER_TREE_PAGE 7000000
#define BLOCKSIZE 256
unsigned int cuda_calls = 0;
void trap_dbg()
{
fprintf(stderr, "Trapped\n");
}
#define CUDA_SAFE_CALL(call) \
do { \
cuda_calls++; \
hipError_t err = call; \
if (hipSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %d (%s).\n", \
__FILE__, __LINE__, err, hipGetErrorString(err)); \
trap_dbg(); \
exit(EXIT_FAILURE); \
} \
} while (0)
#define CU_SAFE_CALL_NO_SYNC(call) \
do { \
hipError_t err = call; \
if (hipSuccess != err) { \
fprintf(stderr, "Cuda driver error %x in file '%s' in line %i.\n", \
err, __FILE__, __LINE__); \
exit(EXIT_FAILURE); \
} \
} while (0)
#define CUT_DEVICE_INIT_DRV(cuDevice) \
do { \
cuDevice = 0; \
int deviceCount = 0; \
hipError_t err = hipInit(0); \
if (hipSuccess == err) \
CU_SAFE_CALL_NO_SYNC(hipGetDeviceCount(&deviceCount)); \
if (deviceCount == 0) { \
fprintf(stderr, "There is no device.\n"); \
exit(EXIT_FAILURE); \
} \
int dev; \
for (dev = 0; dev < deviceCount; ++dev) { \
int major, minor; \
CU_SAFE_CALL_NO_SYNC(hipDeviceComputeCapability(&major, &minor, dev)); \
if (major >= 1) \
break; \
} \
if (dev == deviceCount) { \
fprintf(stderr, "There is no device supporting CUDA.\n"); \
exit(EXIT_FAILURE); \
} \
else \
CU_SAFE_CALL_NO_SYNC(hipDeviceGet(&cuDevice, dev)); \
} while (0)
unsigned int num_bind_tex_calls = 0;
#define BIND_TEX(offset, tex, arr, desc, len) \
do { \
CUDA_SAFE_CALL(hipBindTexture(offset, tex, arr, desc, len)); \
++num_bind_tex_calls; \
} while (0)
#define BIND_TEX_ARRAY(tex, arr, desc) \
do { \
CUDA_SAFE_CALL(hipBindTextureToArray(tex, arr, desc)); \
++num_bind_tex_calls; \
} while (0)
#define CUDA_MALLOC(ptr, size) \
do { \
hipMalloc(ptr, size); \
++num_bind_tex_calls; \
} while (0)
#define CUDA_MALLOC_PITCH(ptr, out_pitch, rowsize, numrows) \
do { \
hipMallocPitch(ptr, out_pitch, rowsize, numrows); \
++num_bind_tex_calls; \
} while (0)
#define CUDA_MALLOC_ARRAY(ptr, desc, pitch, rows) \
do { \
hipMallocArray(ptr, desc, pitch, rows); \
++num_bind_tex_calls; \
} while (0)
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(int argc, char** argv);
extern "C" void computeGold(MatchResults* results,
char* refstr,
char* queries,
int* queryAddrs,
int* queryLengths,
PixelOfNode* nodeTexture,
PixelOfChildren* childrenTexture,
int numQueries,
int mismatch_length,
int rc);
extern "C" void getReferenceString(const char* filename, char** refstr, size_t* reflen);
extern "C" void createTreeTexture(const char* filename,
PixelOfNode** nodeTexture,
PixelOfChildren** childrenTexture,
unsigned int* width,
unsigned int* node_height,
unsigned int* children_height,
AuxiliaryNodeData** aux_data,
int* num_match_coords,
int min_match_len,
Statistics* statistics,
const char* dotfilename,
const char* texfilename);
extern "C" void getQueriesTexture(int qfile,
char** queryTexture,
size_t* queryLength,
int** queryAddrs,
char*** queryNames,
int** queryLengths,
unsigned int* numQueries,
unsigned int* num_match_coords,
unsigned int device_memory_avail,
int min_match_length,
bool rc);
extern "C" int lookupNumLeaves(ReferencePage* page, TextureAddress addr);
void printAlignments(ReferencePage* page,
Alignment* alignments,
char* query,
int qrylen,
TextureAddress nodeid,
int qrypos,
int edge_depth,
int min_match,
bool rc,
bool forwardcoordinates);
int countLeafNodes(int nodeid);
extern "C" void mapQueriesEndToEnd(MatchContext* ctx,
ReferencePage* page,
MatchInfo* h_matches,
unsigned int numMatches,
Alignment* h_alignments,
unsigned int numAligments);
char* createTimer()
{
unsigned int* ptr = (unsigned int*)malloc(sizeof(struct Timer_t));
memset(ptr, 0, sizeof(struct Timer_t));
return (char*)ptr;
}
void startTimer(char* ptr)
{
gettimeofday(&(((struct Timer_t*)ptr)->start_m), NULL);
}
void stopTimer(char* ptr)
{
gettimeofday(&(((struct Timer_t*)ptr)->end_m), NULL);
}
float getTimerValue(char* ptr)
{
Timer_t* timer = (Timer_t*)ptr;
if (timer == NULL) {
fprintf(stderr, "Uninitialized timer!!!\n");
return 0.0;
}
if (timer->end_m.tv_sec == 0) {
stopTimer(ptr);
}
return (float)(1000.0 * (timer->end_m.tv_sec - timer->start_m.tv_sec)
+ (0.001 * (timer->end_m.tv_usec - timer->start_m.tv_usec)));
}
void deleteTimer(char* ptr)
{
free((Timer_t*)ptr);
}
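// Typical use of the timer helpers above (added; this is the pattern the
// rest of this file follows, e.g. in createReference below):
//   char* t = createTimer();
//   startTimer(t);
//   /* ... timed work ... */
//   stopTimer(t);
//   float ms = getTimerValue(t); // elapsed milliseconds
//   deleteTimer(t);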
extern "C" int createReference(const char* fromFile, Reference* ref)
{
if (!fromFile || !ref)
return -1;
char* loadreftimer = createTimer();
startTimer(loadreftimer);
getReferenceString(fromFile, &(ref->str), &(ref->len));
stopTimer(loadreftimer);
ref->t_load_from_disk += getTimerValue(loadreftimer);
deleteTimer(loadreftimer);
return 0;
}
extern "C" int destroyReference(Reference* ref)
{
gloop::Statistics::Scope<gloop::Statistics::Type::DataInit> scope;
free(ref->h_node_tex_array);
free(ref->h_children_tex_array);
free(ref->str);
#if REORDER_REF
free(ref->h_ref_array);
#endif
free(ref->aux_data);
#if TREE_ACCESS_HISTOGRAM
free(ref->h_node_hist);
free(ref->h_child_hist);
#endif
ref->str = NULL;
ref->len = 0;
return 0;
}
extern "C" int createQuerySet(const char* fromFile, QuerySet* queries)
{
fprintf(stderr, "Opening %s...\n", fromFile);
int qfile = open(fromFile, O_RDONLY);
if (qfile == -1) {
fprintf(stderr, "Can't open %s: %d\n", fromFile, errno);
exit(1);
}
queries->qfile = qfile;
return 0;
}
extern "C" int destroyQuerySet(QuerySet* queries)
{
if (queries->qfile)
close(queries->qfile);
return 0;
}
extern "C" void printStringForError(int err)
{
}
extern "C" int createMatchContext(Reference* ref,
QuerySet* queries,
MatchResults* matches,
bool on_cpu,
int min_match_length,
char* stats_file,
bool reverse,
bool forwardreverse,
bool forwardcoordinates,
bool showQueryLength,
char* dotfilename,
char* texfilename,
MatchContext* ctx)
{
{
gloop::Statistics::Scope<gloop::Statistics::Type::GPUInit> scope;
gloop::eagerlyInitializeContext();
}
{
gloop::Statistics::Scope<gloop::Statistics::Type::DataInit> scope;
ctx->queries = queries;
ctx->ref = ref;
ctx->full_ref = ref->str;
ctx->full_ref_len = ref->len;
ctx->on_cpu = on_cpu;
ctx->min_match_length = min_match_length;
ctx->stats_file = stats_file;
ctx->reverse = reverse;
ctx->forwardreverse = forwardreverse;
ctx->forwardcoordinates = forwardcoordinates;
ctx->show_query_length = showQueryLength;
ctx->dotfilename = dotfilename;
ctx->texfilename = texfilename;
}
return 0;
}
extern "C" int destroyMatchContext(MatchContext* ctx)
{
gloop::Statistics::Scope<gloop::Statistics::Type::DataInit> scope;
free(ctx->full_ref);
//destroyReference(ctx->ref);
destroyQuerySet(ctx->queries);
{
gloop::Statistics::Scope<gloop::Statistics::Type::GPUInit> scope;
gloop::eagerlyFinalizeContext();
}
return 0;
}
void buildReferenceTexture(Reference* ref,
char* full_ref,
size_t begin,
size_t end,
int min_match_len,
char* dotfilename,
char* texfilename,
Statistics* statistics)
{
fprintf(stderr, "Building reference texture...\n");
PixelOfNode* nodeTexture = NULL;
PixelOfChildren* childrenTexture = NULL;
unsigned int width = 0;
unsigned int node_height = 0;
unsigned int children_height = 0;
AuxiliaryNodeData* aux_data = NULL;
int num_nodes;
char* loadreftimer = createTimer();
startTimer(loadreftimer);
ref->len = end - begin + 3;
ref->str = (char*)malloc(ref->len);
ref->str[0] = 's';
strncpy(ref->str + 1, full_ref + begin, ref->len - 3);
strcpy(ref->str + ref->len - 2, "$");
stopTimer(loadreftimer);
statistics->t_ref_from_disk += getTimerValue(loadreftimer) + ref->t_load_from_disk;
deleteTimer(loadreftimer);
createTreeTexture(ref->str,
&nodeTexture,
&childrenTexture,
&width,
&node_height,
&children_height,
&aux_data,
&num_nodes,
min_match_len,
statistics,
dotfilename,
texfilename);
ref->h_node_tex_array = nodeTexture;
ref->h_children_tex_array = childrenTexture;
ref->tex_width = width;
ref->tex_node_height = node_height;
ref->tex_children_height = children_height;
#if TREE_ACCESS_HISTOGRAM
ref->h_node_hist = (int*)calloc(width * node_height, sizeof(int));
ref->h_child_hist = (int*)calloc(width * children_height, sizeof(int));
#endif
ref->aux_data = aux_data;
ref->num_nodes = num_nodes;
ref->bytes_on_board = (width * node_height * sizeof(PixelOfNode)) + (width * children_height * sizeof(PixelOfChildren));
fprintf(stderr, "This tree will need %d bytes on the board\n", ref->bytes_on_board);
#if REORDER_REF
char* reordertimer = createTimer();
startTimer(reordertimer);
unsigned int refpitch = ref->pitch = 65536;
int numrows = ceil(ref->len / ((float)refpitch));
int blocksize = 4;
numrows += blocksize;
int refstrsize = numrows * refpitch;
ref->h_ref_array = (char*)malloc(refstrsize);
ref->bytes_on_board += refstrsize;
fprintf(stderr, "The refstr (reordered) requires %d bytes\n", refstrsize);
int z_max = numrows * refpitch;
for (int z = 0; z < z_max; z++) {
ref->h_ref_array[z] = 'Z';
}
int x, y;
int maxx = 0, maxy = 0;
size_t reflen = ref->len;
char* refstr = ref->str;
int block_dim = refpitch * blocksize;
for (int i = 0; i < reflen; i++) {
int bigx = i % (block_dim); // ref string reorder
int bigy = i / (block_dim);
y = bigy * blocksize + bigx % blocksize;
x = bigx / blocksize;
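// Worked example (added): with refpitch = 65536 and blocksize = 4,
// block_dim = 262144. Characters i = 8,9,10,11 all get x = 2 and
// y = 0,1,2,3: every four consecutive reference characters are stacked
// vertically in one column, presumably so a thread can fetch them with a
// single 4-byte texture access.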
// printf("%d: (%d,%d)=%c\n", i, x, y, refstr[i]);
assert(x < refpitch);
assert(y < numrows);
ref->h_ref_array[y * refpitch + x] = refstr[i];
if (x > maxx) {
maxx = x;
}
if (y > maxy) {
maxy = y;
}
}
if ((maxx >= refpitch) || (maxy >= numrows)) {
fprintf(stderr, "ERROR: maxx: %d refpitch: %d, maxy: %d numrows: %d\n",
maxx, refpitch, maxy, numrows);
exit(1);
}
stopTimer(reordertimer);
if (statistics)
statistics->t_reorder_ref_str += getTimerValue(reordertimer);
deleteTimer(reordertimer);
#else
fprintf(stderr, "The refstr requires %d bytes\n", ref->len);
ref->bytes_on_board += ref->len;
#endif
}
void boardMemory(size_t* free_mem, size_t* total_mem)
{
// The emulator doesn't allow calls to cuMemGetInfo
#ifdef __DEVICE_EMULATION__
*free_mem = 512 * 1024 * 1024;
*total_mem = 768 * 1024 * 1024;
#else
CU_SAFE_CALL_NO_SYNC(hipMemGetInfo(free_mem, total_mem));
#endif
}
void loadReferenceTexture(MatchContext* ctx)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
Reference* ref = ctx->ref;
int numrows = ceil(ref->len / ((float)ref->pitch));
int blocksize = 4;
numrows += blocksize;
hipChannelFormatDesc refTextureDesc = hipCreateChannelDesc(8, 0, 0, 0, hipChannelFormatKindSigned);
if (!ctx->on_cpu) {
char* toboardtimer = createTimer();
startTimer(toboardtimer);
#if REFTEX
#if REORDER_REF
CUDA_MALLOC_ARRAY((hipArray**)(&ref->d_ref_array),
&refTextureDesc,
ref->pitch,
numrows);
CUDA_SAFE_CALL(hipMemcpyToArray((hipArray*)(ref->d_ref_array),
0,
0,
ref->h_ref_array,
numrows * ref->pitch,
hipMemcpyHostToDevice));
reftex.addressMode[0] = hipAddressModeClamp;
reftex.addressMode[1] = hipAddressModeClamp;
reftex.filterMode = hipFilterModePoint;
reftex.normalized = false;
BIND_TEX_ARRAY(reftex, (hipArray*)ref->d_ref_array, refTextureDesc);
ctx->ref->bytes_on_board += numrows * ref->pitch;
#else
CUDA_MALLOC((void**)(&ref->d_ref_array), ref->len);
CUDA_SAFE_CALL(hipMemcpy((void*)(ref->d_ref_array),
ref->str,
ref->len,
hipMemcpyHostToDevice));
reftex.addressMode[0] = hipAddressModeClamp;
reftex.filterMode = hipFilterModePoint;
reftex.normalized = false; // access with normalized texture coordinates
hipChannelFormatDesc refDesc = hipCreateChannelDesc(8, 0, 0, 0, hipChannelFormatKindUnsigned);
BIND_TEX(0, reftex, (void*)(ref->d_ref_array), refDesc, ref->len);
ctx->ref->bytes_on_board += ref->len;
#endif
#else
#if REORDER_REF
size_t refpitch;
CUDA_MALLOC_PITCH((void**)(&ref->d_ref_array),
&refpitch,
ref->pitch * sizeof(char),
numrows);
CUDA_SAFE_CALL(hipMemcpy2D((ref->d_ref_array),
refpitch,
ref->h_ref_array,
ref->pitch,
ref->pitch * sizeof(char),
numrows,
hipMemcpyHostToDevice));
ctx->ref->bytes_on_board += numrows * ref->pitch;
#else
CUDA_MALLOC((void**)(&ref->d_ref_array), ref->len);
CUDA_SAFE_CALL(hipMemcpy((void*)(ref->d_ref_array),
ref->str,
ref->len,
hipMemcpyHostToDevice));
ctx->ref->bytes_on_board += ref->len;
#endif
#endif
stopTimer(toboardtimer);
ctx->statistics.t_ref_str_to_board += getTimerValue(toboardtimer);
deleteTimer(toboardtimer);
}
else {
ref->d_ref_array = NULL;
}
}
void unloadReferenceString(Reference* ref)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
#if REFTEX
CUDA_SAFE_CALL(hipUnbindTexture(reftex));
#endif
#if REORDER_REF && REFTEX
CUDA_SAFE_CALL(hipFreeArray((hipArray*)(ref->d_ref_array)));
#else
CUDA_SAFE_CALL(hipFree((ref->d_ref_array)));
#endif
ref->d_ref_array = NULL;
}
void unloadReferenceTree(MatchContext* ctx)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
Reference* ref = ctx->ref;
#if REORDER_TREE
// Unload nodetex
#if NODETEX
CUDA_SAFE_CALL(hipUnbindTexture(nodetex));
CUDA_SAFE_CALL(hipFreeArray((hipArray*)(ref->d_node_tex_array)));
#else
CUDA_SAFE_CALL(hipFree(ref->d_node_tex_array));
#endif
ref->d_node_tex_array = NULL;
// Unload childrentex
if (ref->d_children_tex_array) {
#if CHILDTEX
CUDA_SAFE_CALL(hipUnbindTexture(childrentex));
CUDA_SAFE_CALL(hipFreeArray((hipArray*)(ref->d_children_tex_array)));
#else
CUDA_SAFE_CALL(hipFree(ref->d_children_tex_array));
#endif
}
ref->d_children_tex_array = NULL;
#else
#if NODETEX
CUDA_SAFE_CALL(hipUnbindTexture(nodetex));
#endif
CUDA_SAFE_CALL(hipFree(ref->d_node_tex_array));
ref->d_node_tex_array = NULL;
// Unload childrentex
if (ref->d_children_tex_array) {
#if CHILDTEX
CUDA_SAFE_CALL(hipUnbindTexture(childrentex));
#endif
CUDA_SAFE_CALL(hipFree(ref->d_children_tex_array));
ref->d_children_tex_array = NULL;
}
#endif
#if TREE_ACCESS_HISTOGRAM
CUDA_SAFE_CALL(hipFree(ref->d_node_hist));
ref->d_node_hist = NULL;
CUDA_SAFE_CALL(hipFree(ref->d_child_hist));
ref->d_child_hist = NULL;
#endif
}
//loads a tree and text for [begin, end) in the reference
void loadReference(MatchContext* ctx)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
Reference* ref = ctx->ref;
ref->bytes_on_board = 0;
loadReferenceTexture(ctx);
if (!ctx->on_cpu) {
char* toboardtimer = createTimer();
startTimer(toboardtimer);
// node texels
ref->bytes_on_board += ref->tex_width * ref->tex_node_height * (sizeof(PixelOfNode));
// children texels
ref->bytes_on_board += ref->tex_width * ref->tex_children_height * sizeof(PixelOfChildren);
#if REORDER_TREE
#if NODETEX
hipChannelFormatDesc nodeTextureDesc = hipCreateChannelDesc(32, 32, 32, 32, hipChannelFormatKindUnsigned);
CUDA_MALLOC_ARRAY((hipArray**)(&ref->d_node_tex_array),
&nodeTextureDesc,
ref->tex_width,
ref->tex_node_height);
CUDA_SAFE_CALL(hipMemcpyToArray((hipArray*)(ref->d_node_tex_array),
0,
0,
ref->h_node_tex_array,
ref->tex_width * ref->tex_node_height * sizeof(PixelOfNode),
hipMemcpyHostToDevice));
nodetex.addressMode[0] = hipAddressModeClamp;
nodetex.addressMode[1] = hipAddressModeClamp;
nodetex.filterMode = hipFilterModePoint;
nodetex.normalized = false; // access with normalized texture coordinates
BIND_TEX_ARRAY(nodetex, (hipArray*)ref->d_node_tex_array,
nodeTextureDesc);
#else
size_t nodepitch;
CUDA_MALLOC_PITCH((void**)(&ref->d_node_tex_array),
&nodepitch,
ref->tex_width * sizeof(PixelOfNode),
ref->tex_node_height);
CUDA_SAFE_CALL(hipMemcpy2D((ref->d_node_tex_array),
nodepitch,
ref->h_node_tex_array,
nodepitch,
ref->tex_width * sizeof(PixelOfNode),
ref->tex_node_height,
hipMemcpyHostToDevice));
#endif
if (ref->tex_children_height) {
#if CHILDTEX
hipChannelFormatDesc childrenTextureDesc = hipCreateChannelDesc(32, 32, 32, 32, hipChannelFormatKindUnsigned);
CUDA_MALLOC_ARRAY((hipArray**)(&ref->d_children_tex_array),
&childrenTextureDesc,
ref->tex_width,
ref->tex_children_height);
CUDA_SAFE_CALL(hipMemcpyToArray((hipArray*)(ref->d_children_tex_array),
0,
0,
ref->h_children_tex_array,
ref->tex_width * ref->tex_children_height * sizeof(PixelOfChildren),
hipMemcpyHostToDevice));
childrentex.addressMode[0] = hipAddressModeClamp;
childrentex.addressMode[1] = hipAddressModeClamp;
childrentex.filterMode = hipFilterModePoint;
childrentex.normalized = false; // access with normalized texture coordinates
BIND_TEX_ARRAY(childrentex, (hipArray*)(ref->d_children_tex_array),
childrenTextureDesc);
#else
size_t childpitch;
CUDA_MALLOC_PITCH((void**)(&ref->d_children_tex_array),
&childpitch,
ref->tex_width * sizeof(PixelOfChildren),
ref->tex_children_height);
CUDA_SAFE_CALL(hipMemcpy2D((ref->d_children_tex_array),
childpitch,
ref->h_children_tex_array,
childpitch,
ref->tex_width * sizeof(PixelOfNode),
ref->tex_children_height,
hipMemcpyHostToDevice));
#endif
}
#if TREE_ACCESS_HISTOGRAM
// node hist
ref->bytes_on_board += ref->tex_width * ref->tex_node_height * sizeof(int);
CUDA_MALLOC((void**)(&ref->d_node_hist),
ref->tex_width * ref->tex_node_height * sizeof(int));
CUDA_SAFE_CALL(hipMemset((ref->d_node_hist), 0,
ref->tex_width * ref->tex_node_height * sizeof(int)));
if (ref->tex_children_height) {
// children hist
ref->bytes_on_board += ref->tex_width * ref->tex_children_height * sizeof(int);
fprintf(stderr, "after child_hist ref->bytes_on_board:%ld\n", ref->bytes_on_board);
CUDA_MALLOC((void**)(&ref->d_child_hist),
ref->tex_width * ref->tex_children_height * sizeof(int));
CUDA_SAFE_CALL(hipMemset((ref->d_child_hist), 0,
ref->tex_width * ref->tex_children_height * sizeof(int)));
}
#endif
#else // NO TREE REORDERING
// Node tex, 1-dimensional
CUDA_MALLOC((void**)(&ref->d_node_tex_array),
ref->tex_node_height * sizeof(PixelOfNode));
CUDA_SAFE_CALL(hipMemcpy((ref->d_node_tex_array),
ref->h_node_tex_array,
ref->tex_node_height * sizeof(PixelOfNode),
hipMemcpyHostToDevice));
#if NODETEX
hipChannelFormatDesc nodeTextureDesc = hipCreateChannelDesc(32, 32, 32, 32, hipChannelFormatKindUnsigned);
nodetex.addressMode[0] = hipAddressModeClamp;
nodetex.filterMode = hipFilterModePoint;
nodetex.normalized = false; // access with normalized texture coordinates
BIND_TEX(0, nodetex, (void*)(ref->d_node_tex_array), nodeTextureDesc,
ref->tex_node_height * sizeof(PixelOfNode));
#endif
if (ref->tex_children_height) {
// Child tex, 1-dimensional
CUDA_MALLOC((void**)(&ref->d_children_tex_array),
ref->tex_children_height * sizeof(PixelOfChildren));
CUDA_SAFE_CALL(hipMemcpy((ref->d_children_tex_array),
ref->h_children_tex_array,
ref->tex_children_height * sizeof(PixelOfChildren),
hipMemcpyHostToDevice));
#if CHILDTEX
hipChannelFormatDesc childTextureDesc = hipCreateChannelDesc(32, 32, 32, 32, hipChannelFormatKindUnsigned);
childrentex.addressMode[0] = hipAddressModeClamp;
childrentex.filterMode = hipFilterModePoint;
childrentex.normalized = false; // access with normalized texture coordinates
BIND_TEX(0, childrentex, (void*)(ref->d_children_tex_array),
childTextureDesc, ref->tex_children_height * sizeof(PixelOfChildren));
#endif
}
#if TREE_ACCESS_HISTOGRAM
ref->bytes_on_board += ref->tex_node_height * sizeof(int);
CUDA_MALLOC((void**)(&ref->d_node_hist),
ref->tex_node_height * sizeof(int));
CUDA_SAFE_CALL(hipMemset((ref->d_node_hist), 0,
ref->tex_node_height * sizeof(int)));
if (ref->tex_children_height) {
ref->bytes_on_board += ref->tex_children_height * sizeof(int);
CUDA_MALLOC((void**)(&ref->d_child_hist),
ref->tex_children_height * sizeof(int));
CUDA_SAFE_CALL(hipMemset((ref->d_child_hist), 0,
ref->tex_children_height * sizeof(int)));
}
#endif
#endif
#if TWO_LEVEL_NODE_TREE
PixelOfNode node_buf[NODE_THRESH];
memset(node_buf, 0, sizeof(node_buf));
for (unsigned int i = 0; (i < NODE_THRESH) && (i < ref->num_nodes); ++i) {
TextureAddress myaddress(id2addr(i));
#if MERGETEX && REORDER_TREE
myaddress.x &= 0x7FF;
myaddress.x *= 2;
int loc = myaddress.x + myaddress.y * MAX_TEXTURE_DIMENSION;
node_buf[i] = ((PixelOfNode*)(ref->h_node_tex_array))[loc];
#elif REORDER_TREE
int loc = myaddress.x + myaddress.y * MAX_TEXTURE_DIMENSION;
node_buf[i] = ((PixelOfNode*)(ref->h_node_tex_array))[loc];
#elif MERGETEX
node_buf[i] = ((PixelOfNode*)(ref->h_node_tex_array))[myaddress.x * 2];
#else
node_buf[i] = ((PixelOfNode*)(ref->h_node_tex_array))[myaddress.x];
#endif
}
CUDA_SAFE_CALL(hipMemcpyToSymbol(node_tree_top, node_buf, sizeof(node_buf)));
#endif
#if TWO_LEVEL_CHILD_TREE
PixelOfChildren child_buf[CHILD_THRESH];
memset(child_buf, 0, sizeof(child_buf));
for (unsigned int i = 0; (i < CHILD_THRESH) && (i < ref->num_nodes); ++i) {
TextureAddress myaddress(id2addr(i));
#if MERGETEX && REORDER_TREE
myaddress.x &= 0x7FF;
myaddress.x *= 2;
int loc = myaddress.x + myaddress.y * MAX_TEXTURE_DIMENSION;
child_buf[i] = ((PixelOfChildren*)(ref->h_node_tex_array))[loc + 1];
#elif REORDER_TREE
int loc = myaddress.x + myaddress.y * MAX_TEXTURE_DIMENSION;
child_buf[i] = ((PixelOfChildren*)(ref->h_children))[loc];
#elif MERGETEX
child_buf[i] = ((PixelOfChildren*)(ref->h_node_tex_array))[myaddress.x * 2 + 1];
#else
child_buf[i] = ((PixelOfChildren*)(ref->h_children_tex_array))[myaddress.x];
#endif
}
CUDA_SAFE_CALL(hipMemcpyToSymbol(child_tree_top, child_buf, sizeof(child_buf)));
#endif
stopTimer(toboardtimer);
ctx->statistics.t_tree_to_board += getTimerValue(toboardtimer);
deleteTimer(toboardtimer);
fprintf(stderr, "done\n");
}
else {
ref->d_node_tex_array = NULL;
ref->d_children_tex_array = NULL;
}
}
void dumpQueryBlockInfo(QuerySet* queries)
{
fprintf(stderr, "\tProcessing queries %s to %s\n",
queries->h_names[0],
queries->h_names[queries->count - 1]);
}
void loadQueries(MatchContext* ctx)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
QuerySet* queries = ctx->queries;
queries->bytes_on_board = 0;
unsigned int numQueries = queries->count;
if (!ctx->on_cpu) {
fprintf(stderr, "Allocating device memory for queries... ");
char* toboardtimer = createTimer();
startTimer(toboardtimer);
dumpQueryBlockInfo(queries);
CUDA_MALLOC((void**)&queries->d_tex_array, queries->texlen);
queries->bytes_on_board += queries->texlen;
CUDA_SAFE_CALL(hipMemcpy((void*)queries->d_tex_array,
queries->h_tex_array + queries->h_addrs_tex_array[0],
queries->texlen,
hipMemcpyHostToDevice));
#if QRYTEX
qrytex.addressMode[0] = hipAddressModeClamp;
qrytex.filterMode = hipFilterModePoint;
qrytex.normalized = false; // access with normalized texture coordinates
hipChannelFormatDesc qryDesc = hipCreateChannelDesc(8, 0, 0, 0, hipChannelFormatKindUnsigned);
BIND_TEX(0, qrytex, (void*)(queries->d_tex_array), qryDesc,
queries->texlen);
#endif
CUDA_MALLOC((void**)&queries->d_addrs_tex_array,
numQueries * sizeof(int));
queries->bytes_on_board += numQueries * sizeof(int);
CUDA_SAFE_CALL(hipMemcpy((void*)queries->d_addrs_tex_array,
queries->h_addrs_tex_array,
numQueries * sizeof(int),
hipMemcpyHostToDevice));
CUDA_MALLOC((void**)&queries->d_lengths_array,
numQueries * sizeof(int));
queries->bytes_on_board += numQueries * sizeof(int);
CUDA_SAFE_CALL(hipMemcpy((void*)queries->d_lengths_array,
queries->h_lengths_array,
numQueries * sizeof(int),
hipMemcpyHostToDevice));
stopTimer(toboardtimer);
ctx->statistics.t_queries_to_board += getTimerValue(toboardtimer);
deleteTimer(toboardtimer);
fprintf(stderr, "\tallocated %ld bytes\n", queries->bytes_on_board);
}
else {
queries->d_addrs_tex_array = NULL;
queries->d_tex_array = NULL;
queries->d_lengths_array = NULL;
fprintf(stderr, " allocated %ld bytes\n", 2 * numQueries * sizeof(int) + queries->texlen);
}
}
void unloadQueries(MatchContext* ctx)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
QuerySet* queries = ctx->queries;
CUDA_SAFE_CALL(hipFree(queries->d_tex_array));
queries->d_tex_array = NULL;
CUDA_SAFE_CALL(hipFree(queries->d_addrs_tex_array));
queries->d_addrs_tex_array = NULL;
CUDA_SAFE_CALL(hipFree(queries->d_lengths_array));
queries->d_lengths_array = NULL;
queries->bytes_on_board = 0;
}
// Computes the location of the first MatchCoord for a given query. NOTE:
// Do NOT use this function if COALESCED_QUERIES == 1
inline int match_coord_addrs(int qryid, int qry_addrs, int match_length)
{
return qry_addrs - qryid * (match_length + 1);
}
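/* Worked example (illustrative, with hypothetical numbers; assumes each
query occupies length + 2 chars in the packed texture: the 'q' prefix
noted in coordsToPrintBuffers plus a terminator). With match_length = 20
and query 0 of length 30, query 0 spans texture addrs [0, 32) and needs
30 + 1 - 20 = 11 coords. Query 1 then has qryid = 1, qry_addrs = 32, and
match_coord_addrs(1, 32, 20) = 32 - 1 * 21 = 11, i.e. its coords begin
right after query 0's. */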
// Construct the offset table for a set of queries. This table will be used
// by the printing functions, and if COALESCED_QUERIES == 1, by the matching
// kernel.
void buildCoordOffsetArray(MatchContext* ctx,
int** h_coord_offset_array,
unsigned int* num_coords)
{
int numCoords = 0;
int match_length = ctx->min_match_length;
int numQueries = ctx->queries->count;
int* lengths = ctx->queries->h_lengths_array;
int* coord_offsets = (int*)calloc(numQueries, sizeof(int));
#if COALESCED_QUERIES
for (unsigned int i = 0; i < numQueries; i += WARP_SIZE) {
// Every query in this warp will need at least this many coords
int max_num_coords = 0;
for (unsigned int j = 0; j < WARP_SIZE && (i + j) < numQueries; ++j) {
int num_coords = lengths[i + j] - match_length + 1;
if (max_num_coords < num_coords)
max_num_coords = num_coords;
}
unsigned int block_size = max_num_coords * WARP_SIZE;
for (unsigned int j = 0; j < WARP_SIZE && (i + j) < numQueries; ++j) {
ctx->results.h_coord_tex_array[i + j] = numCoords + j;
}
numCoords += block_size;
}
#else
for (unsigned int i = 0; i < numQueries; ++i) {
int qryoffset = ctx->queries->h_addrs_tex_array[i];
coord_offsets[i] = match_coord_addrs(i, qryoffset, match_length);
}
if (numQueries > 0) {
unsigned int last_qry = numQueries - 1;
unsigned int last_qry_len = lengths[last_qry] - match_length + 1;
numCoords = coord_offsets[last_qry] + last_qry_len;
fprintf(stderr, "Need %d match coords for this result array\n",
numCoords);
}
#endif
*num_coords = numCoords;
*h_coord_offset_array = coord_offsets;
}
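/* Sketch of the resulting table for the non-coalesced path (hypothetical
numbers): two queries of lengths {30, 25} at texture addrs {0, 32} with
match_length = 20 give coord_offsets = {0 - 0 * 21, 32 - 1 * 21} = {0, 11}
and numCoords = 11 + (25 - 20 + 1) = 17: the 11 coords of query 0 followed
by the 6 coords of query 1, packed back to back. */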
void loadResultBuffer(MatchContext* ctx)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
unsigned int numQueries = ctx->queries->count;
assert(numQueries);
char* offsettimer = createTimer();
startTimer(offsettimer);
buildCoordOffsetArray(ctx,
&(ctx->results.h_coord_tex_array),
&(ctx->results.numCoords));
stopTimer(offsettimer);
ctx->statistics.t_build_coord_offsets += getTimerValue(offsettimer);
deleteTimer(offsettimer);
unsigned int numCoords = ctx->results.numCoords;
fprintf(stderr, "Allocating result array for %d queries (%d bytes) ...",
numQueries, numCoords * sizeof(MatchCoord));
size_t boardFreeMemory = 0;
size_t total_mem = 0;
boardMemory(&boardFreeMemory, &total_mem);
fprintf(stderr, "board free memory: %u total memory: %u\n",
boardFreeMemory, total_mem);
ctx->results.h_match_coords = (MatchCoord*)calloc(numCoords, sizeof(MatchCoord));
if (ctx->results.h_match_coords == NULL) {
trap_dbg();
exit(EXIT_FAILURE);
}
if (!ctx->on_cpu) {
char* toboardtimer = createTimer();
startTimer(toboardtimer);
ctx->results.bytes_on_board = 0;
CUDA_MALLOC((void**)&ctx->results.d_match_coords,
numCoords * sizeof(MatchCoord));
ctx->results.bytes_on_board += numCoords * sizeof(MatchCoord);
CUDA_SAFE_CALL(hipMemset((void*)ctx->results.d_match_coords, 0,
numCoords * sizeof(MatchCoord)));
#if COALESCED_QUERIES
CUDA_MALLOC((void**)&ctx->results.d_coord_tex_array,
numQueries * sizeof(int));
ctx->results.bytes_on_board += numQueries * sizeof(int);
CUDA_SAFE_CALL(hipMemcpy((void*)ctx->results.d_coord_tex_array,
ctx->results.h_coord_tex_array,
numQueries * sizeof(int),
hipMemcpyHostToDevice));
#endif
stopTimer(toboardtimer);
ctx->statistics.t_match_coords_to_board += getTimerValue(toboardtimer);
deleteTimer(toboardtimer);
}
else {
ctx->results.d_match_coords = NULL;
}
fprintf(stderr, "done\n");
}
void unloadResultBuffer(MatchContext* ctx)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
CUDA_SAFE_CALL(hipFree(ctx->results.d_match_coords));
ctx->results.d_match_coords = NULL;
ctx->results.bytes_on_board = 0;
#if COALESCED_QUERIES
// d_match_coords was already freed above; release the coalesced coord table instead.
CUDA_SAFE_CALL(hipFree(ctx->results.d_coord_tex_array));
ctx->results.d_coord_tex_array = NULL;
#endif
}
void transferResultsFromDevice(MatchContext* ctx)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
if (!ctx->on_cpu) {
char* fromboardtimer = createTimer();
startTimer(fromboardtimer);
CUDA_SAFE_CALL(hipMemcpy(ctx->results.h_match_coords,
ctx->results.d_match_coords,
ctx->results.numCoords * sizeof(MatchCoord),
hipMemcpyDeviceToHost));
#if TREE_ACCESS_HISTOGRAM
CUDA_SAFE_CALL(hipMemcpy(ctx->ref->h_node_hist,
ctx->ref->d_node_hist,
ctx->ref->tex_node_height * ctx->ref->tex_width * sizeof(int),
hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipMemcpy(ctx->ref->h_child_hist,
ctx->ref->d_child_hist,
ctx->ref->tex_children_height * ctx->ref->tex_width * sizeof(int),
hipMemcpyDeviceToHost));
if (ctx->statistics.node_hist_size < ctx->ref->tex_width * ctx->ref->tex_node_height) {
int* temp = (int*)calloc(ctx->ref->tex_width * ctx->ref->tex_node_height, sizeof(int));
if (ctx->statistics.node_hist_size)
memcpy(temp, ctx->statistics.node_hist, ctx->statistics.node_hist_size * sizeof(int));
ctx->statistics.node_hist = temp;
ctx->statistics.node_hist_size = ctx->ref->tex_width * ctx->ref->tex_node_height;
}
if (ctx->statistics.child_hist_size < ctx->ref->tex_width * ctx->ref->tex_children_height) {
int* temp = (int*)calloc(ctx->ref->tex_width * ctx->ref->tex_children_height, sizeof(int));
if (ctx->statistics.child_hist_size)
memcpy(temp, ctx->statistics.child_hist, ctx->statistics.child_hist_size * sizeof(int));
ctx->statistics.child_hist = temp;
ctx->statistics.child_hist_size = ctx->ref->tex_width * ctx->ref->tex_children_height;
}
for (unsigned int i = 0; i < ctx->statistics.node_hist_size; ++i) {
ctx->statistics.node_hist[i] += ctx->ref->h_node_hist[i];
}
for (unsigned int i = 0; i < ctx->statistics.child_hist_size; ++i) {
ctx->statistics.child_hist[i] += ctx->ref->h_child_hist[i];
}
#endif
stopTimer(fromboardtimer);
ctx->statistics.t_match_coords_from_board += getTimerValue(fromboardtimer);
deleteTimer(fromboardtimer);
}
}
int flushOutput();
int addToBuffer(char* string);
char numbuffer[32];
MatchCoord* coordForQueryChar(MatchContext* ctx,
unsigned int qryid,
unsigned int qrychar)
{
MatchResults* results = &(ctx->results);
MatchCoord* coords = results->h_match_coords;
#if COALESCED_QUERIES
return coords + results->h_coord_tex_array[qryid] + qrychar * WARP_SIZE;
#else
return coords + results->h_coord_tex_array[qryid] + qrychar;
#endif
}
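/* Layout note (illustrative): without COALESCED_QUERIES each query's coords
are contiguous, so (qryid, qrychar) maps to base[qryid] + qrychar. With
COALESCED_QUERIES the coords of the WARP_SIZE queries in a warp are
interleaved: consecutive qrychar values of one query sit WARP_SIZE entries
apart, so the threads of a warp touch adjacent MatchCoord entries. */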
void coordsToPrintBuffers(MatchContext* ctx,
ReferencePage* page,
MatchInfo** matches,
Alignment** alignments,
unsigned int mem_avail,
unsigned int* coord_idx,
unsigned int* match_idx,
unsigned int* align_idx,
unsigned int* nextqry,
unsigned int* nextqrychar)
{
unsigned int numQueries = ctx->queries->count;
int match_length = ctx->min_match_length;
unsigned int cidx = *coord_idx;
unsigned int midx = 0;
unsigned int numCoords = ctx->results.numCoords;
unsigned int numMatches = 0;
unsigned int numAlignments = 0;
int DEBUG = 0;
if (DEBUG && cidx == 0) {
for (int j = 0; j < numCoords; ++j) {
MatchCoord* coord = ctx->results.h_match_coords + j;
if (coord->node.data > 0 && !(coord->edge_match_length & FRMASK)) {
//fprintf(stdout, "node: %d\n",
// coord->node);
fprintf(stdout, "node: %d leaves:%d\n",
coord->node.data, lookupNumLeaves(page, coord->node));
}
}
exit(0);
}
// How much can we fit into mem_avail?
for (int j = cidx; j < numCoords; ++j) {
MatchCoord* coord = ctx->results.h_match_coords + j;
int queryAlignments = 0;
int queryMatches = 0;
if (coord->node.data > 0 && !(coord->edge_match_length & FRMASK)) {
int numLeaves = lookupNumLeaves(page, coord->node);
queryAlignments += numLeaves;
queryMatches++;
}
int allMatches = numMatches + queryMatches;
int allAlignments = numAlignments + queryAlignments;
int neededSize = allMatches * sizeof(MatchInfo) + allAlignments * sizeof(Alignment);
if (neededSize > mem_avail || (allMatches / BLOCKSIZE) >= MAX_GRID_DIMENSION) {
// adding this match won't fit on the board
break;
}
++cidx;
numMatches = allMatches;
numAlignments = allAlignments;
}
MatchInfo* M = (MatchInfo*)calloc(numMatches, sizeof(MatchInfo));
unsigned int alignmentOffset = 0;
int qry = *nextqry;
int qrychar = *nextqrychar;
bool set_full = false;
while (qry < numQueries) {
// h_lengths_array doesn't count the 'q' at the beginning of each query
int qlen = ctx->queries->h_lengths_array[qry] + 1 - match_length;
while (qrychar < qlen) {
if (midx >= numMatches) {
set_full = true;
break;
}
MatchCoord* coord = coordForQueryChar(ctx, qry, qrychar);
if (coord->node.data > 0 && !(coord->edge_match_length & FRMASK)) {
MatchInfo m;
m.resultsoffset = alignmentOffset;
m.qrystartpos = qrychar;
m.matchnode = coord->node;
m.edgematch = coord->edge_match_length;
m.numLeaves = lookupNumLeaves(page, m.matchnode);
m.queryid = qry;
alignmentOffset += m.numLeaves;
M[midx++] = m;
}
++qrychar;
}
if (set_full)
break;
++qry;
qrychar = 0;
}
*coord_idx = cidx;
*match_idx = midx;
*align_idx = alignmentOffset;
*matches = M;
*nextqry = qry;
*nextqrychar = qrychar;
fprintf(stderr, "Allocing %d bytes of host memory for %d alignments\n", alignmentOffset * sizeof(Alignment), numAlignments);
*alignments = (struct Alignment*)calloc(alignmentOffset, sizeof(Alignment));
//hipHostMalloc((void**)alignments, numAlignments * sizeof(Alignment));
}
void runPrintKernel(MatchContext* ctx,
ReferencePage* page,
MatchInfo* h_matches,
unsigned int numMatches,
Alignment* alignments,
unsigned int numAlignments)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
MatchInfo* d_matches;
size_t matchesSize = numMatches * sizeof(MatchInfo);
CUDA_MALLOC((void**)&d_matches, matchesSize);
struct Alignment* d_alignments;
size_t alignmentSize = numAlignments * sizeof(Alignment);
CUDA_MALLOC((void**)&d_alignments, alignmentSize);
CUDA_SAFE_CALL(hipMemset((void*)d_alignments, 0, alignmentSize));
char* atimer = createTimer();
startTimer(atimer);
// Copy matches to card
fprintf(stderr, "prepared %d matches %d alignments\n", numMatches, numAlignments);
fprintf(stderr, "Copying %d bytes to host memory for %d alignments\n", numAlignments * sizeof(Alignment), numAlignments);
int DEBUG = 0;
if (DEBUG) {
for (int i = 0; i < numMatches; i++) {
printf("m[%d]:\t%d\t%d\t%d\t%d\t%d\t%d\n",
i,
h_matches[i].resultsoffset,
h_matches[i].queryid,
h_matches[i].matchnode.data,
h_matches[i].numLeaves,
h_matches[i].edgematch,
h_matches[i].qrystartpos);
}
exit(0);
}
CUDA_SAFE_CALL(hipMemcpy(d_matches, h_matches, matchesSize, hipMemcpyHostToDevice));
stopTimer(atimer);
float mtime = getTimerValue(atimer);
// Launch the kernel
int blocksize = (numMatches > BLOCKSIZE) ? BLOCKSIZE : numMatches;
dim3 dimBlock(blocksize, 1, 1);
dim3 dimGrid(ceil(numMatches / (float)BLOCKSIZE), 1, 1);
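// Example (illustrative): numMatches = 1000 with BLOCKSIZE = 256 yields
// dimBlock = 256 and dimGrid.x = ceil(1000 / 256.0) = 4; the last block is
// only partially full, so the kernel presumably guards tid >= numMatches.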
fprintf(stderr, " Calling print kernel... ");
{
gloop::Statistics::Scope<gloop::Statistics::Type::Kernel> scope;
hipLaunchKernelGGL(( printKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_matches,
numMatches,
d_alignments,
#if COALESCED_QUERIES
ctx->results.d_coord_tex_array,
#endif
#if !QRYTEX
#if COALESCED_QUERIES
(int*)
#endif
ctx->queries->d_tex_array,
#endif
#if !NODETEX
(_PixelOfNode*)ctx->ref->d_node_tex_array,
#endif
#if !CHILDTEX
(_PixelOfChildren*)ctx->ref->d_children_tex_array,
#endif
ctx->queries->d_addrs_tex_array,
ctx->queries->d_lengths_array,
page->begin,
page->end,
page->shadow_left,
page->shadow_right,
ctx->min_match_length
#if TREE_ACCESS_HISTOGRAM
,
ctx->ref->d_node_hist,
ctx->ref->d_child_hist
#endif
);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "Kernel execution failed: %s.\n",
hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
startTimer(atimer);
// Copy the results back to the host
CUDA_SAFE_CALL(hipMemcpy((void*)alignments,
(void*)d_alignments,
alignmentSize,
hipMemcpyDeviceToHost));
hipDeviceSynchronize();
stopTimer(atimer);
float atime = getTimerValue(atimer);
fprintf(stderr, "memcpy time= %f\n", atime + mtime);
deleteTimer(atimer);
// Cleanup
CUDA_SAFE_CALL(hipFree(d_alignments));
CUDA_SAFE_CALL(hipFree(d_matches));
}
// TODO: need reverse-complement printing support
void runPrintOnCPU(MatchContext* ctx, ReferencePage* page,
MatchInfo* h_matches,
unsigned int numMatches,
Alignment* alignments,
unsigned int numAlignments)
{
unsigned int min_match_length = ctx->min_match_length;
int* addrs = ctx->queries->h_addrs_tex_array;
int* lengths = ctx->queries->h_lengths_array;
char* qrychars = ctx->queries->h_tex_array;
if (!numMatches)
return;
int qry = -1;
unsigned int qrylen;
for (int i = 0; i < numMatches; ++i) {
MatchInfo& match = h_matches[i];
if (match.queryid != qry) {
qry = match.queryid;
qrylen = lengths[qry];
}
if (!(match.edgematch & FRMASK)) {
printAlignments(page,
alignments + match.resultsoffset,
#if COALESCED_QUERIES
qrychars + sizeof(int) * addrs[qry],
#else
qrychars + addrs[qry],
#endif
qrylen,
match.matchnode,
match.qrystartpos,
match.edgematch,
min_match_length,
0,
ctx->forwardcoordinates);
}
}
}
int addMatchToBuffer(int left_in_ref, int qrypos, int matchlen);
void getExactAlignments(MatchContext* ctx, ReferencePage* page, bool on_cpu)
{
gloop::Statistics::Scope<gloop::Statistics::Type::DataInit> scope;
assert(!ctx->reverse && !ctx->forwardreverse);
size_t boardFreeMemory;
size_t total_mem;
if (!on_cpu) {
boardMemory(&boardFreeMemory, &total_mem);
fprintf(stderr, "board free memory: %u total memory: %u\n",
boardFreeMemory, total_mem);
}
else {
boardFreeMemory = 256 * 1024 * 1024;
total_mem = boardFreeMemory;
}
#ifdef __DEVICE_EMULATION__
boardFreeMemory = 512 * 1024 * 1024;
#endif
boardFreeMemory -= BREATHING_ROOM;
fprintf(stderr, "board free memory: %u\n", boardFreeMemory);
int rTotalMatches = 0;
int rTotalAlignments = 0;
int totalRounds = 0;
unsigned int last_coord = ctx->results.numCoords;
unsigned int next_coord = 0;
unsigned int nextqry = 0;
unsigned int nextqrychar = 0;
int lastqry = -1;
while (next_coord < last_coord) {
// see how many queries will fit on the board
gloop::Statistics::Scope<gloop::Statistics::Type::DataInit> scope;
totalRounds++;
unsigned int numMatches = 0;
unsigned int numAlignments = 0;
MatchInfo* h_matches = NULL;
Alignment* h_alignments = NULL;
int coord_left = next_coord;
char* btimer = createTimer();
startTimer(btimer);
coordsToPrintBuffers(ctx, page, &h_matches, &h_alignments, boardFreeMemory,
&next_coord, &numMatches, &numAlignments, &nextqry, &nextqrychar);
stopTimer(btimer);
float btime = getTimerValue(btimer);
ctx->statistics.t_coords_to_buffers += btime;
fprintf(stderr, "buffer prep time= %f\n", btime);
deleteTimer(btimer);
fprintf(stderr, "Round %d: Printing results for match coords [%d-%d) of %d using %d matches and %d alignments\n",
totalRounds, coord_left, next_coord, last_coord, numMatches, numAlignments);
if (numMatches == 0)
continue;
char buf[256];
//assert(qryend > qrystart);
rTotalAlignments += numAlignments;
rTotalMatches += numMatches;
if (num_bind_tex_calls > 100) {
hipDeviceReset();
num_bind_tex_calls = 0;
loadReference(ctx);
loadQueries(ctx);
}
char* ktimer = createTimer();
startTimer(ktimer);
if (on_cpu) {
runPrintOnCPU(ctx, page, h_matches, numMatches,
h_alignments, numAlignments);
}
else {
runPrintKernel(ctx, page, h_matches, numMatches,
h_alignments, numAlignments);
}
stopTimer(ktimer);
float ktime = getTimerValue(ktimer);
ctx->statistics.t_print_kernel += ktime;
fprintf(stderr, "print kernel time= %f\n", ktime);
deleteTimer(ktimer);
// char* stimer = createTimer();
// startTimer(stimer);
// mapQueriesEndToEnd(ctx,
// page,
// h_matches,
// numMatches,
// h_alignments,
// numAlignments);
//
// stopTimer(stimer);
//
// float stime = getTimerValue(stimer);
// fprintf(stderr, "postprocess time= %f\n", stime);
// deleteTimer(stimer);
//flushOutput();
//Process the alignments
{
gloop::Statistics::Scope<gloop::Statistics::Type::IO> scope;
char* otimer = createTimer();
startTimer(otimer);
for (int m = 0; m < numMatches; m++) {
int base = h_matches[m].resultsoffset;
for (int i = 0; i < h_matches[m].numLeaves; i++) {
// See if there are any more left maximal alignments for this match
if (h_alignments[base + i].left_in_ref == 0) {
break;
}
if (h_matches[m].queryid != lastqry) {
lastqry = h_matches[m].queryid;
addToBuffer("> ");
addToBuffer(*(ctx->queries->h_names + lastqry));
addToBuffer("\n");
}
sprintf(buf, "%d\t%d\t%d\n",
h_alignments[base + i].left_in_ref,
h_matches[m].qrystartpos + 1,
h_alignments[base + i].matchlen);
addToBuffer(buf);
// addMatchToBuffer(h_alignments[base+i].left_in_ref,
// h_matches[m].qrystartpos + 1,
// h_alignments[base+i].matchlen);
}
}
flushOutput();
stopTimer(otimer);
ctx->statistics.t_results_to_disk += getTimerValue(otimer);
deleteTimer(otimer);
free(h_matches);
free(h_alignments);
//hipHostFree((void*)h_alignments);
}
}
free(ctx->results.h_coord_tex_array);
free(ctx->results.h_match_coords);
ctx->results.h_coord_tex_array = NULL;
ctx->results.h_match_coords = NULL;
fprintf(stderr, "Finished processing %d matches and %d potential alignments in %d rounds\n",
rTotalMatches, rTotalAlignments, totalRounds);
}
int getQueryBlock(MatchContext* ctx, size_t device_mem_avail)
{
gloop::Statistics::Scope<gloop::Statistics::Type::IO> scope;
QuerySet* queries = ctx->queries;
char* queryTex = NULL;
int* queryAddrs = NULL;
int* queryLengths = NULL;
unsigned int numQueries;
unsigned int num_match_coords;
size_t queryLen;
char** names;
fprintf(stderr, "Loading query block... ");
char* queryreadtimer = createTimer();
startTimer(queryreadtimer);
getQueriesTexture(queries->qfile,
&queryTex,
&queryLen,
&queryAddrs,
&names,
&queryLengths,
&numQueries,
&num_match_coords,
device_mem_avail,
ctx->min_match_length,
ctx->reverse || ctx->forwardreverse);
stopTimer(queryreadtimer);
ctx->statistics.t_queries_from_disk += getTimerValue(queryreadtimer);
deleteTimer(queryreadtimer);
queries->h_tex_array = queryTex;
queries->count = numQueries;
queries->h_addrs_tex_array = queryAddrs;
queries->texlen = queryLen;
queries->h_names = names;
queries->h_lengths_array = queryLengths;
ctx->results.numCoords = num_match_coords;
fprintf(stderr, "done.\n");
return numQueries;
}
void destroyQueryBlock(QuerySet* queries)
{
free(queries->h_tex_array);
queries->h_tex_array = NULL;
for (int i = 0; i < queries->count; ++i)
free(queries->h_names[i]);
free(queries->h_names);
queries->count = 0;
queries->texlen = 0;
free(queries->h_addrs_tex_array);
queries->h_addrs_tex_array = NULL;
free(queries->h_lengths_array);
queries->h_lengths_array = NULL;
}
void resetStats(Statistics* stats)
{
gloop::Statistics::Scope<gloop::Statistics::Type::DataInit> scope;
stats->t_end_to_end = 0.0;
stats->t_match_kernel = 0.0;
stats->t_print_kernel = 0.0;
stats->t_queries_to_board = 0.0;
stats->t_match_coords_to_board = 0.0;
stats->t_match_coords_from_board = 0.0;
stats->t_tree_to_board = 0.0;
stats->t_ref_str_to_board = 0.0;
stats->t_queries_from_disk = 0.0;
stats->t_ref_from_disk = 0.0;
stats->t_results_to_disk = 0.0;
stats->t_tree_construction = 0.0;
stats->t_tree_reorder = 0.0;
stats->t_tree_flatten = 0.0;
stats->t_reorder_ref_str = 0.0;
stats->t_build_coord_offsets = 0.0;
stats->t_coords_to_buffers = 0.0;
stats->bp_avg_query_length = 0.0;
#if TREE_ACCESS_HISTOGRAM
if (stats->node_hist_size) {
free(stats->node_hist);
stats->node_hist = NULL;
stats->node_hist_size = 0;
}
if (stats->child_hist_size) {
free(stats->child_hist);
stats->child_hist = NULL;
stats->child_hist_size = 0;
}
#endif
}
void writeStatisticsFile(Statistics* stats,
char* stats_filename,
char* node_hist_filename = NULL,
char* child_hist_filename = NULL)
{
gloop::Statistics::Scope<gloop::Statistics::Type::IO> scope;
if (stats_filename) {
FILE* f = fopen(stats_filename, "w");
if (!f) {
fprintf(stderr, "WARNING: could not open %s for writing\n", stats_filename);
}
else {
fprintf(f, "Q");
fprintf(f, ",R");
fprintf(f, ",T");
fprintf(f, ",m");
fprintf(f, ",r");
fprintf(f, ",t");
fprintf(f, ",n");
fprintf(f, ",Total");
fprintf(f, ",Match kernel");
fprintf(f, ",Print Kernel");
fprintf(f, ",Queries to board");
fprintf(f, ",Match coords to board");
fprintf(f, ",Match coords from board");
fprintf(f, ",Tree to board");
fprintf(f, ",Ref str to board");
fprintf(f, ",Queries from disk");
fprintf(f, ",Ref from disk");
fprintf(f, ",Output to disk");
fprintf(f, ",Tree construction");
fprintf(f, ",Tree reorder");
fprintf(f, ",Tree flatten");
fprintf(f, ",Ref reorder");
fprintf(f, ",Build coord table");
fprintf(f, ",Coords to buffers");
fprintf(f, ",Avg qry length");
fprintf(f, "\n");
fprintf(f, "%d", QRYTEX);
fprintf(f, ",%d", REFTEX);
fprintf(f, ",%d", TREETEX);
fprintf(f, ",%d", MERGETEX);
fprintf(f, ",%d", REORDER_REF);
fprintf(f, ",%d", REORDER_TREE);
fprintf(f, ",%d", RENUMBER_TREE);
fprintf(f, ",%f", stats->t_end_to_end);
fprintf(f, ",%f", stats->t_match_kernel);
fprintf(f, ",%f", stats->t_print_kernel);
fprintf(f, ",%f", stats->t_queries_to_board);
fprintf(f, ",%f", stats->t_match_coords_to_board);
fprintf(f, ",%f", stats->t_match_coords_from_board);
fprintf(f, ",%f", stats->t_tree_to_board);
fprintf(f, ",%f", stats->t_ref_str_to_board);
fprintf(f, ",%f", stats->t_queries_from_disk);
fprintf(f, ",%f", stats->t_ref_from_disk);
fprintf(f, ",%f", stats->t_results_to_disk);
fprintf(f, ",%f", stats->t_tree_construction);
fprintf(f, ",%f", stats->t_tree_reorder);
fprintf(f, ",%f", stats->t_tree_flatten);
fprintf(f, ",%f", stats->t_reorder_ref_str);
fprintf(f, ",%f", stats->t_build_coord_offsets);
fprintf(f, ",%f", stats->t_coords_to_buffers);
fprintf(f, ",%f", stats->bp_avg_query_length);
fprintf(f, "\n");
fclose(f);
}
}
#if TREE_ACCESS_HISTOGRAM
if (node_hist_filename) {
FILE* f = fopen(node_hist_filename, "w");
if (!f) {
fprintf(stderr, "WARNING: could not open %s for writing\n", node_hist_filename);
}
else {
for (unsigned int i = 0; i < stats->node_hist_size; ++i)
fprintf(f, "%d\t%d\n", i, stats->node_hist[i]);
fclose(f);
}
}
if (child_hist_filename) {
FILE* f = fopen(child_hist_filename, "w");
if (!f) {
fprintf(stderr, "WARNING: could not open %s for writing\n", child_hist_filename);
}
else {
for (unsigned int i = 0; i < stats->child_hist_size; ++i)
fprintf(f, "%d\t%d\n", i, stats->child_hist[i]);
fclose(f);
}
}
float total_node_hits = 0;
float tree_top_node_hits = 0;
float total_child_hits = 0;
float tree_top_child_hits = 0;
for (unsigned int i = 0; i < stats->node_hist_size; ++i) {
total_node_hits += stats->node_hist[i];
if (i < 256) {
tree_top_node_hits += stats->node_hist[i];
}
}
for (unsigned int i = 0; i < stats->child_hist_size; ++i) {
total_child_hits += stats->child_hist[i];
if (i < 256) {
tree_top_child_hits += stats->child_hist[i];
}
}
fprintf(stderr, "Tree top node hits (%d/%d) = %f percent\n", (int)tree_top_node_hits, (int)total_node_hits, tree_top_node_hits / total_node_hits);
fprintf(stderr, "Tree top child hits (%d/%d) = %f percent\n", (int)tree_top_child_hits, (int)total_child_hits, tree_top_child_hits / total_child_hits);
#endif
}
void matchOnCPU(MatchContext* ctx, bool doRC)
{
//TODO: CPU matching is disabled.
if (doRC) {
// Match the reverse complement of the queries to the ref
computeGold(&ctx->results,
ctx->ref->str,
ctx->queries->h_tex_array,
ctx->queries->h_addrs_tex_array,
ctx->queries->h_lengths_array,
(PixelOfNode*)(ctx->ref->h_node_tex_array),
(PixelOfChildren*)(ctx->ref->h_children_tex_array),
ctx->queries->count,
ctx->min_match_length,
REVERSE);
}
else {
computeGold(&ctx->results,
ctx->ref->str,
ctx->queries->h_tex_array,
ctx->queries->h_addrs_tex_array,
ctx->queries->h_lengths_array,
(PixelOfNode*)(ctx->ref->h_node_tex_array),
(PixelOfChildren*)(ctx->ref->h_children_tex_array),
ctx->queries->count,
ctx->min_match_length,
FORWARD);
}
}
void matchOnGPU(MatchContext* ctx, bool doRC)
{
int numQueries = ctx->queries->count;
int blocksize = (numQueries > BLOCKSIZE) ? BLOCKSIZE : numQueries;
dim3 dimBlock(blocksize, 1, 1);
dim3 dimGrid(ceil(numQueries / (float)BLOCKSIZE), 1, 1);
// Match the reverse complement of the queries to the ref
if (doRC) {
//TODO: GPU RC is disabled
hipLaunchKernelGGL(( mummergpuRCKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, ctx->results.d_match_coords,
ctx->queries->d_tex_array,
ctx->queries->d_addrs_tex_array,
ctx->queries->d_lengths_array,
numQueries,
ctx->min_match_length);
}
else {
Driver* drivers = nullptr;
CUDA_SAFE_CALL(hipMalloc((void**)&drivers, sizeof(Driver) * dimGrid.x));
hipLaunchKernelGGL(( mummergpuKernel1), dim3(dimGrid), dim3(dimBlock), 0, 0,
drivers, ctx->results.d_match_coords, ctx->queries->d_tex_array, (char*)ctx->ref->d_ref_array, ctx->queries->d_addrs_tex_array, ctx->queries->d_lengths_array, numQueries, ctx->min_match_length);
for (int i = 0; i < 81; ++i) {
hipLaunchKernelGGL(( mummergpuKernel2), dim3(dimGrid), dim3(dimBlock), 0, 0,
drivers, ctx->results.d_match_coords, ctx->queries->d_tex_array, (char*)ctx->ref->d_ref_array, ctx->queries->d_addrs_tex_array, ctx->queries->d_lengths_array, numQueries, ctx->min_match_length);
}
CUDA_SAFE_CALL(hipFree(drivers));
}
// check if kernel execution generated an error
hipError_t err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "Kernel execution failed: %s.\n",
hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
void getMatchResults(MatchContext* ctx,
unsigned int page_num)
{
transferResultsFromDevice(ctx);
}
void matchQueryBlockToReferencePage(MatchContext* ctx,
ReferencePage* page,
bool reverse_complement)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Kernel> scope;
char* ktimer = createTimer();
fprintf(stderr, "Memory footprint is:\n\tqueries: %d\n\tref: %d\n\tresults: %d\n",
ctx->queries->bytes_on_board,
ctx->ref->bytes_on_board,
ctx->results.bytes_on_board);
startTimer(ktimer);
if (ctx->on_cpu) {
matchOnCPU(ctx, reverse_complement);
}
else {
matchOnGPU(ctx, reverse_complement);
hipDeviceSynchronize();
}
stopTimer(ktimer);
float ktime = getTimerValue(ktimer);
ctx->statistics.t_match_kernel += ktime;
fprintf(stderr, "match kernel time= %f\n", ktime);
deleteTimer(ktimer);
getMatchResults(ctx, page->id);
unloadResultBuffer(ctx);
}
int matchSubset(MatchContext* ctx,
ReferencePage* page)
{
loadQueries(ctx);
fprintf(stderr,
"Matching queries %s - %s against ref coords %d - %d\n",
ctx->queries->h_names[0],
ctx->queries->h_names[ctx->queries->count - 1],
page->begin,
page->end);
loadResultBuffer(ctx);
// TODO: re-enable RC support by calling this twice with the
// reverse/forwardreverse idiom.
matchQueryBlockToReferencePage(ctx, page, false);
if (USE_PRINT_KERNEL && !ctx->on_cpu) {
getExactAlignments(ctx, page, false);
}
else {
getExactAlignments(ctx, page, true);
}
flushOutput();
unloadQueries(ctx);
return 0;
}
int getFreeDeviceMemory(bool on_cpu)
{
size_t free_mem = 0;
size_t total_mem = 0;
// We have to 'prime' CUDA by making an allocation here. cuMemGetInfo
// will return zeroes until we do a malloc.
int* p = NULL;
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
CUDA_SAFE_CALL(hipMalloc((void**)&p, sizeof(int)));
CUDA_SAFE_CALL(hipFree(p));
}
if (!on_cpu) {
boardMemory(&free_mem, &total_mem);
fprintf(stderr, "board free memory: %u total memory: %u\n",
free_mem, total_mem);
}
else {
total_mem = free_mem = 804585472; // pretend we are on a 8800 GTX
}
return free_mem;
}
int matchQueriesToReferencePage(MatchContext* ctx, ReferencePage* page)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
fprintf(stderr, "Beginning reference page %p\n", page);
int free_mem = getFreeDeviceMemory(ctx->on_cpu);
int available_mem = free_mem - page->ref.bytes_on_board - BREATHING_ROOM;
ctx->ref = &(page->ref);
loadReference(ctx);
while (getQueryBlock(ctx, available_mem)) {
matchSubset(ctx, page);
ctx->statistics.bp_avg_query_length = ctx->queries->texlen / (float)(ctx->queries->count) - 2;
destroyQueryBlock(ctx->queries);
if (num_bind_tex_calls > 100) {
hipDeviceReset();
num_bind_tex_calls = 0;
loadReference(ctx);
}
}
unloadReferenceString(ctx->ref);
unloadReferenceTree(ctx);
{
gloop::Statistics::Scope<gloop::Statistics::Type::IO> scope;
lseek(ctx->queries->qfile, 0, SEEK_SET);
}
return 0;
}
void initReferencePages(MatchContext* ctx, int* num_pages, ReferencePage** pages_out)
{
unsigned int bases_in_ref = ctx->full_ref_len - 3;
unsigned int page_size = BASES_PER_TREE_PAGE < bases_in_ref ? BASES_PER_TREE_PAGE : bases_in_ref;
unsigned int num_reference_pages = ceil((bases_in_ref + 0.0) / page_size);
fprintf(stderr, "Stream will use %d pages for %d bases, page size = %d\n",
num_reference_pages, bases_in_ref, page_size);
unsigned int page_overlap = MAX_QUERY_LEN + 1;
ReferencePage* pages = (ReferencePage*)calloc(num_reference_pages,
sizeof(ReferencePage));
pages[0].begin = 1;
pages[0].end = pages[0].begin + page_size + ceil(page_overlap / 2.0) + 1; //the 1 is for the 's' at the beginning
pages[0].shadow_left = -1;
pages[0].id = 0;
for (int i = 1; i < num_reference_pages - 1; ++i) {
pages[i].begin = pages[i - 1].end - page_overlap;
pages[i].end = pages[i].begin + page_size + page_overlap;
pages[i - 1].shadow_right = pages[i].begin;
pages[i].shadow_left = pages[i - 1].end;
pages[i].id = i;
}
if (num_reference_pages > 1) {
int last_page = num_reference_pages - 1;
pages[last_page].begin = pages[last_page - 1].end - page_overlap;
pages[last_page].end = ctx->full_ref_len - 1;
pages[last_page - 1].shadow_right = pages[last_page].begin;
pages[last_page].shadow_right = -1;
pages[last_page].shadow_left = pages[last_page - 1].end;
pages[last_page].id = last_page;
}
*pages_out = pages;
*num_pages = num_reference_pages;
}
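/* Worked example (hypothetical sizes): bases_in_ref = 20,000,000 with
BASES_PER_TREE_PAGE = 8,388,608 gives ceil(20000000 / 8388608.0) = 3 pages.
Each later page begins page_overlap = MAX_QUERY_LEN + 1 bases before the
previous page ends, so a match no longer than MAX_QUERY_LEN always lies
entirely within at least one page; shadow_left/shadow_right record the
overlap boundaries, presumably so the print stage can suppress duplicate
alignments from the overlapped region. */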
int streamReferenceAgainstQueries(MatchContext* ctx)
{
int num_reference_pages = 0;
ReferencePage* pages = NULL;
initReferencePages(ctx, &num_reference_pages, &pages);
buildReferenceTexture(&(pages[0].ref),
ctx->full_ref,
pages[0].begin,
pages[0].end,
ctx->min_match_length,
ctx->dotfilename,
ctx->texfilename,
&(ctx->statistics));
matchQueriesToReferencePage(ctx, &pages[0]);
destroyReference(&(pages[0].ref));
for (int i = 1; i < num_reference_pages - 1; ++i) {
buildReferenceTexture(&(pages[i].ref),
ctx->full_ref,
pages[i].begin,
pages[i].end,
ctx->min_match_length,
NULL,
NULL,
&(ctx->statistics));
matchQueriesToReferencePage(ctx, &pages[i]);
destroyReference(&(pages[i].ref));
}
if (num_reference_pages > 1) {
int last_page = num_reference_pages - 1;
buildReferenceTexture(&(pages[last_page].ref),
ctx->full_ref,
pages[last_page].begin,
pages[last_page].end,
ctx->min_match_length,
NULL,
NULL,
&(ctx->statistics));
matchQueriesToReferencePage(ctx, &pages[last_page]);
destroyReference(&(pages[last_page].ref));
}
free(pages);
return 0;
}
extern "C" int matchQueries(MatchContext* ctx)
{
assert(sizeof(struct PixelOfNode) == sizeof(uint4));
assert(sizeof(struct PixelOfChildren) == sizeof(uint4));
CUDA_SAFE_CALL(hipDeviceSetLimit(hipLimitMallocHeapSize, (1ULL << 30)));
#if TREE_ACCESS_HISTOGRAM
ctx->statistics.node_hist_size = 0;
ctx->statistics.child_hist_size = 0;
#endif
resetStats(&(ctx->statistics));
char* ttimer = createTimer();
startTimer(ttimer);
int ret;
fprintf(stderr, "Streaming reference pages against all queries\n");
ret = streamReferenceAgainstQueries(ctx);
stopTimer(ttimer);
ctx->statistics.t_end_to_end += getTimerValue(ttimer);
deleteTimer(ttimer);
writeStatisticsFile(&(ctx->statistics), ctx->stats_file, "node_hist.out", "child_hist.out");
return ret;
}
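/* Usage sketch (hypothetical driver, not part of this file; error handling
omitted; min_match_length = 20 and the filenames are made up):
Reference ref = {0};
QuerySet queries = {0};
MatchContext ctx = {0};
createReference("ref.fa", &ref);
createQuerySet("queries.fa", &queries);
createMatchContext(&ref, &queries, NULL, false, 20, NULL,
false, false, false, false, NULL, NULL, &ctx);
matchQueries(&ctx);
destroyMatchContext(&ctx);
*/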
| 59310e55448cbd75e1785c0c2c45be6f84d96cf1.cu | // Includes, system
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>
#include <cuda.h>
#include <gloop/initialize.cuh>
#include <gloop/statistics.h>
#include <vector_types.h>
// includes, kernels
#include <common.cu>
#include <mummergpu.h>
#include <mummergpu_kernel.cu>
int USE_PRINT_KERNEL = 1;
#define BREATHING_ROOM (16 * 1024 * 1024)
#define BASES_PER_TREE_PAGE 8388608
//#define BASES_PER_TREE_PAGE 7000000
#define BLOCKSIZE 256
unsigned int cuda_calls = 0;
void trap_dbg()
{
fprintf(stderr, "Trapped\n");
}
#define CUDA_SAFE_CALL(call) \
do { \
cuda_calls++; \
cudaError err = call; \
if (cudaSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %d (%s).\n", \
__FILE__, __LINE__, err, cudaGetErrorString(err)); \
trap_dbg(); \
exit(EXIT_FAILURE); \
} \
} while (0)
#define CU_SAFE_CALL_NO_SYNC(call) \
do { \
CUresult err = call; \
if (CUDA_SUCCESS != err) { \
fprintf(stderr, "Cuda driver error %x in file '%s' in line %i.\n", \
err, __FILE__, __LINE__); \
exit(EXIT_FAILURE); \
} \
} while (0)
#define CUT_DEVICE_INIT_DRV(cuDevice) \
do { \
cuDevice = 0; \
int deviceCount = 0; \
CUresult err = cuInit(0); \
if (CUDA_SUCCESS == err) \
CU_SAFE_CALL_NO_SYNC(cuDeviceGetCount(&deviceCount)); \
if (deviceCount == 0) { \
fprintf(stderr, "There is no device.\n"); \
exit(EXIT_FAILURE); \
} \
int dev; \
for (dev = 0; dev < deviceCount; ++dev) { \
int major, minor; \
CU_SAFE_CALL_NO_SYNC(cuDeviceComputeCapability(&major, &minor, dev)); \
if (major >= 1) \
break; \
} \
if (dev == deviceCount) { \
fprintf(stderr, "There is no device supporting CUDA.\n"); \
exit(EXIT_FAILURE); \
} \
else \
CU_SAFE_CALL_NO_SYNC(cuDeviceGet(&cuDevice, dev)); \
} while (0)
unsigned int num_bind_tex_calls = 0;
#define BIND_TEX(offset, tex, arr, desc, len) \
do { \
CUDA_SAFE_CALL(cudaBindTexture(offset, tex, arr, desc, len)); \
++num_bind_tex_calls; \
} while (0)
#define BIND_TEX_ARRAY(tex, arr, desc) \
do { \
CUDA_SAFE_CALL(cudaBindTextureToArray(tex, arr, desc)); \
++num_bind_tex_calls; \
} while (0)
#define CUDA_MALLOC(ptr, size) \
do { \
cudaMalloc(ptr, size); \
++num_bind_tex_calls; \
} while (0)
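// Note: the allocation macros share num_bind_tex_calls deliberately; once it
// exceeds 100, the match loop resets the device and reloads the reference and
// queries (see matchQueriesToReferencePage) to work around leaked resources.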
#define CUDA_MALLOC_PITCH(ptr, out_pitch, rowsize, numrows) \
do { \
cudaMallocPitch(ptr, out_pitch, rowsize, numrows); \
++num_bind_tex_calls; \
} while (0)
#define CUDA_MALLOC_ARRAY(ptr, desc, pitch, rows) \
do { \
cudaMallocArray(ptr, desc, pitch, rows); \
++num_bind_tex_calls; \
} while (0)
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(int argc, char** argv);
extern "C" void computeGold(MatchResults* results,
char* refstr,
char* queries,
int* queryAddrs,
int* queryLengths,
PixelOfNode* nodeTexture,
PixelOfChildren* childrenTexture,
int numQueries,
int mismatch_length,
int rc);
extern "C" void getReferenceString(const char* filename, char** refstr, size_t* reflen);
extern "C" void createTreeTexture(const char* filename,
PixelOfNode** nodeTexture,
PixelOfChildren** childrenTexture,
unsigned int* width,
unsigned int* node_height,
unsigned int* children_height,
AuxiliaryNodeData** aux_data,
int* num_match_coords,
int min_match_len,
Statistics* statistics,
const char* dotfilename,
const char* texfilename);
extern "C" void getQueriesTexture(int qfile,
char** queryTexture,
size_t* queryLength,
int** queryAddrs,
char*** queryNames,
int** queryLengths,
unsigned int* numQueries,
unsigned int* num_match_coords,
unsigned int device_memory_avail,
int min_match_length,
bool rc);
extern "C" int lookupNumLeaves(ReferencePage* page, TextureAddress addr);
void printAlignments(ReferencePage* page,
Alignment* alignments,
char* query,
int qrylen,
TextureAddress nodeid,
int qrypos,
int edge_depth,
int min_match,
bool rc,
bool forwardcoordinates);
int countLeafNodes(int nodeid);
extern "C" void mapQueriesEndToEnd(MatchContext* ctx,
ReferencePage* page,
MatchInfo* h_matches,
unsigned int numMatches,
Alignment* h_alignments,
unsigned int numAligments);
char* createTimer()
{
unsigned int* ptr = (unsigned int*)malloc(sizeof(struct Timer_t));
memset(ptr, 0, sizeof(struct Timer_t));
return (char*)ptr;
}
void startTimer(char* ptr)
{
gettimeofday(&(((struct Timer_t*)ptr)->start_m), NULL);
}
void stopTimer(char* ptr)
{
gettimeofday(&(((struct Timer_t*)ptr)->end_m), NULL);
}
float getTimerValue(char* ptr)
{
Timer_t* timer = (Timer_t*)ptr;
if (timer == NULL) {
fprintf(stderr, "Uninitialized timer!!!\n");
return 0.0;
}
if (timer->end_m.tv_sec == 0) {
stopTimer(ptr);
}
return (float)(1000.0 * (timer->end_m.tv_sec - timer->start_m.tv_sec)
+ (0.001 * (timer->end_m.tv_usec - timer->start_m.tv_usec)));
}
void deleteTimer(char* ptr)
{
free((Timer_t*)ptr);
}
extern "C" int createReference(const char* fromFile, Reference* ref)
{
if (!fromFile || !ref)
return -1;
char* loadreftimer = createTimer();
startTimer(loadreftimer);
getReferenceString(fromFile, &(ref->str), &(ref->len));
stopTimer(loadreftimer);
ref->t_load_from_disk += getTimerValue(loadreftimer);
deleteTimer(loadreftimer);
return 0;
}
extern "C" int destroyReference(Reference* ref)
{
gloop::Statistics::Scope<gloop::Statistics::Type::DataInit> scope;
free(ref->h_node_tex_array);
free(ref->h_children_tex_array);
free(ref->str);
#if REORDER_REF
free(ref->h_ref_array);
#endif
free(ref->aux_data);
#if TREE_ACCESS_HISTOGRAM
free(ref->h_node_hist);
free(ref->h_child_hist);
#endif
ref->str = NULL;
ref->len = 0;
return 0;
}
extern "C" int createQuerySet(const char* fromFile, QuerySet* queries)
{
fprintf(stderr, "Opening %s...\n", fromFile);
int qfile = open(fromFile, O_RDONLY);
if (qfile == -1) {
fprintf(stderr, "Can't open %s: %d\n", fromFile, errno);
exit(1);
}
queries->qfile = qfile;
return 0;
}
extern "C" int destroyQuerySet(QuerySet* queries)
{
if (queries->qfile)
close(queries->qfile);
return 0;
}
extern "C" void printStringForError(int err)
{
}
extern "C" int createMatchContext(Reference* ref,
QuerySet* queries,
MatchResults* matches,
bool on_cpu,
int min_match_length,
char* stats_file,
bool reverse,
bool forwardreverse,
bool forwardcoordinates,
bool showQueryLength,
char* dotfilename,
char* texfilename,
MatchContext* ctx)
{
{
gloop::Statistics::Scope<gloop::Statistics::Type::GPUInit> scope;
gloop::eagerlyInitializeContext();
}
{
gloop::Statistics::Scope<gloop::Statistics::Type::DataInit> scope;
ctx->queries = queries;
ctx->ref = ref;
ctx->full_ref = ref->str;
ctx->full_ref_len = ref->len;
ctx->on_cpu = on_cpu;
ctx->min_match_length = min_match_length;
ctx->stats_file = stats_file;
ctx->reverse = reverse;
ctx->forwardreverse = forwardreverse;
ctx->forwardcoordinates = forwardcoordinates;
ctx->show_query_length = showQueryLength;
ctx->dotfilename = dotfilename;
ctx->texfilename = texfilename;
}
return 0;
}
extern "C" int destroyMatchContext(MatchContext* ctx)
{
gloop::Statistics::Scope<gloop::Statistics::Type::DataInit> scope;
free(ctx->full_ref);
//destroyReference(ctx->ref);
destroyQuerySet(ctx->queries);
{
gloop::Statistics::Scope<gloop::Statistics::Type::GPUInit> scope;
gloop::eagerlyFinalizeContext();
}
return 0;
}
void buildReferenceTexture(Reference* ref,
char* full_ref,
size_t begin,
size_t end,
int min_match_len,
char* dotfilename,
char* texfilename,
Statistics* statistics)
{
fprintf(stderr, "Building reference texture...\n");
PixelOfNode* nodeTexture = NULL;
PixelOfChildren* childrenTexture = NULL;
unsigned int width = 0;
unsigned int node_height = 0;
unsigned int children_height = 0;
AuxiliaryNodeData* aux_data = NULL;
int num_nodes;
char* loadreftimer = createTimer();
startTimer(loadreftimer);
ref->len = end - begin + 3;
ref->str = (char*)malloc(ref->len);
ref->str[0] = 's';
strncpy(ref->str + 1, full_ref + begin, ref->len - 3);
strcpy(ref->str + ref->len - 2, "$");
stopTimer(loadreftimer);
statistics->t_ref_from_disk += getTimerValue(loadreftimer) + ref->t_load_from_disk;
deleteTimer(loadreftimer);
createTreeTexture(ref->str,
&nodeTexture,
&childrenTexture,
&width,
&node_height,
&children_height,
&aux_data,
&num_nodes,
min_match_len,
statistics,
dotfilename,
texfilename);
ref->h_node_tex_array = nodeTexture;
ref->h_children_tex_array = childrenTexture;
ref->tex_width = width;
ref->tex_node_height = node_height;
ref->tex_children_height = children_height;
#if TREE_ACCESS_HISTOGRAM
ref->h_node_hist = (int*)calloc(width * node_height, sizeof(int));
ref->h_child_hist = (int*)calloc(width * children_height, sizeof(int));
#endif
ref->aux_data = aux_data;
ref->num_nodes = num_nodes;
ref->bytes_on_board = (width * node_height * sizeof(PixelOfNode)) + (width * children_height * sizeof(PixelOfChildren));
fprintf(stderr, "This tree will need %d bytes on the board\n", ref->bytes_on_board);
#if REORDER_REF
char* reordertimer = createTimer();
startTimer(reordertimer);
unsigned int refpitch = ref->pitch = 65536;
int numrows = ceil(ref->len / ((float)refpitch));
int blocksize = 4;
numrows += blocksize;
int refstrsize = numrows * refpitch;
ref->h_ref_array = (char*)malloc(refstrsize);
ref->bytes_on_board += refstrsize;
fprintf(stderr, "The refstr (reordered) requires %d bytes\n", refstrsize);
int z_max = numrows * refpitch;
for (int z = 0; z < z_max; z++) {
ref->h_ref_array[z] = 'Z';
}
int x, y;
int maxx = 0, maxy = 0;
size_t reflen = ref->len;
char* refstr = ref->str;
int block_dim = refpitch * blocksize;
for (int i = 0; i < reflen; i++) {
int bigx = i % (block_dim); // ref string reorder
int bigy = i / (block_dim);
y = bigy * blocksize + bigx % blocksize;
x = bigx / blocksize;
// printf("%d: (%d,%d)=%c\n", i, x, y, refstr[i]);
assert(x < refpitch);
assert(y < numrows);
ref->h_ref_array[y * refpitch + x] = refstr[i];
if (x > maxx) {
maxx = x;
}
if (y > maxy) {
maxy = y;
}
}
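// Mapping sketch with toy values (the real refpitch is 65536): for
// refpitch = 8 and blocksize = 4, block_dim = 32, so i = 10 gives
// bigx = 10, bigy = 0, hence y = 0 * 4 + 10 % 4 = 2 and x = 10 / 4 = 2.
// Indices 8..11 all land in column x = 2, rows y = 0..3: four consecutive
// reference chars are stacked vertically for 2D-local texture fetches.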
if ((maxx >= refpitch) || (maxy >= numrows)) {
fprintf(stderr, "ERROR: maxx: %d refpitch: %d, maxy: %d numrows: %d\n",
maxx, refpitch, maxy, numrows);
exit(1);
}
stopTimer(reordertimer);
if (statistics)
statistics->t_reorder_ref_str += getTimerValue(reordertimer);
deleteTimer(reordertimer);
#else
fprintf(stderr, "The refstr requires %d bytes\n", ref->len);
ref->bytes_on_board += ref->len;
#endif
}
void boardMemory(size_t* free_mem, size_t* total_mem)
{
// The emulator doesn't allow calls to cuMemGetInfo
#ifdef __DEVICE_EMULATION__
*free_mem = 512 * 1024 * 1024;
*total_mem = 768 * 1024 * 1024;
#else
CU_SAFE_CALL_NO_SYNC(cuMemGetInfo(free_mem, total_mem));
#endif
}
void loadReferenceTexture(MatchContext* ctx)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
Reference* ref = ctx->ref;
int numrows = ceil(ref->len / ((float)ref->pitch));
int blocksize = 4;
numrows += blocksize;
cudaChannelFormatDesc refTextureDesc = cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindSigned);
if (!ctx->on_cpu) {
char* toboardtimer = createTimer();
startTimer(toboardtimer);
#if REFTEX
#if REORDER_REF
CUDA_MALLOC_ARRAY((cudaArray**)(&ref->d_ref_array),
&refTextureDesc,
ref->pitch,
numrows);
CUDA_SAFE_CALL(cudaMemcpyToArray((cudaArray*)(ref->d_ref_array),
0,
0,
ref->h_ref_array,
numrows * ref->pitch,
cudaMemcpyHostToDevice));
reftex.addressMode[0] = cudaAddressModeClamp;
reftex.addressMode[1] = cudaAddressModeClamp;
reftex.filterMode = cudaFilterModePoint;
reftex.normalized = false;
BIND_TEX_ARRAY(reftex, (cudaArray*)ref->d_ref_array, refTextureDesc);
ctx->ref->bytes_on_board += numrows * ref->pitch;
#else
CUDA_MALLOC((void**)(&ref->d_ref_array), ref->len);
CUDA_SAFE_CALL(cudaMemcpy((void*)(ref->d_ref_array),
ref->str,
ref->len,
cudaMemcpyHostToDevice));
reftex.addressMode[0] = cudaAddressModeClamp;
reftex.filterMode = cudaFilterModePoint;
reftex.normalized = false; // access with normalized texture coordinates
cudaChannelFormatDesc refDesc = cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindUnsigned);
BIND_TEX(0, reftex, (void*)(ref->d_ref_array), refDesc, ref->len);
ctx->ref->bytes_on_board += ref->len;
#endif
#else
#if REORDER_REF
size_t refpitch;
CUDA_MALLOC_PITCH((void**)(&ref->d_ref_array),
&refpitch,
ref->pitch * sizeof(char),
numrows);
CUDA_SAFE_CALL(cudaMemcpy2D((ref->d_ref_array),
refpitch,
ref->h_ref_array,
ref->pitch,
ref->pitch * sizeof(char),
numrows,
cudaMemcpyHostToDevice));
ctx->ref->bytes_on_board += numrows * ref->pitch;
#else
CUDA_MALLOC((void**)(&ref->d_ref_array), ref->len);
CUDA_SAFE_CALL(cudaMemcpy((void*)(ref->d_ref_array),
ref->str,
ref->len,
cudaMemcpyHostToDevice));
ctx->ref->bytes_on_board += ref->len;
#endif
#endif
stopTimer(toboardtimer);
ctx->statistics.t_ref_str_to_board += getTimerValue(toboardtimer);
deleteTimer(toboardtimer);
}
else {
ref->d_ref_array = NULL;
}
}
void unloadReferenceString(Reference* ref)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
#if REFTEX
CUDA_SAFE_CALL(cudaUnbindTexture(reftex));
#endif
#if REORDER_REF && REFTEX
CUDA_SAFE_CALL(cudaFreeArray((cudaArray*)(ref->d_ref_array)));
#else
CUDA_SAFE_CALL(cudaFree((ref->d_ref_array)));
#endif
ref->d_ref_array = NULL;
}
void unloadReferenceTree(MatchContext* ctx)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
Reference* ref = ctx->ref;
#if REORDER_TREE
// Unload nodetex
#if NODETEX
CUDA_SAFE_CALL(cudaUnbindTexture(nodetex));
CUDA_SAFE_CALL(cudaFreeArray((cudaArray*)(ref->d_node_tex_array)));
#else
CUDA_SAFE_CALL(cudaFree(ref->d_node_tex_array));
#endif
ref->d_node_tex_array = NULL;
// Unload childrentex
if (ref->d_children_tex_array) {
#if CHILDTEX
CUDA_SAFE_CALL(cudaUnbindTexture(childrentex));
CUDA_SAFE_CALL(cudaFreeArray((cudaArray*)(ref->d_children_tex_array)));
#else
CUDA_SAFE_CALL(cudaFree(ref->d_children_tex_array));
#endif
}
ref->d_children_tex_array = NULL;
#else
#if NODETEX
CUDA_SAFE_CALL(cudaUnbindTexture(nodetex));
#endif
CUDA_SAFE_CALL(cudaFree(ref->d_node_tex_array));
ref->d_node_tex_array = NULL;
// Unload childrentex
if (ref->d_children_tex_array) {
#if CHILDTEX
CUDA_SAFE_CALL(cudaUnbindTexture(childrentex));
#endif
CUDA_SAFE_CALL(cudaFree(ref->d_children_tex_array));
ref->d_children_tex_array = NULL;
}
#endif
#if TREE_ACCESS_HISTOGRAM
CUDA_SAFE_CALL(cudaFree(ref->d_node_hist));
ref->d_node_hist = NULL;
CUDA_SAFE_CALL(cudaFree(ref->d_child_hist));
ref->d_child_hist = NULL;
#endif
}
//loads a tree and text for [begin, end) in the reference
void loadReference(MatchContext* ctx)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
Reference* ref = ctx->ref;
ref->bytes_on_board = 0;
loadReferenceTexture(ctx);
if (!ctx->on_cpu) {
char* toboardtimer = createTimer();
startTimer(toboardtimer);
// node texels
ref->bytes_on_board += ref->tex_width * ref->tex_node_height * (sizeof(PixelOfNode));
// children texels
ref->bytes_on_board += ref->tex_width * ref->tex_children_height * sizeof(PixelOfChildren);
#if REORDER_TREE
#if NODETEX
cudaChannelFormatDesc nodeTextureDesc = cudaCreateChannelDesc(32, 32, 32, 32, cudaChannelFormatKindUnsigned);
CUDA_MALLOC_ARRAY((cudaArray**)(&ref->d_node_tex_array),
&nodeTextureDesc,
ref->tex_width,
ref->tex_node_height);
CUDA_SAFE_CALL(cudaMemcpyToArray((cudaArray*)(ref->d_node_tex_array),
0,
0,
ref->h_node_tex_array,
ref->tex_width * ref->tex_node_height * sizeof(PixelOfNode),
cudaMemcpyHostToDevice));
nodetex.addressMode[0] = cudaAddressModeClamp;
nodetex.addressMode[1] = cudaAddressModeClamp;
nodetex.filterMode = cudaFilterModePoint;
nodetex.normalized = false; // access with normalized texture coordinates
BIND_TEX_ARRAY(nodetex, (cudaArray*)ref->d_node_tex_array,
nodeTextureDesc);
#else
size_t nodepitch;
CUDA_MALLOC_PITCH((void**)(&ref->d_node_tex_array),
&nodepitch,
ref->tex_width * sizeof(PixelOfNode),
ref->tex_node_height);
CUDA_SAFE_CALL(cudaMemcpy2D((ref->d_node_tex_array),
nodepitch,
ref->h_node_tex_array,
ref->tex_width * sizeof(PixelOfNode), // src pitch: the host array is tightly packed
ref->tex_width * sizeof(PixelOfNode),
ref->tex_node_height,
cudaMemcpyHostToDevice));
#endif
if (ref->tex_children_height) {
#if CHILDTEX
cudaChannelFormatDesc childrenTextureDesc = cudaCreateChannelDesc(32, 32, 32, 32, cudaChannelFormatKindUnsigned);
CUDA_MALLOC_ARRAY((cudaArray**)(&ref->d_children_tex_array),
&childrenTextureDesc,
ref->tex_width,
ref->tex_children_height);
CUDA_SAFE_CALL(cudaMemcpyToArray((cudaArray*)(ref->d_children_tex_array),
0,
0,
ref->h_children_tex_array,
ref->tex_width * ref->tex_children_height * sizeof(PixelOfChildren),
cudaMemcpyHostToDevice));
childrentex.addressMode[0] = cudaAddressModeClamp;
childrentex.addressMode[1] = cudaAddressModeClamp;
childrentex.filterMode = cudaFilterModePoint;
childrentex.normalized = false; // access with normalized texture coordinates
BIND_TEX_ARRAY(childrentex, (cudaArray*)(ref->d_children_tex_array),
childrenTextureDesc);
#else
size_t childpitch;
CUDA_MALLOC_PITCH((void**)(&ref->d_children_tex_array),
&childpitch,
ref->tex_width * sizeof(PixelOfChildren),
ref->tex_children_height);
CUDA_SAFE_CALL(cudaMemcpy2D((ref->d_children_tex_array),
childpitch,
ref->h_children_tex_array,
ref->tex_width * sizeof(PixelOfChildren), // src pitch: the host array is tightly packed
ref->tex_width * sizeof(PixelOfChildren),
ref->tex_children_height,
cudaMemcpyHostToDevice));
#endif
}
#if TREE_ACCESS_HISTOGRAM
// node hist
ref->bytes_on_board += ref->tex_width * ref->tex_node_height * sizeof(int);
CUDA_MALLOC((void**)(&ref->d_node_hist),
ref->tex_width * ref->tex_node_height * sizeof(int));
CUDA_SAFE_CALL(cudaMemset((ref->d_node_hist), 0,
ref->tex_width * ref->tex_node_height * sizeof(int)));
if (ref->tex_children_height) {
// children hist
ref->bytes_on_board += ref->tex_width * ref->tex_children_height * sizeof(int);
fprintf(stderr, "after child_hist ref->bytes_on_board:%ld\n", ref->bytes_on_board);
CUDA_MALLOC((void**)(&ref->d_child_hist),
ref->tex_width * ref->tex_children_height * sizeof(int));
CUDA_SAFE_CALL(cudaMemset((ref->d_child_hist), 0,
ref->tex_width * ref->tex_children_height * sizeof(int)));
}
#endif
#else // NO TREE REORDERING
// Node tex, 1-dimensional
CUDA_MALLOC((void**)(&ref->d_node_tex_array),
ref->tex_node_height * sizeof(PixelOfNode));
CUDA_SAFE_CALL(cudaMemcpy((ref->d_node_tex_array),
ref->h_node_tex_array,
ref->tex_node_height * sizeof(PixelOfNode),
cudaMemcpyHostToDevice));
#if NODETEX
cudaChannelFormatDesc nodeTextureDesc = cudaCreateChannelDesc(32, 32, 32, 32, cudaChannelFormatKindUnsigned);
nodetex.addressMode[0] = cudaAddressModeClamp;
nodetex.filterMode = cudaFilterModePoint;
nodetex.normalized = false; // access with normalized texture coordinates
BIND_TEX(0, nodetex, (void*)(ref->d_node_tex_array), nodeTextureDesc,
ref->tex_node_height * sizeof(PixelOfNode));
#endif
if (ref->tex_children_height) {
// Child tex, 1-dimensional
CUDA_MALLOC((void**)(&ref->d_children_tex_array),
ref->tex_children_height * sizeof(PixelOfChildren));
CUDA_SAFE_CALL(cudaMemcpy((ref->d_children_tex_array),
ref->h_children_tex_array,
ref->tex_children_height * sizeof(PixelOfChildren),
cudaMemcpyHostToDevice));
#if CHILDTEX
cudaChannelFormatDesc childTextureDesc = cudaCreateChannelDesc(32, 32, 32, 32, cudaChannelFormatKindUnsigned);
childrentex.addressMode[0] = cudaAddressModeClamp;
childrentex.filterMode = cudaFilterModePoint;
childrentex.normalized = false; // access with normalized texture coordinates
BIND_TEX(0, childrentex, (void*)(ref->d_children_tex_array),
childTextureDesc, ref->tex_children_height * sizeof(PixelOfChildren));
#endif
}
#if TREE_ACCESS_HISTOGRAM
ref->bytes_on_board += ref->tex_node_height * sizeof(int);
CUDA_MALLOC((void**)(&ref->d_node_hist),
ref->tex_node_height * sizeof(int));
CUDA_SAFE_CALL(cudaMemset((ref->d_node_hist), 0,
ref->tex_node_height * sizeof(int)));
if (ref->tex_children_height) {
ref->bytes_on_board += ref->tex_children_height * sizeof(int);
CUDA_MALLOC((void**)(&ref->d_child_hist),
ref->tex_children_height * sizeof(int));
CUDA_SAFE_CALL(cudaMemset((ref->d_child_hist), 0,
ref->tex_children_height * sizeof(int)));
}
#endif
#endif
#if TWO_LEVEL_NODE_TREE
PixelOfNode node_buf[NODE_THRESH];
memset(node_buf, 0, sizeof(node_buf));
for (unsigned int i = 0; (i < NODE_THRESH) && (i < ref->num_nodes); ++i) {
TextureAddress myaddress(id2addr(i));
#if MERGETEX && REORDER_TREE
myaddress.x &= 0x7FF;
myaddress.x *= 2;
int loc = myaddress.x + myaddress.y * MAX_TEXTURE_DIMENSION;
node_buf[i] = ((PixelOfNode*)(ref->h_node_tex_array))[loc];
#elif REORDER_TREE
int loc = myaddress.x + myaddress.y * MAX_TEXTURE_DIMENSION;
node_buf[i] = ((PixelOfNode*)(ref->h_node_tex_array))[loc];
#elif MERGETEX
node_buf[i] = ((PixelOfNode*)(ref->h_node_tex_array))[myaddress.x * 2];
#else
node_buf[i] = ((PixelOfNode*)(ref->h_node_tex_array))[myaddress.x];
#endif
}
CUDA_SAFE_CALL(cudaMemcpyToSymbol(node_tree_top, node_buf, sizeof(node_buf)));
#endif
#if TWO_LEVEL_CHILD_TREE
PixelOfChildren child_buf[CHILD_THRESH];
memset(child_buf, 0, sizeof(child_buf));
for (unsigned int i = 0; (i < CHILD_THRESH) && (i < ref->num_nodes); ++i) {
TextureAddress myaddress(id2addr(i));
#if MERGETEX && REORDER_TREE
myaddress.x &= 0x7FF;
myaddress.x *= 2;
int loc = myaddress.x + myaddress.y * MAX_TEXTURE_DIMENSION;
child_buf[i] = ((PixelOfChildren*)(ref->h_node_tex_array))[loc + 1];
#elif REORDER_TREE
int loc = myaddress.x + myaddress.y * MAX_TEXTURE_DIMENSION;
child_buf[i] = ((PixelOfChildren*)(ref->h_children_tex_array))[loc];
#elif MERGETEX
child_buf[i] = ((PixelOfChildren*)(ref->h_node_tex_array))[myaddress.x * 2 + 1];
#else
child_buf[i] = ((PixelOfChildren*)(ref->h_children_tex_array))[myaddress.x];
#endif
}
CUDA_SAFE_CALL(cudaMemcpyToSymbol(child_tree_top, child_buf, sizeof(child_buf)));
#endif
stopTimer(toboardtimer);
ctx->statistics.t_tree_to_board += getTimerValue(toboardtimer);
deleteTimer(toboardtimer);
fprintf(stderr, "done\n");
}
else {
ref->d_node_tex_array = NULL;
ref->d_children_tex_array = NULL;
}
}
void dumpQueryBlockInfo(QuerySet* queries)
{
fprintf(stderr, "\tProcessing queries %s to %s\n",
queries->h_names[0],
queries->h_names[queries->count - 1]);
}
void loadQueries(MatchContext* ctx)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
QuerySet* queries = ctx->queries;
queries->bytes_on_board = 0;
unsigned int numQueries = queries->count;
if (!ctx->on_cpu) {
fprintf(stderr, "Allocating device memory for queries... ");
char* toboardtimer = createTimer();
startTimer(toboardtimer);
dumpQueryBlockInfo(queries);
CUDA_MALLOC((void**)&queries->d_tex_array, queries->texlen);
queries->bytes_on_board += queries->texlen;
CUDA_SAFE_CALL(cudaMemcpy((void*)queries->d_tex_array,
queries->h_tex_array + queries->h_addrs_tex_array[0],
queries->texlen,
cudaMemcpyHostToDevice));
#if QRYTEX
qrytex.addressMode[0] = cudaAddressModeClamp;
qrytex.filterMode = cudaFilterModePoint;
qrytex.normalized = false; // access with normalized texture coordinates
cudaChannelFormatDesc qryDesc = cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindUnsigned);
BIND_TEX(0, qrytex, (void*)(queries->d_tex_array), qryDesc,
queries->texlen);
#endif
CUDA_MALLOC((void**)&queries->d_addrs_tex_array,
numQueries * sizeof(int));
queries->bytes_on_board += numQueries * sizeof(int);
CUDA_SAFE_CALL(cudaMemcpy((void*)queries->d_addrs_tex_array,
queries->h_addrs_tex_array,
numQueries * sizeof(int),
cudaMemcpyHostToDevice));
CUDA_MALLOC((void**)&queries->d_lengths_array,
numQueries * sizeof(int));
queries->bytes_on_board += numQueries * sizeof(int);
CUDA_SAFE_CALL(cudaMemcpy((void*)queries->d_lengths_array,
queries->h_lengths_array,
numQueries * sizeof(int),
cudaMemcpyHostToDevice));
stopTimer(toboardtimer);
ctx->statistics.t_queries_to_board += getTimerValue(toboardtimer);
deleteTimer(toboardtimer);
fprintf(stderr, "\tallocated %ld bytes\n", queries->bytes_on_board);
}
else {
queries->d_addrs_tex_array = NULL;
queries->d_tex_array = NULL;
queries->d_lengths_array = NULL;
fprintf(stderr, " allocated %ld bytes\n", 2 * numQueries * sizeof(int) + queries->texlen);
}
}
void unloadQueries(MatchContext* ctx)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
QuerySet* queries = ctx->queries;
CUDA_SAFE_CALL(cudaFree(queries->d_tex_array));
queries->d_tex_array = NULL;
CUDA_SAFE_CALL(cudaFree(queries->d_addrs_tex_array));
queries->d_addrs_tex_array = NULL;
CUDA_SAFE_CALL(cudaFree(queries->d_lengths_array));
queries->d_lengths_array = NULL;
queries->bytes_on_board = 0;
}
// Computes the location of the first MatchCoord for a given query. NOTE:
// Do NOT use this function if COALESCED_QUERIES == 1
inline int match_coord_addrs(int qryid, int qry_addrs, int match_length)
{
return qry_addrs - qryid * (match_length + 1);
}
// Construct the offset table for a set of queries. This table will be used
// by the printing functions, and if COALESCED_QUERIES == 1, by the matching
// kernel.
void buildCoordOffsetArray(MatchContext* ctx,
int** h_coord_offset_array,
unsigned int* num_coords)
{
int numCoords = 0;
int match_length = ctx->min_match_length;
int numQueries = ctx->queries->count;
int* lengths = ctx->queries->h_lengths_array;
int* coord_offsets = (int*)calloc(numQueries, sizeof(int));
#if COALESCED_QUERIES
for (unsigned int i = 0; i < numQueries; i += WARP_SIZE) {
// Every query in this warp will need at least this many coords
int max_num_coords = 0;
for (unsigned int j = 0; j < WARP_SIZE && (i + j) < numQueries; ++j) {
int num_coords = lengths[i + j] - match_length + 1;
if (max_num_coords < num_coords)
max_num_coords = num_coords;
}
unsigned int block_size = max_num_coords * WARP_SIZE;
for (unsigned int j = 0; j < WARP_SIZE && (i + j) < numQueries; ++j) {
coord_offsets[i + j] = numCoords + j;
}
numCoords += block_size;
}
#else
for (unsigned int i = 0; i < numQueries; ++i) {
int qryoffset = ctx->queries->h_addrs_tex_array[i];
coord_offsets[i] = match_coord_addrs(i, qryoffset, match_length);
}
if (numQueries > 0) {
unsigned int last_qry = numQueries - 1;
unsigned int last_qry_len = lengths[last_qry] - match_length + 1;
numCoords = coord_offsets[last_qry] + last_qry_len;
fprintf(stderr, "Need %d match coords for this result array\n",
numCoords);
}
#endif
*num_coords = numCoords;
*h_coord_offset_array = coord_offsets;
}
void loadResultBuffer(MatchContext* ctx)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
unsigned int numQueries = ctx->queries->count;
assert(numQueries);
char* offsettimer = createTimer();
startTimer(offsettimer);
buildCoordOffsetArray(ctx,
&(ctx->results.h_coord_tex_array),
&(ctx->results.numCoords));
stopTimer(offsettimer);
ctx->statistics.t_build_coord_offsets += getTimerValue(offsettimer);
deleteTimer(offsettimer);
unsigned int numCoords = ctx->results.numCoords;
fprintf(stderr, "Allocating result array for %d queries (%d bytes) ...",
numQueries, numCoords * sizeof(MatchCoord));
size_t boardFreeMemory = 0;
size_t total_mem = 0;
boardMemory(&boardFreeMemory, &total_mem);
fprintf(stderr, "board free memory: %u total memory: %u\n",
boardFreeMemory, total_mem);
ctx->results.h_match_coords = (MatchCoord*)calloc(numCoords, sizeof(MatchCoord));
if (ctx->results.h_match_coords == NULL) {
trap_dbg();
exit(EXIT_FAILURE);
}
if (!ctx->on_cpu) {
char* toboardtimer = createTimer();
startTimer(toboardtimer);
ctx->results.bytes_on_board = 0;
CUDA_MALLOC((void**)&ctx->results.d_match_coords,
numCoords * sizeof(MatchCoord));
ctx->results.bytes_on_board += numCoords * sizeof(MatchCoord);
CUDA_SAFE_CALL(cudaMemset((void*)ctx->results.d_match_coords, 0,
numCoords * sizeof(MatchCoord)));
#if COALESCED_QUERIES
CUDA_MALLOC((void**)&ctx->results.d_coord_tex_array,
numQueries * sizeof(int));
ctx->results.bytes_on_board += numQueries * sizeof(int);
CUDA_SAFE_CALL(cudaMemcpy((void*)ctx->results.d_coord_tex_array,
ctx->results.h_coord_tex_array,
numQueries * sizeof(int),
cudaMemcpyHostToDevice));
#endif
stopTimer(toboardtimer);
ctx->statistics.t_match_coords_to_board += getTimerValue(toboardtimer);
deleteTimer(toboardtimer);
}
else {
ctx->results.d_match_coords = NULL;
}
fprintf(stderr, "done\n");
}
void unloadResultBuffer(MatchContext* ctx)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
CUDA_SAFE_CALL(cudaFree(ctx->results.d_match_coords));
ctx->results.d_match_coords = NULL;
ctx->results.bytes_on_board = 0;
#if COALESCED_QUERIES
CUDA_SAFE_CALL(cudaFree(ctx->results.d_match_coords));
#endif
}
void transferResultsFromDevice(MatchContext* ctx)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
if (!ctx->on_cpu) {
char* fromboardtimer = createTimer();
startTimer(fromboardtimer);
CUDA_SAFE_CALL(cudaMemcpy(ctx->results.h_match_coords,
ctx->results.d_match_coords,
ctx->results.numCoords * sizeof(MatchCoord),
cudaMemcpyDeviceToHost));
#if TREE_ACCESS_HISTOGRAM
CUDA_SAFE_CALL(cudaMemcpy(ctx->ref->h_node_hist,
ctx->ref->d_node_hist,
ctx->ref->tex_node_height * ctx->ref->tex_width * sizeof(int),
cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaMemcpy(ctx->ref->h_child_hist,
ctx->ref->d_child_hist,
ctx->ref->tex_children_height * ctx->ref->tex_width * sizeof(int),
cudaMemcpyDeviceToHost));
if (ctx->statistics.node_hist_size < ctx->ref->tex_width * ctx->ref->tex_node_height) {
int* temp = (int*)calloc(ctx->ref->tex_width * ctx->ref->tex_node_height, sizeof(int));
if (ctx->statistics.node_hist_size)
memcpy(temp, ctx->statistics.node_hist, ctx->statistics.node_hist_size * sizeof(int));
ctx->statistics.node_hist = temp;
ctx->statistics.node_hist_size = ctx->ref->tex_width * ctx->ref->tex_node_height;
}
if (ctx->statistics.child_hist_size < ctx->ref->tex_width * ctx->ref->tex_children_height) {
int* temp = (int*)calloc(ctx->ref->tex_width * ctx->ref->tex_children_height, sizeof(int));
if (ctx->statistics.child_hist_size)
memcpy(temp, ctx->statistics.child_hist, ctx->statistics.child_hist_size * sizeof(int));
ctx->statistics.child_hist = temp;
ctx->statistics.child_hist_size = ctx->ref->tex_width * ctx->ref->tex_children_height;
}
for (unsigned int i = 0; i < ctx->statistics.node_hist_size; ++i) {
ctx->statistics.node_hist[i] += ctx->ref->h_node_hist[i];
}
for (unsigned int i = 0; i < ctx->statistics.child_hist_size; ++i) {
ctx->statistics.child_hist[i] += ctx->ref->h_child_hist[i];
}
#endif
stopTimer(fromboardtimer);
ctx->statistics.t_match_coords_from_board += getTimerValue(fromboardtimer);
deleteTimer(fromboardtimer);
}
}
int flushOutput();
int addToBuffer(char* string);
char numbuffer[32];
MatchCoord* coordForQueryChar(MatchContext* ctx,
unsigned int qryid,
unsigned int qrychar)
{
MatchResults* results = &(ctx->results);
MatchCoord* coords = results->h_match_coords;
#if COALESCED_QUERIES
return coords + results->h_coord_tex_array[qryid] + qrychar * WARP_SIZE;
#else
return coords + results->h_coord_tex_array[qryid] + qrychar;
#endif
}
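// Pack as many match coordinates as fit in mem_avail into host-side
// MatchInfo/Alignment buffers, and hand back resume cursors (coord_idx,
// nextqry, nextqrychar) so the caller can continue on the next round.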
void coordsToPrintBuffers(MatchContext* ctx,
ReferencePage* page,
MatchInfo** matches,
Alignment** alignments,
unsigned int mem_avail,
unsigned int* coord_idx,
unsigned int* match_idx,
unsigned int* align_idx,
unsigned int* nextqry,
unsigned int* nextqrychar)
{
unsigned int numQueries = ctx->queries->count;
int match_length = ctx->min_match_length;
unsigned int cidx = *coord_idx;
unsigned int midx = 0;
unsigned int numCoords = ctx->results.numCoords;
unsigned int numMatches = 0;
unsigned int numAlignments = 0;
int DEBUG = 0;
if (DEBUG && cidx == 0) {
for (int j = 0; j < numCoords; ++j) {
MatchCoord* coord = ctx->results.h_match_coords + j;
if (coord->node.data > 0 && !(coord->edge_match_length & FRMASK)) {
//fprintf(stdout, "node: %d\n",
// coord->node);
fprintf(stdout, "node: %d leaves:%d\n",
coord->node.data, lookupNumLeaves(page, coord->node));
}
}
exit(0);
}
// How much can we fit into mem_avail?
for (int j = cidx; j < numCoords; ++j) {
MatchCoord* coord = ctx->results.h_match_coords + j;
int queryAlignments = 0;
int queryMatches = 0;
if (coord->node.data > 0 && !(coord->edge_match_length & FRMASK)) {
int numLeaves = lookupNumLeaves(page, coord->node);
queryAlignments += numLeaves;
queryMatches++;
}
int allMatches = numMatches + queryMatches;
int allAlignments = numAlignments + queryAlignments;
int neededSize = allMatches * sizeof(MatchInfo) + allAlignments * sizeof(Alignment);
if (neededSize > mem_avail || (allMatches / BLOCKSIZE) >= MAX_GRID_DIMENSION) {
// adding this match won't fit on the board
break;
}
++cidx;
numMatches = allMatches;
numAlignments = allAlignments;
}
MatchInfo* M = (MatchInfo*)calloc(numMatches, sizeof(MatchInfo));
unsigned int alignmentOffset = 0;
int qry = *nextqry;
int qrychar = *nextqrychar;
bool set_full = false;
while (qry < numQueries) {
// h_lengths_array doesn't count the 'q' at the beginning of each query
int qlen = ctx->queries->h_lengths_array[qry] + 1 - match_length;
while (qrychar < qlen) {
if (midx >= numMatches) {
set_full = true;
break;
}
MatchCoord* coord = coordForQueryChar(ctx, qry, qrychar);
if (coord->node.data > 0 && !(coord->edge_match_length & FRMASK)) {
MatchInfo m;
m.resultsoffset = alignmentOffset;
m.qrystartpos = qrychar;
m.matchnode = coord->node;
m.edgematch = coord->edge_match_length;
m.numLeaves = lookupNumLeaves(page, m.matchnode);
m.queryid = qry;
alignmentOffset += m.numLeaves;
M[midx++] = m;
}
++qrychar;
}
if (set_full)
break;
++qry;
qrychar = 0;
}
*coord_idx = cidx;
*match_idx = midx;
*align_idx = alignmentOffset;
*matches = M;
*nextqry = qry;
*nextqrychar = qrychar;
fprintf(stderr, "Allocing %d bytes of host memory for %d alignments\n", alignmentOffset * sizeof(Alignment), numAlignments);
*alignments = (struct Alignment*)calloc(alignmentOffset, sizeof(Alignment));
//cudaMallocHost((void**)alignments, numAlignments * sizeof(Alignment));
}
void runPrintKernel(MatchContext* ctx,
ReferencePage* page,
MatchInfo* h_matches,
unsigned int numMatches,
Alignment* alignments,
unsigned int numAlignments)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
MatchInfo* d_matches;
size_t matchesSize = numMatches * sizeof(MatchInfo);
CUDA_MALLOC((void**)&d_matches, matchesSize);
struct Alignment* d_alignments;
size_t alignmentSize = numAlignments * sizeof(Alignment);
CUDA_MALLOC((void**)&d_alignments, alignmentSize);
CUDA_SAFE_CALL(cudaMemset((void*)d_alignments, 0, alignmentSize));
char* atimer = createTimer();
startTimer(atimer);
// Copy matches to card
fprintf(stderr, "prepared %d matches %d alignments\n", numMatches, numAlignments);
fprintf(stderr, "Copying %d bytes to host memory for %d alignments\n", numAlignments * sizeof(Alignment), numAlignments);
int DEBUG = 0;
if (DEBUG) {
for (int i = 0; i < numMatches; i++) {
printf("m[%d]:\t%d\t%d\t%d\t%d\t%d\t%d\n",
i,
h_matches[i].resultsoffset,
h_matches[i].queryid,
h_matches[i].matchnode.data,
h_matches[i].numLeaves,
h_matches[i].edgematch,
h_matches[i].qrystartpos);
}
exit(0);
}
CUDA_SAFE_CALL(cudaMemcpy(d_matches, h_matches, matchesSize, cudaMemcpyHostToDevice));
stopTimer(atimer);
float mtime = getTimerValue(atimer);
// Launch the kernel
int blocksize = (numMatches > BLOCKSIZE) ? BLOCKSIZE : numMatches;
dim3 dimBlock(blocksize, 1, 1);
dim3 dimGrid(ceil(numMatches / (float)BLOCKSIZE), 1, 1);
fprintf(stderr, " Calling print kernel... ");
{
gloop::Statistics::Scope<gloop::Statistics::Type::Kernel> scope;
printKernel<<<dimGrid, dimBlock, 0>>>(d_matches,
numMatches,
d_alignments,
#if COALESCED_QUERIES
ctx->results.d_coord_tex_array,
#endif
#if !QRYTEX
#if COALESCED_QUERIES
(int*)
#endif
ctx->queries->d_tex_array,
#endif
#if !NODETEX
(_PixelOfNode*)ctx->ref->d_node_tex_array,
#endif
#if !CHILDTEX
(_PixelOfChildren*)ctx->ref->d_children_tex_array,
#endif
ctx->queries->d_addrs_tex_array,
ctx->queries->d_lengths_array,
page->begin,
page->end,
page->shadow_left,
page->shadow_right,
ctx->min_match_length
#if TREE_ACCESS_HISTOGRAM
,
ctx->ref->d_node_hist,
ctx->ref->d_child_hist
#endif
);
cudaThreadSynchronize();
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "Kernel execution failed: %s.\n",
cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
startTimer(atimer);
// Copy the results back to the host
CUDA_SAFE_CALL(cudaMemcpy((void*)alignments,
(void*)d_alignments,
alignmentSize,
cudaMemcpyDeviceToHost));
cudaThreadSynchronize();
stopTimer(atimer);
float atime = getTimerValue(atimer);
fprintf(stderr, "memcpy time= %f\n", atime + mtime);
deleteTimer(atimer);
// Cleanup
CUDA_SAFE_CALL(cudaFree(d_alignments));
CUDA_SAFE_CALL(cudaFree(d_matches));
}
// TODO: need reverse-complement printing support
void runPrintOnCPU(MatchContext* ctx, ReferencePage* page,
MatchInfo* h_matches,
unsigned int numMatches,
Alignment* alignments,
unsigned int numAlignments)
{
unsigned int min_match_length = ctx->min_match_length;
int* addrs = ctx->queries->h_addrs_tex_array;
int* lengths = ctx->queries->h_lengths_array;
char* qrychars = ctx->queries->h_tex_array;
if (!numMatches)
return;
int qry = -1;
unsigned int qrylen;
for (int i = 0; i < numMatches; ++i) {
MatchInfo& match = h_matches[i];
if (match.queryid != qry) {
qry = match.queryid;
qrylen = lengths[qry];
}
if (!(match.edgematch & FRMASK)) {
printAlignments(page,
alignments + match.resultsoffset,
#if COALESCED_QUERIES
qrychars + sizeof(int) * addrs[qry],
#else
qrychars + addrs[qry],
#endif
qrylen,
match.matchnode,
match.qrystartpos,
match.edgematch,
min_match_length,
0,
ctx->forwardcoordinates);
}
}
}
int addMatchToBuffer(int left_in_ref, int qrypos, int matchlen);
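// Drain all match coordinates in rounds sized to the free device memory,
// running the print kernel (or its CPU fallback) on each round and flushing
// that round's alignments to the output buffer.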
void getExactAlignments(MatchContext* ctx, ReferencePage* page, bool on_cpu)
{
gloop::Statistics::Scope<gloop::Statistics::Type::DataInit> scope;
assert(!ctx->reverse && !ctx->forwardreverse);
size_t boardFreeMemory;
size_t total_mem;
if (!on_cpu) {
boardMemory(&boardFreeMemory, &total_mem);
fprintf(stderr, "board free memory: %u total memory: %u\n",
boardFreeMemory, total_mem);
}
else {
boardFreeMemory = 256 * 1024 * 1024;
total_mem = boardFreeMemory;
}
#ifdef __DEVICE_EMULATION__
boardFreeMemory = 512 * 1024 * 1024;
#endif
boardFreeMemory -= BREATHING_ROOM;
fprintf(stderr, "board free memory: %u\n", boardFreeMemory);
int rTotalMatches = 0;
int rTotalAlignments = 0;
int totalRounds = 0;
unsigned int last_coord = ctx->results.numCoords;
unsigned int next_coord = 0;
unsigned int nextqry = 0;
unsigned int nextqrychar = 0;
int lastqry = -1;
while (next_coord < last_coord) {
// see how many queries will fit on the board
gloop::Statistics::Scope<gloop::Statistics::Type::DataInit> scope;
totalRounds++;
unsigned int numMatches = 0;
unsigned int numAlignments = 0;
MatchInfo* h_matches = NULL;
Alignment* h_alignments = NULL;
int coord_left = next_coord;
char* btimer = createTimer();
startTimer(btimer);
coordsToPrintBuffers(ctx, page, &h_matches, &h_alignments, boardFreeMemory,
&next_coord, &numMatches, &numAlignments, &nextqry, &nextqrychar);
stopTimer(btimer);
float btime = getTimerValue(btimer);
ctx->statistics.t_coords_to_buffers += btime;
fprintf(stderr, "buffer prep time= %f\n", btime);
deleteTimer(btimer);
fprintf(stderr, "Round %d: Printing results for match coords [%d-%d) of %d using %d matches and %d alignments\n",
totalRounds, coord_left, next_coord, last_coord, numMatches, numAlignments);
if (numMatches == 0)
continue;
char buf[256];
//assert(qryend > qrystart);
rTotalAlignments += numAlignments;
rTotalMatches += numMatches;
if (num_bind_tex_calls > 100) {
cudaThreadExit();
num_bind_tex_calls = 0;
loadReference(ctx);
loadQueries(ctx);
}
char* ktimer = createTimer();
startTimer(ktimer);
if (on_cpu) {
runPrintOnCPU(ctx, page, h_matches, numMatches,
h_alignments, numAlignments);
}
else {
runPrintKernel(ctx, page, h_matches, numMatches,
h_alignments, numAlignments);
}
stopTimer(ktimer);
float ktime = getTimerValue(ktimer);
ctx->statistics.t_print_kernel += ktime;
fprintf(stderr, "print kernel time= %f\n", ktime);
deleteTimer(ktimer);
// char* stimer = createTimer();
// startTimer(stimer);
// mapQueriesEndToEnd(ctx,
// page,
// h_matches,
// numMatches,
// h_alignments,
// numAlignments);
//
// stopTimer(stimer);
//
// float stime = getTimerValue(stimer);
// fprintf(stderr, "postprocess time= %f\n", stime);
// deleteTimer(stimer);
//flushOutput();
//Process the alignments
{
gloop::Statistics::Scope<gloop::Statistics::Type::IO> scope;
char* otimer = createTimer();
startTimer(otimer);
for (int m = 0; m < numMatches; m++) {
int base = h_matches[m].resultsoffset;
for (int i = 0; i < h_matches[m].numLeaves; i++) {
// See if there are any more left maximal alignments for this match
if (h_alignments[base + i].left_in_ref == 0) {
break;
}
if (h_matches[m].queryid != lastqry) {
lastqry = h_matches[m].queryid;
addToBuffer("> ");
addToBuffer(*(ctx->queries->h_names + lastqry));
addToBuffer("\n");
}
sprintf(buf, "%d\t%d\t%d\n",
h_alignments[base + i].left_in_ref,
h_matches[m].qrystartpos + 1,
h_alignments[base + i].matchlen);
addToBuffer(buf);
// addMatchToBuffer(h_alignments[base+i].left_in_ref,
// h_matches[m].qrystartpos + 1,
// h_alignments[base+i].matchlen);
}
}
flushOutput();
stopTimer(otimer);
ctx->statistics.t_results_to_disk += getTimerValue(otimer);
deleteTimer(otimer);
free(h_matches);
free(h_alignments);
//cudaFreeHost((void*)h_alignments);
}
}
free(ctx->results.h_coord_tex_array);
free(ctx->results.h_match_coords);
ctx->results.h_coord_tex_array = NULL;
ctx->results.h_match_coords = NULL;
fprintf(stderr, "Finished processing %d matches and %d potential alignments in %d rounds\n",
rTotalMatches, rTotalAlignments, totalRounds);
}
int getQueryBlock(MatchContext* ctx, size_t device_mem_avail)
{
gloop::Statistics::Scope<gloop::Statistics::Type::IO> scope;
QuerySet* queries = ctx->queries;
char* queryTex = NULL;
int* queryAddrs = NULL;
int* queryLengths = NULL;
unsigned int numQueries;
unsigned int num_match_coords;
size_t queryLen;
char** names;
fprintf(stderr, "Loading query block... ");
char* queryreadtimer = createTimer();
startTimer(queryreadtimer);
getQueriesTexture(queries->qfile,
&queryTex,
&queryLen,
&queryAddrs,
&names,
&queryLengths,
&numQueries,
&num_match_coords,
device_mem_avail,
ctx->min_match_length,
ctx->reverse || ctx->forwardreverse);
stopTimer(queryreadtimer);
ctx->statistics.t_queries_from_disk += getTimerValue(queryreadtimer);
deleteTimer(queryreadtimer);
queries->h_tex_array = queryTex;
queries->count = numQueries;
queries->h_addrs_tex_array = queryAddrs;
queries->texlen = queryLen;
queries->h_names = names;
queries->h_lengths_array = queryLengths;
ctx->results.numCoords = num_match_coords;
fprintf(stderr, "done.\n");
return numQueries;
}
void destroyQueryBlock(QuerySet* queries)
{
free(queries->h_tex_array);
queries->h_tex_array = NULL;
for (int i = 0; i < queries->count; ++i)
free(queries->h_names[i]);
free(queries->h_names);
queries->count = 0;
queries->texlen = 0;
free(queries->h_addrs_tex_array);
queries->h_addrs_tex_array = NULL;
free(queries->h_lengths_array);
queries->h_lengths_array = NULL;
}
void resetStats(Statistics* stats)
{
gloop::Statistics::Scope<gloop::Statistics::Type::DataInit> scope;
stats->t_end_to_end = 0.0;
stats->t_match_kernel = 0.0;
stats->t_print_kernel = 0.0;
stats->t_queries_to_board = 0.0;
stats->t_match_coords_to_board = 0.0;
stats->t_match_coords_from_board = 0.0;
stats->t_tree_to_board = 0.0;
stats->t_ref_str_to_board = 0.0;
stats->t_queries_from_disk = 0.0;
stats->t_ref_from_disk = 0.0;
stats->t_results_to_disk = 0.0;
stats->t_tree_construction = 0.0;
stats->t_tree_reorder = 0.0;
stats->t_tree_flatten = 0.0;
stats->t_reorder_ref_str = 0.0;
stats->t_build_coord_offsets = 0.0;
stats->t_coords_to_buffers = 0.0;
stats->bp_avg_query_length = 0.0;
#if TREE_ACCESS_HISTOGRAM
if (stats->node_hist_size) {
free(stats->node_hist);
stats->node_hist = NULL;
stats->node_hist_size = 0;
}
if (stats->child_hist_size) {
free(stats->child_hist);
stats->child_hist = NULL;
stats->child_hist_size = 0;
}
#endif
}
void writeStatisticsFile(Statistics* stats,
char* stats_filename,
char* node_hist_filename = NULL,
char* child_hist_filename = NULL)
{
gloop::Statistics::Scope<gloop::Statistics::Type::IO> scope;
if (stats_filename) {
FILE* f = fopen(stats_filename, "w");
if (!f) {
fprintf(stderr, "WARNING: could not open %s for writing\n", stats_filename);
}
else {
fprintf(f, "Q");
fprintf(f, ",R");
fprintf(f, ",T");
fprintf(f, ",m");
fprintf(f, ",r");
fprintf(f, ",t");
fprintf(f, ",n");
fprintf(f, ",Total");
fprintf(f, ",Match kernel");
fprintf(f, ",Print Kernel");
fprintf(f, ",Queries to board");
fprintf(f, ",Match coords to board");
fprintf(f, ",Match coords from board");
fprintf(f, ",Tree to board");
fprintf(f, ",Ref str to board");
fprintf(f, ",Queries from disk");
fprintf(f, ",Ref from disk");
fprintf(f, ",Output to disk");
fprintf(f, ",Tree construction");
fprintf(f, ",Tree reorder");
fprintf(f, ",Tree flatten");
fprintf(f, ",Ref reorder");
fprintf(f, ",Build coord table");
fprintf(f, ",Coords to buffers");
fprintf(f, ",Avg qry length");
fprintf(f, "\n");
fprintf(f, "%d", QRYTEX);
fprintf(f, ",%d", REFTEX);
fprintf(f, ",%d", TREETEX);
fprintf(f, ",%d", MERGETEX);
fprintf(f, ",%d", REORDER_REF);
fprintf(f, ",%d", REORDER_TREE);
fprintf(f, ",%d", RENUMBER_TREE);
fprintf(f, ",%f", stats->t_end_to_end);
fprintf(f, ",%f", stats->t_match_kernel);
fprintf(f, ",%f", stats->t_print_kernel);
fprintf(f, ",%f", stats->t_queries_to_board);
fprintf(f, ",%f", stats->t_match_coords_to_board);
fprintf(f, ",%f", stats->t_match_coords_from_board);
fprintf(f, ",%f", stats->t_tree_to_board);
fprintf(f, ",%f", stats->t_ref_str_to_board);
fprintf(f, ",%f", stats->t_queries_from_disk);
fprintf(f, ",%f", stats->t_ref_from_disk);
fprintf(f, ",%f", stats->t_results_to_disk);
fprintf(f, ",%f", stats->t_tree_construction);
fprintf(f, ",%f", stats->t_tree_reorder);
fprintf(f, ",%f", stats->t_tree_flatten);
fprintf(f, ",%f", stats->t_reorder_ref_str);
fprintf(f, ",%f", stats->t_build_coord_offsets);
fprintf(f, ",%f", stats->t_coords_to_buffers);
fprintf(f, ",%f", stats->bp_avg_query_length);
fprintf(f, "\n");
fclose(f);
}
}
#if TREE_ACCESS_HISTOGRAM
if (node_hist_filename) {
FILE* f = fopen(node_hist_filename, "w");
if (!f) {
fprintf(stderr, "WARNING: could not open %s for writing\n", node_hist_filename);
}
else {
for (unsigned int i = 0; i < stats->node_hist_size; ++i)
fprintf(f, "%d\t%d\n", i, stats->node_hist[i]);
}
}
if (child_hist_filename) {
FILE* f = fopen(child_hist_filename, "w");
if (!f) {
fprintf(stderr, "WARNING: could not open %s for writing\n", child_hist_filename);
}
else {
for (unsigned int i = 0; i < stats->child_hist_size; ++i)
fprintf(f, "%d\t%d\n", i, stats->child_hist[i]);
}
}
float total_node_hits = 0;
float tree_top_node_hits = 0;
float total_child_hits = 0;
float tree_top_child_hits = 0;
for (unsigned int i = 0; i < stats->node_hist_size; ++i) {
total_node_hits += stats->node_hist[i];
if (i < 256) {
tree_top_node_hits += stats->node_hist[i];
}
}
for (unsigned int i = 0; i < stats->child_hist_size; ++i) {
total_child_hits += stats->child_hist[i];
if (i < 256) {
tree_top_child_hits += stats->child_hist[i];
}
}
fprintf(stderr, "Tree top node hits (%d/%d) = %f percent\n", (int)tree_top_node_hits, (int)total_node_hits, tree_top_node_hits / total_node_hits);
fprintf(stderr, "Tree top child hits (%d/%d) = %f percent\n", (int)tree_top_child_hits, (int)total_child_hits, tree_top_child_hits / total_child_hits);
#endif
}
void matchOnCPU(MatchContext* ctx, bool doRC)
{
// TODO: CPU matching is disabled.
if (doRC) {
// Match the reverse complement of the queries to the ref
computeGold(&ctx->results,
ctx->ref->str,
ctx->queries->h_tex_array,
ctx->queries->h_addrs_tex_array,
ctx->queries->h_lengths_array,
(PixelOfNode*)(ctx->ref->h_node_tex_array),
(PixelOfChildren*)(ctx->ref->h_children_tex_array),
ctx->queries->count,
ctx->min_match_length,
REVERSE);
}
else {
computeGold(&ctx->results,
ctx->ref->str,
ctx->queries->h_tex_array,
ctx->queries->h_addrs_tex_array,
ctx->queries->h_lengths_array,
(PixelOfNode*)(ctx->ref->h_node_tex_array),
(PixelOfChildren*)(ctx->ref->h_children_tex_array),
ctx->queries->count,
ctx->min_match_length,
FORWARD);
}
}
void matchOnGPU(MatchContext* ctx, bool doRC)
{
int numQueries = ctx->queries->count;
int blocksize = (numQueries > BLOCKSIZE) ? BLOCKSIZE : numQueries;
dim3 dimBlock(blocksize, 1, 1);
dim3 dimGrid(ceil(numQueries / (float)BLOCKSIZE), 1, 1);
// Match the reverse complement of the queries to the ref
if (doRC) {
//TODO: GPU RC is disabled
mummergpuRCKernel<<<dimGrid, dimBlock, 0>>>(ctx->results.d_match_coords,
ctx->queries->d_tex_array,
ctx->queries->d_addrs_tex_array,
ctx->queries->d_lengths_array,
numQueries,
ctx->min_match_length);
}
else {
Driver* drivers = nullptr;
CUDA_SAFE_CALL(cudaMalloc((void**)&drivers, sizeof(Driver) * dimGrid.x));
mummergpuKernel1<<<dimGrid, dimBlock, 0>>>(
drivers, ctx->results.d_match_coords, ctx->queries->d_tex_array, (char*)ctx->ref->d_ref_array, ctx->queries->d_addrs_tex_array, ctx->queries->d_lengths_array, numQueries, ctx->min_match_length);
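// Relaunch the second kernel repeatedly so suspended per-block Driver state
// can resume and finish; the iteration count appears tuned for this workload.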
for (int i = 0; i < 81; ++i) {
mummergpuKernel2<<<dimGrid, dimBlock, 0>>>(
drivers, ctx->results.d_match_coords, ctx->queries->d_tex_array, (char*)ctx->ref->d_ref_array, ctx->queries->d_addrs_tex_array, ctx->queries->d_lengths_array, numQueries, ctx->min_match_length);
}
CUDA_SAFE_CALL(cudaFree(drivers));
}
// check if kernel execution generated an error
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "Kernel execution failed: %s.\n",
cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
void getMatchResults(MatchContext* ctx,
unsigned int page_num)
{
transferResultsFromDevice(ctx);
}
void matchQueryBlockToReferencePage(MatchContext* ctx,
ReferencePage* page,
bool reverse_complement)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Kernel> scope;
char* ktimer = createTimer();
fprintf(stderr, "Memory footprint is:\n\tqueries: %d\n\tref: %d\n\tresults: %d\n",
ctx->queries->bytes_on_board,
ctx->ref->bytes_on_board,
ctx->results.bytes_on_board);
startTimer(ktimer);
if (ctx->on_cpu) {
matchOnCPU(ctx, reverse_complement);
}
else {
matchOnGPU(ctx, reverse_complement);
cudaThreadSynchronize();
}
stopTimer(ktimer);
float ktime = getTimerValue(ktimer);
ctx->statistics.t_match_kernel += ktime;
fprintf(stderr, "match kernel time= %f\n", ktime);
deleteTimer(ktimer);
getMatchResults(ctx, page->id);
unloadResultBuffer(ctx);
}
int matchSubset(MatchContext* ctx,
ReferencePage* page)
{
loadQueries(ctx);
fprintf(stderr,
"Matching queries %s - %s against ref coords %d - %d\n",
ctx->queries->h_names[0],
ctx->queries->h_names[ctx->queries->count - 1],
page->begin,
page->end);
loadResultBuffer(ctx);
// TODO: re-enable RC support by calling this twice with the reverse/fwdreverse
// idiom.
matchQueryBlockToReferencePage(ctx, page, false);
if (USE_PRINT_KERNEL && !ctx->on_cpu) {
getExactAlignments(ctx, page, false);
}
else {
getExactAlignments(ctx, page, true);
}
flushOutput();
unloadQueries(ctx);
return 0;
}
int getFreeDeviceMemory(bool on_cpu)
{
size_t free_mem = 0;
size_t total_mem = 0;
// We have to 'prime' CUDA by making an allocation here. cuMemGetInfo
// will return zeroes until we do a malloc.
int* p = NULL;
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
CUDA_SAFE_CALL(cudaMalloc((void**)&p, sizeof(int)));
CUDA_SAFE_CALL(cudaFree(p));
}
if (!on_cpu) {
boardMemory(&free_mem, &total_mem);
fprintf(stderr, "board free memory: %u total memory: %u\n",
free_mem, total_mem);
}
else {
total_mem = free_mem = 804585472; // pretend we are on a 8800 GTX
}
return free_mem;
}
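// Stream query blocks from disk against a single reference page until the
// query file is exhausted, then unload the page's tree and string and rewind
// the query file for the next page.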
int matchQueriesToReferencePage(MatchContext* ctx, ReferencePage* page)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
fprintf(stderr, "Beginning reference page %p\n", page);
int free_mem = getFreeDeviceMemory(ctx->on_cpu);
int available_mem = free_mem - page->ref.bytes_on_board - BREATHING_ROOM;
ctx->ref = &(page->ref);
loadReference(ctx);
while (getQueryBlock(ctx, available_mem)) {
matchSubset(ctx, page);
ctx->statistics.bp_avg_query_length = ctx->queries->texlen / (float)(ctx->queries->count) - 2;
destroyQueryBlock(ctx->queries);
if (num_bind_tex_calls > 100) {
cudaThreadExit();
num_bind_tex_calls = 0;
loadReference(ctx);
}
}
unloadReferenceString(ctx->ref);
unloadReferenceTree(ctx);
{
gloop::Statistics::Scope<gloop::Statistics::Type::IO> scope;
lseek(ctx->queries->qfile, 0, SEEK_SET);
}
return 0;
}
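// Partition the reference into fixed-size pages that overlap by
// MAX_QUERY_LEN + 1 bases, so matches spanning a page boundary are not lost.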
void initReferencePages(MatchContext* ctx, int* num_pages, ReferencePage** pages_out)
{
unsigned int bases_in_ref = ctx->full_ref_len - 3;
unsigned int page_size = BASES_PER_TREE_PAGE < bases_in_ref ? BASES_PER_TREE_PAGE : bases_in_ref;
unsigned int num_reference_pages = ceil((bases_in_ref + 0.0) / page_size);
fprintf(stderr, "Stream will use %d pages for %d bases, page size = %d\n",
num_reference_pages, bases_in_ref, page_size);
unsigned int page_overlap = MAX_QUERY_LEN + 1;
ReferencePage* pages = (ReferencePage*)calloc(num_reference_pages,
sizeof(ReferencePage));
pages[0].begin = 1;
pages[0].end = pages[0].begin + page_size + ceil(page_overlap / 2.0) + 1; //the 1 is for the 's' at the beginning
pages[0].shadow_left = -1;
pages[0].id = 0;
for (int i = 1; i < num_reference_pages - 1; ++i) {
pages[i].begin = pages[i - 1].end - page_overlap;
pages[i].end = pages[i].begin + page_size + page_overlap;
pages[i - 1].shadow_right = pages[i].begin;
pages[i].shadow_left = pages[i - 1].end;
pages[i].id = i;
}
if (num_reference_pages > 1) {
int last_page = num_reference_pages - 1;
pages[last_page].begin = pages[last_page - 1].end - page_overlap;
pages[last_page].end = ctx->full_ref_len - 1;
pages[last_page - 1].shadow_right = pages[last_page].begin;
pages[last_page].shadow_right = -1;
pages[last_page].shadow_left = pages[last_page - 1].end;
pages[last_page].id = last_page;
}
*pages_out = pages;
*num_pages = num_reference_pages;
}
int streamReferenceAgainstQueries(MatchContext* ctx)
{
int num_reference_pages = 0;
ReferencePage* pages = NULL;
initReferencePages(ctx, &num_reference_pages, &pages);
buildReferenceTexture(&(pages[0].ref),
ctx->full_ref,
pages[0].begin,
pages[0].end,
ctx->min_match_length,
ctx->dotfilename,
ctx->texfilename,
&(ctx->statistics));
matchQueriesToReferencePage(ctx, &pages[0]);
destroyReference(&(pages[0].ref));
for (int i = 1; i < num_reference_pages - 1; ++i) {
buildReferenceTexture(&(pages[i].ref),
ctx->full_ref,
pages[i].begin,
pages[i].end,
ctx->min_match_length,
NULL,
NULL,
&(ctx->statistics));
matchQueriesToReferencePage(ctx, &pages[i]);
destroyReference(&(pages[i].ref));
}
if (num_reference_pages > 1) {
int last_page = num_reference_pages - 1;
buildReferenceTexture(&(pages[last_page].ref),
ctx->full_ref,
pages[last_page].begin,
pages[last_page].end,
ctx->min_match_length,
NULL,
NULL,
&(ctx->statistics));
matchQueriesToReferencePage(ctx, &pages[last_page]);
destroyReference(&(pages[last_page].ref));
}
free(pages);
return 0;
}
extern "C" int matchQueries(MatchContext* ctx)
{
assert(sizeof(struct PixelOfNode) == sizeof(uint4));
assert(sizeof(struct PixelOfChildren) == sizeof(uint4));
CUDA_SAFE_CALL(cudaDeviceSetLimit(cudaLimitMallocHeapSize, (1ULL << 30)));
#if TREE_ACCESS_HISTOGRAM
ctx->statistics.node_hist_size = 0;
ctx->statistics.child_hist_size = 0;
#endif
resetStats(&(ctx->statistics));
char* ttimer = createTimer();
startTimer(ttimer);
int ret;
fprintf(stderr, "Streaming reference pages against all queries\n");
ret = streamReferenceAgainstQueries(ctx);
stopTimer(ttimer);
ctx->statistics.t_end_to_end += getTimerValue(ttimer);
deleteTimer(ttimer);
writeStatisticsFile(&(ctx->statistics), ctx->stats_file, "node_hist.out", "child_hist.out");
return ret;
}
|
91d36a057cf733ff1dd95d1dc1ed0aa2671f400c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*matMul_kernel.cu*/
#include "matMul_conf.h"
//matMul kernel
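// Naive O(n^3) kernel: each thread computes one element of the nbelem x nbelem
// product as the dot product of row tidx of arrA and column tidy of arrB.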
__global__ void matMulKernel(float *out, float* arrA, float* arrB, int nbelem){
int tidx = threadIdx.x;
int tidy = threadIdx.y;
if(tidx<nbelem && tidy<nbelem){
*(out+nbelem*tidx+tidy) = 0;
for(int k=0;k<nbelem;k++)
*(out+nbelem*tidx+tidy) += *(arrA+nbelem*tidx+k)*(*(arrB+nbelem*k+tidy));
}
}
| 91d36a057cf733ff1dd95d1dc1ed0aa2671f400c.cu | /*matMul_kernel.cu*/
#include "matMul_conf.h"
//matMul kernel
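// Naive O(n^3) kernel: each thread computes one element of the nbelem x nbelem
// product as the dot product of row tidx of arrA and column tidy of arrB.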
__global__ void matMulKernel(float *out, float* arrA, float* arrB, int nbelem){
int tidx = threadIdx.x;
int tidy = threadIdx.y;
if(tidx<nbelem && tidy<nbelem){
*(out+nbelem*tidx+tidy) = 0;
for(int k=0;k<nbelem;k++)
*(out+nbelem*tidx+tidy) += *(arrA+nbelem*tidx+k)*(*(arrB+nbelem*k+tidy));
}
}
|
f325dceb7598eb276708ddc73a246ae142b5d820.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "bitonic_sort_step.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *dev_values = NULL;
hipMalloc(&dev_values, XSIZE*YSIZE);
int j = 1;
int k = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(bitonic_sort_step, dim3(gridBlock), dim3(threadBlock), 0, 0, dev_values, j, k);
hipDeviceSynchronize();
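// Warm-up launches, followed by a timed loop of 1000 kernel invocations
// reported in microseconds.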
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(bitonic_sort_step, dim3(gridBlock), dim3(threadBlock), 0, 0, dev_values, j, k);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(bitonic_sort_step, dim3(gridBlock), dim3(threadBlock), 0, 0, dev_values, j, k);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | f325dceb7598eb276708ddc73a246ae142b5d820.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "bitonic_sort_step.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *dev_values = NULL;
cudaMalloc(&dev_values, XSIZE*YSIZE);
int j = 1;
int k = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
bitonic_sort_step<<<gridBlock,threadBlock>>>(dev_values,j,k);
cudaDeviceSynchronize();
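// Warm-up launches, followed by a timed loop of 1000 kernel invocations
// reported in microseconds.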
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
bitonic_sort_step<<<gridBlock,threadBlock>>>(dev_values,j,k);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
bitonic_sort_step<<<gridBlock,threadBlock>>>(dev_values,j,k);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
3f6badd3909870e0d2aa519bd20aaade56af4740.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <iostream>
#include <raft/cuda_utils.cuh>
#include <raft/cudart_utils.h>
#include <raft/random/rng.hpp>
#include <rmm/device_uvector.hpp>
#include <test_utils.h>
#include <vector>
#include <cuml/datasets/make_blobs.hpp>
#include <cuml/neighbors/knn.hpp>
namespace ML {
using namespace raft::random;
using namespace std;
struct KNNInputs {
int n_rows;
int n_cols;
int n_centers;
int n_query_row;
int n_neighbors;
int n_parts;
};
template <typename T, typename IdxT>
::std::ostream& operator<<(::std::ostream& os, const KNNInputs& dims)
{
return os;
}
template <typename T>
void gen_blobs(
raft::handle_t& handle, T* out, int* l, int rows, int cols, int centers, const T* centroids)
{
Datasets::make_blobs(handle,
out,
l,
rows,
cols,
centers,
true,
centroids,
nullptr,
0.1f,
true,
-10.0f,
10.0f,
1234ULL);
}
void create_index_parts(raft::handle_t& handle,
float* query_data,
int* query_labels,
vector<float*>& part_inputs,
vector<int*>& part_labels,
vector<int>& part_sizes,
const KNNInputs& params,
const float* centers)
{
hipStream_t stream = handle.get_stream();
gen_blobs<float>(handle,
query_data,
query_labels,
params.n_rows * params.n_parts,
params.n_cols,
params.n_centers,
centers);
for (int i = 0; i < params.n_parts; i++) {
part_inputs.push_back(query_data + (i * params.n_rows * params.n_cols));
part_labels.push_back(query_labels + (i * params.n_rows));
part_sizes.push_back(params.n_rows);
}
}
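// Widens int labels to float on the device for the regression test path.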
__global__ void to_float(float* out, int* in, int size)
{
int element = threadIdx.x + blockDim.x * blockIdx.x;
if (element >= size) return;
out[element] = float(in[element]);
}
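// Translates the returned k-nearest-neighbor indices back into the labels of
// the index set, so they can be compared against the expected labels.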
__global__ void build_actual_output(
int* output, int n_rows, int k, const int* idx_labels, const int64_t* indices)
{
int element = threadIdx.x + blockDim.x * blockIdx.x;
if (element >= n_rows * k) return;
int ind = (int)indices[element];
output[element] = idx_labels[ind];
}
__global__ void build_expected_output(int* output, int n_rows, int k, const int* labels)
{
int row = threadIdx.x + blockDim.x * blockIdx.x;
if (row >= n_rows) return;
int cur_label = labels[row];
for (int i = 0; i < k; i++) {
output[row * k + i] = cur_label;
}
}
template <typename T>
class KNNTest : public ::testing::TestWithParam<KNNInputs> {
public:
KNNTest()
: params(::testing::TestWithParam<KNNInputs>::GetParam()),
stream(handle.get_stream()),
index_data(params.n_rows * params.n_cols * params.n_parts, stream),
index_labels(params.n_rows * params.n_parts, stream),
search_data(params.n_query_row * params.n_cols, stream),
search_labels(params.n_query_row, stream),
output_indices(params.n_query_row * params.n_neighbors * params.n_parts, stream),
output_dists(params.n_query_row * params.n_neighbors * params.n_parts, stream)
{
RAFT_CUDA_TRY(hipMemsetAsync(index_data.data(), 0, index_data.size() * sizeof(T), stream));
RAFT_CUDA_TRY(hipMemsetAsync(index_labels.data(), 0, index_labels.size() * sizeof(T), stream));
RAFT_CUDA_TRY(hipMemsetAsync(search_data.data(), 0, search_data.size() * sizeof(T), stream));
RAFT_CUDA_TRY(
hipMemsetAsync(search_labels.data(), 0, search_labels.size() * sizeof(T), stream));
RAFT_CUDA_TRY(
hipMemsetAsync(output_indices.data(), 0, output_indices.size() * sizeof(T), stream));
RAFT_CUDA_TRY(hipMemsetAsync(output_dists.data(), 0, output_dists.size() * sizeof(T), stream));
}
protected:
void testBruteForce()
{
rmm::device_uvector<int> actual_labels(params.n_query_row * params.n_neighbors * params.n_parts,
stream);
rmm::device_uvector<int> expected_labels(
params.n_query_row * params.n_neighbors * params.n_parts, stream);
RAFT_CUDA_TRY(
hipMemsetAsync(actual_labels.data(), 0, actual_labels.size() * sizeof(T), stream));
RAFT_CUDA_TRY(
hipMemsetAsync(expected_labels.data(), 0, expected_labels.size() * sizeof(T), stream));
create_data();
brute_force_knn(handle,
part_inputs,
part_sizes,
params.n_cols,
search_data.data(),
params.n_query_row,
output_indices.data(),
output_dists.data(),
params.n_neighbors,
true,
true);
hipLaunchKernelGGL(( build_actual_output), dim3(raft::ceildiv(params.n_query_row * params.n_neighbors, 32)),
dim3(32),
0,
stream, actual_labels.data(),
params.n_query_row,
params.n_neighbors,
index_labels.data(),
output_indices.data());
hipLaunchKernelGGL(( build_expected_output), dim3(raft::ceildiv(params.n_query_row, 32)), dim3(32), 0, stream,
expected_labels.data(), params.n_query_row, params.n_neighbors, search_labels.data());
ASSERT_TRUE(devArrMatch(expected_labels.data(),
actual_labels.data(),
params.n_query_row * params.n_neighbors,
raft::Compare<int>()));
}
void testClassification()
{
rmm::device_uvector<int> actual_labels(params.n_query_row, stream);
rmm::device_uvector<int> expected_labels(params.n_query_row, stream);
RAFT_CUDA_TRY(
hipMemsetAsync(actual_labels.data(), 0, actual_labels.size() * sizeof(T), stream));
RAFT_CUDA_TRY(
hipMemsetAsync(expected_labels.data(), 0, expected_labels.size() * sizeof(T), stream));
create_data();
brute_force_knn(handle,
part_inputs,
part_sizes,
params.n_cols,
search_data.data(),
params.n_query_row,
output_indices.data(),
output_dists.data(),
params.n_neighbors,
true,
true);
vector<int*> full_labels(1);
full_labels[0] = index_labels.data();
knn_classify(handle,
actual_labels.data(),
output_indices.data(),
full_labels,
params.n_rows * params.n_parts,
params.n_query_row,
params.n_neighbors);
ASSERT_TRUE(devArrMatch(
search_labels.data(), actual_labels.data(), params.n_query_row, raft::Compare<int>()));
}
void testRegression()
{
rmm::device_uvector<int> actual_labels(params.n_query_row, stream);
rmm::device_uvector<int> expected_labels(params.n_query_row, stream);
RAFT_CUDA_TRY(
hipMemsetAsync(actual_labels.data(), 0, actual_labels.size() * sizeof(T), stream));
RAFT_CUDA_TRY(
hipMemsetAsync(expected_labels.data(), 0, expected_labels.size() * sizeof(T), stream));
create_data();
brute_force_knn(handle,
part_inputs,
part_sizes,
params.n_cols,
search_data.data(),
params.n_query_row,
output_indices.data(),
output_dists.data(),
params.n_neighbors,
true,
true);
rmm::device_uvector<float> index_labels_float(params.n_rows * params.n_parts, stream);
rmm::device_uvector<float> query_labels_float(params.n_query_row, stream);
hipLaunchKernelGGL(( to_float), dim3(raft::ceildiv((int)index_labels_float.size(), 32)), dim3(32), 0, stream,
index_labels_float.data(), index_labels.data(), index_labels_float.size());
hipLaunchKernelGGL(( to_float), dim3(raft::ceildiv(params.n_query_row, 32)), dim3(32), 0, stream,
query_labels_float.data(), search_labels.data(), params.n_query_row);
handle.sync_stream(stream);
RAFT_CUDA_TRY(hipPeekAtLastError());
rmm::device_uvector<float> actual_labels_float(params.n_query_row, stream);
vector<float*> full_labels(1);
full_labels[0] = index_labels_float.data();
knn_regress(handle,
actual_labels_float.data(),
output_indices.data(),
full_labels,
params.n_rows,
params.n_query_row,
params.n_neighbors);
ASSERT_TRUE(raft::devArrMatch(query_labels_float.data(),
actual_labels_float.data(),
params.n_query_row,
raft::Compare<float>()));
}
private:
void create_data()
{
hipStream_t stream = handle.get_stream();
rmm::device_uvector<T> rand_centers(params.n_centers * params.n_cols, stream);
Rng r(0, GeneratorType::GenPhilox);
r.uniform(rand_centers.data(), params.n_centers * params.n_cols, -10.0f, 10.0f, stream);
// Create index parts
create_index_parts(handle,
index_data.data(),
index_labels.data(),
part_inputs,
part_labels,
part_sizes,
params,
rand_centers.data());
gen_blobs(handle,
search_data.data(),
search_labels.data(),
params.n_query_row,
params.n_cols,
params.n_centers,
rand_centers.data());
}
raft::handle_t handle;
hipStream_t stream = 0;
KNNInputs params;
rmm::device_uvector<float> index_data;
rmm::device_uvector<int> index_labels;
vector<float*> part_inputs;
vector<int*> part_labels;
vector<int> part_sizes;
rmm::device_uvector<float> search_data;
rmm::device_uvector<int> search_labels;
rmm::device_uvector<float> output_dists;
rmm::device_uvector<int64_t> output_indices;
};
const std::vector<KNNInputs> inputs = {{50, 5, 2, 25, 5, 2},
{50, 5, 2, 25, 10, 2},
{500, 5, 2, 25, 5, 7},
{500, 50, 2, 25, 10, 7},
{500, 50, 7, 25, 5, 7},
{50, 5, 3, 15, 5, 7}};
typedef KNNTest<float> KNNTestF;
TEST_P(KNNTestF, BruteForce) { this->testBruteForce(); }
TEST_P(KNNTestF, Classification) { this->testClassification(); }
TEST_P(KNNTestF, Regression) { this->testRegression(); }
INSTANTIATE_TEST_CASE_P(KNNTest, KNNTestF, ::testing::ValuesIn(inputs));
} // end namespace ML
| 3f6badd3909870e0d2aa519bd20aaade56af4740.cu | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <iostream>
#include <raft/cuda_utils.cuh>
#include <raft/cudart_utils.h>
#include <raft/random/rng.hpp>
#include <rmm/device_uvector.hpp>
#include <test_utils.h>
#include <vector>
#include <cuml/datasets/make_blobs.hpp>
#include <cuml/neighbors/knn.hpp>
namespace ML {
using namespace raft::random;
using namespace std;
struct KNNInputs {
int n_rows;
int n_cols;
int n_centers;
int n_query_row;
int n_neighbors;
int n_parts;
};
template <typename T, typename IdxT>
::std::ostream& operator<<(::std::ostream& os, const KNNInputs& dims)
{
return os;
}
template <typename T>
void gen_blobs(
raft::handle_t& handle, T* out, int* l, int rows, int cols, int centers, const T* centroids)
{
Datasets::make_blobs(handle,
out,
l,
rows,
cols,
centers,
true,
centroids,
nullptr,
0.1f,
true,
-10.0f,
10.0f,
1234ULL);
}
void create_index_parts(raft::handle_t& handle,
float* query_data,
int* query_labels,
vector<float*>& part_inputs,
vector<int*>& part_labels,
vector<int>& part_sizes,
const KNNInputs& params,
const float* centers)
{
cudaStream_t stream = handle.get_stream();
gen_blobs<float>(handle,
query_data,
query_labels,
params.n_rows * params.n_parts,
params.n_cols,
params.n_centers,
centers);
for (int i = 0; i < params.n_parts; i++) {
part_inputs.push_back(query_data + (i * params.n_rows * params.n_cols));
part_labels.push_back(query_labels + (i * params.n_rows));
part_sizes.push_back(params.n_rows);
}
}
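// Widens int labels to float on the device for the regression test path.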
__global__ void to_float(float* out, int* in, int size)
{
int element = threadIdx.x + blockDim.x * blockIdx.x;
if (element >= size) return;
out[element] = float(in[element]);
}
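// Translates the returned k-nearest-neighbor indices back into the labels of
// the index set, so they can be compared against the expected labels.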
__global__ void build_actual_output(
int* output, int n_rows, int k, const int* idx_labels, const int64_t* indices)
{
int element = threadIdx.x + blockDim.x * blockIdx.x;
if (element >= n_rows * k) return;
int ind = (int)indices[element];
output[element] = idx_labels[ind];
}
__global__ void build_expected_output(int* output, int n_rows, int k, const int* labels)
{
int row = threadIdx.x + blockDim.x * blockIdx.x;
if (row >= n_rows) return;
int cur_label = labels[row];
for (int i = 0; i < k; i++) {
output[row * k + i] = cur_label;
}
}
template <typename T>
class KNNTest : public ::testing::TestWithParam<KNNInputs> {
public:
KNNTest()
: params(::testing::TestWithParam<KNNInputs>::GetParam()),
stream(handle.get_stream()),
index_data(params.n_rows * params.n_cols * params.n_parts, stream),
index_labels(params.n_rows * params.n_parts, stream),
search_data(params.n_query_row * params.n_cols, stream),
search_labels(params.n_query_row, stream),
output_indices(params.n_query_row * params.n_neighbors * params.n_parts, stream),
output_dists(params.n_query_row * params.n_neighbors * params.n_parts, stream)
{
RAFT_CUDA_TRY(cudaMemsetAsync(index_data.data(), 0, index_data.size() * sizeof(T), stream));
RAFT_CUDA_TRY(cudaMemsetAsync(index_labels.data(), 0, index_labels.size() * sizeof(T), stream));
RAFT_CUDA_TRY(cudaMemsetAsync(search_data.data(), 0, search_data.size() * sizeof(T), stream));
RAFT_CUDA_TRY(
cudaMemsetAsync(search_labels.data(), 0, search_labels.size() * sizeof(T), stream));
RAFT_CUDA_TRY(
cudaMemsetAsync(output_indices.data(), 0, output_indices.size() * sizeof(T), stream));
RAFT_CUDA_TRY(cudaMemsetAsync(output_dists.data(), 0, output_dists.size() * sizeof(T), stream));
}
protected:
void testBruteForce()
{
rmm::device_uvector<int> actual_labels(params.n_query_row * params.n_neighbors * params.n_parts,
stream);
rmm::device_uvector<int> expected_labels(
params.n_query_row * params.n_neighbors * params.n_parts, stream);
RAFT_CUDA_TRY(
cudaMemsetAsync(actual_labels.data(), 0, actual_labels.size() * sizeof(T), stream));
RAFT_CUDA_TRY(
cudaMemsetAsync(expected_labels.data(), 0, expected_labels.size() * sizeof(T), stream));
create_data();
brute_force_knn(handle,
part_inputs,
part_sizes,
params.n_cols,
search_data.data(),
params.n_query_row,
output_indices.data(),
output_dists.data(),
params.n_neighbors,
true,
true);
build_actual_output<<<raft::ceildiv(params.n_query_row * params.n_neighbors, 32),
32,
0,
stream>>>(actual_labels.data(),
params.n_query_row,
params.n_neighbors,
index_labels.data(),
output_indices.data());
build_expected_output<<<raft::ceildiv(params.n_query_row, 32), 32, 0, stream>>>(
expected_labels.data(), params.n_query_row, params.n_neighbors, search_labels.data());
ASSERT_TRUE(devArrMatch(expected_labels.data(),
actual_labels.data(),
params.n_query_row * params.n_neighbors,
raft::Compare<int>()));
}
void testClassification()
{
rmm::device_uvector<int> actual_labels(params.n_query_row, stream);
rmm::device_uvector<int> expected_labels(params.n_query_row, stream);
RAFT_CUDA_TRY(
cudaMemsetAsync(actual_labels.data(), 0, actual_labels.size() * sizeof(T), stream));
RAFT_CUDA_TRY(
cudaMemsetAsync(expected_labels.data(), 0, expected_labels.size() * sizeof(T), stream));
create_data();
brute_force_knn(handle,
part_inputs,
part_sizes,
params.n_cols,
search_data.data(),
params.n_query_row,
output_indices.data(),
output_dists.data(),
params.n_neighbors,
true,
true);
vector<int*> full_labels(1);
full_labels[0] = index_labels.data();
knn_classify(handle,
actual_labels.data(),
output_indices.data(),
full_labels,
params.n_rows * params.n_parts,
params.n_query_row,
params.n_neighbors);
ASSERT_TRUE(devArrMatch(
search_labels.data(), actual_labels.data(), params.n_query_row, raft::Compare<int>()));
}
void testRegression()
{
rmm::device_uvector<int> actual_labels(params.n_query_row, stream);
rmm::device_uvector<int> expected_labels(params.n_query_row, stream);
RAFT_CUDA_TRY(
cudaMemsetAsync(actual_labels.data(), 0, actual_labels.size() * sizeof(T), stream));
RAFT_CUDA_TRY(
cudaMemsetAsync(expected_labels.data(), 0, expected_labels.size() * sizeof(T), stream));
create_data();
brute_force_knn(handle,
part_inputs,
part_sizes,
params.n_cols,
search_data.data(),
params.n_query_row,
output_indices.data(),
output_dists.data(),
params.n_neighbors,
true,
true);
rmm::device_uvector<float> index_labels_float(params.n_rows * params.n_parts, stream);
rmm::device_uvector<float> query_labels_float(params.n_query_row, stream);
to_float<<<raft::ceildiv((int)index_labels_float.size(), 32), 32, 0, stream>>>(
index_labels_float.data(), index_labels.data(), index_labels_float.size());
to_float<<<raft::ceildiv(params.n_query_row, 32), 32, 0, stream>>>(
query_labels_float.data(), search_labels.data(), params.n_query_row);
handle.sync_stream(stream);
RAFT_CUDA_TRY(cudaPeekAtLastError());
rmm::device_uvector<float> actual_labels_float(params.n_query_row, stream);
vector<float*> full_labels(1);
full_labels[0] = index_labels_float.data();
knn_regress(handle,
actual_labels_float.data(),
output_indices.data(),
full_labels,
params.n_rows,
params.n_query_row,
params.n_neighbors);
ASSERT_TRUE(raft::devArrMatch(query_labels_float.data(),
actual_labels_float.data(),
params.n_query_row,
raft::Compare<float>()));
}
private:
void create_data()
{
cudaStream_t stream = handle.get_stream();
rmm::device_uvector<T> rand_centers(params.n_centers * params.n_cols, stream);
Rng r(0, GeneratorType::GenPhilox);
r.uniform(rand_centers.data(), params.n_centers * params.n_cols, -10.0f, 10.0f, stream);
// Create index parts
create_index_parts(handle,
index_data.data(),
index_labels.data(),
part_inputs,
part_labels,
part_sizes,
params,
rand_centers.data());
gen_blobs(handle,
search_data.data(),
search_labels.data(),
params.n_query_row,
params.n_cols,
params.n_centers,
rand_centers.data());
}
raft::handle_t handle;
cudaStream_t stream = 0;
KNNInputs params;
rmm::device_uvector<float> index_data;
rmm::device_uvector<int> index_labels;
vector<float*> part_inputs;
vector<int*> part_labels;
vector<int> part_sizes;
rmm::device_uvector<float> search_data;
rmm::device_uvector<int> search_labels;
rmm::device_uvector<float> output_dists;
rmm::device_uvector<int64_t> output_indices;
};
const std::vector<KNNInputs> inputs = {{50, 5, 2, 25, 5, 2},
{50, 5, 2, 25, 10, 2},
{500, 5, 2, 25, 5, 7},
{500, 50, 2, 25, 10, 7},
{500, 50, 7, 25, 5, 7},
{50, 5, 3, 15, 5, 7}};
typedef KNNTest<float> KNNTestF;
TEST_P(KNNTestF, BruteForce) { this->testBruteForce(); }
TEST_P(KNNTestF, Classification) { this->testClassification(); }
TEST_P(KNNTestF, Regression) { this->testRegression(); }
INSTANTIATE_TEST_CASE_P(KNNTest, KNNTestF, ::testing::ValuesIn(inputs));
} // end namespace ML
|
f5abca005c764f3c9fd592ce852af42d8878f498.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "fft-cuda.cuh"
static __device__ __host__ inline float2 CplxAdd(float2 a, float2 b) {
float2 c;
c.x = a.x + b.x;
c.y = a.y + b.y;
return c;
}
static __device__ __host__ inline float2 CplxMul(float2 a, float2 b) {
float2 c;
c.x = a.x * b.x - a.y * b.y;
c.y = a.x * b.y + a.y * b.x;
return c;
}
static __device__ __host__ inline float2 CplxInv(float2 a) {
float2 c;
c.x = -a.x;
c.y = -a.y;
return c;
}
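// Bit-reversal permutation: output element i takes the input element whose
// s-bit index is the bit reversal of i (first step of iterative Cooley-Tukey).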
__global__ void bit_reorder(float2* da, float2* ra, int n, int s, int threads) {
int id = blockIdx.x * threads + threadIdx.x;
ra[id] = da[__brev(id) >> (32 - s)];
}
// __global__ void calc_fft(float2* ra, int k, int k_2, int start, int threads) {
// int tid = blockIdx.x * threads + threadIdx.x;
// int id = start * k_2 + tid;
// int id2 = id + k;
// float2 c;
// c.x = __cosf((2.0 * M_PI * tid) / (1.0 * k_2));
// c.y = -__sinf((2.0 * M_PI * tid) / (1.0 * k_2));
// float2 u, t;
// u = float2Add(ra[id], float2Mul(c, ra[id2]));
// t = float2Add(ra[id], float2Inv(float2Mul(c, ra[id2])));
// ra[id] = u;
// ra[id2] = t;
// }
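// One butterfly stage of size m: each thread in the lower half of its m-block
// combines ra[id] and ra[id + m/2] with a stage twiddle factor whose sign
// depends on the transform direction (isrev).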
__global__ void calc_fft(float2* ra, int m, int threads, bool isrev) {
int tid = blockIdx.x * threads + threadIdx.x;
if (tid % m < m / 2) {
int id = tid;
int id2 = id + m / 2;
float2 c;
c.x = __cosf((2.0 * M_PI * (tid % m)) / (1.0 * m));
if (isrev) {
c.y = -__sinf((2.0 * M_PI * (tid % m)) / (1.0 * m));
} else {
c.y = __sinf((2.0 * M_PI * (tid % m)) / (1.0 * m));
}
float2 u, t;
u = CplxAdd(ra[id], CplxMul(c, ra[id2]));
t = CplxAdd(ra[id], CplxInv(CplxMul(c, ra[id2])));
ra[id] = u;
ra[id2] = t;
}
}
void fft(float2* a, int n, int threads, bool isrev) {
size_t data_size = n * sizeof(float2);
float2* ra;
float2* da;
hipMalloc((void**)&ra, data_size);
hipMalloc((void**)&da, data_size);
hipMemcpy(da, a, data_size, hipMemcpyHostToDevice);
hipDeviceSynchronize();
int s = log2((float)n);
hipLaunchKernelGGL(( bit_reorder), dim3(n/threads), dim3(threads), 0, 0, da, ra, n, s, threads);
// for (int i = 2; i < n; i *= 2) {
// for (int j = 0; j < n; j += i) {
// int k = i / 2;
// calc_fft<<<ceil(k/threads), threads>>>(ra, k, i, j, threads);
// }
// }
for (int i = 2; i <= n; i *= 2) {
hipLaunchKernelGGL(( calc_fft), dim3(ceil(n / (float)threads)), dim3(threads), 0, 0, ra, i, threads, isrev);
}
// float2* result;
// result = (float2*)malloc(data_size);
hipMemcpy(a, ra, data_size, hipMemcpyDeviceToHost);
if (isrev) {
for (int i = 0; i < n; ++i) {
a[i].x /= (float)n;
a[i].y /= (float)n;
}
}
hipFree(da);
hipFree(ra);
}
| f5abca005c764f3c9fd592ce852af42d8878f498.cu | #include "fft-cuda.cuh"
static __device__ __host__ inline float2 CplxAdd(float2 a, float2 b) {
float2 c;
c.x = a.x + b.x;
c.y = a.y + b.y;
return c;
}
static __device__ __host__ inline float2 CplxMul(float2 a, float2 b) {
float2 c;
c.x = a.x * b.x - a.y * b.y;
c.y = a.x * b.y + a.y * b.x;
return c;
}
static __device__ __host__ inline float2 CplxInv(float2 a) {
float2 c;
c.x = -a.x;
c.y = -a.y;
return c;
}
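// Bit-reversal permutation: output element i takes the input element whose
// s-bit index is the bit reversal of i (first step of iterative Cooley-Tukey).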
__global__ void bit_reorder(float2* da, float2* ra, int n, int s, int threads) {
int id = blockIdx.x * threads + threadIdx.x;
ra[id] = da[__brev(id) >> (32 - s)];
}
// __global__ void calc_fft(float2* ra, int k, int k_2, int start, int threads) {
// int tid = blockIdx.x * threads + threadIdx.x;
// int id = start * k_2 + tid;
// int id2 = id + k;
// float2 c;
// c.x = __cosf((2.0 * M_PI * tid) / (1.0 * k_2));
// c.y = -__sinf((2.0 * M_PI * tid) / (1.0 * k_2));
// float2 u, t;
// u = float2Add(ra[id], float2Mul(c, ra[id2]));
// t = float2Add(ra[id], float2Inv(float2Mul(c, ra[id2])));
// ra[id] = u;
// ra[id2] = t;
// }
__global__ void calc_fft(float2* ra, int m, int threads, bool isrev) {
int tid = blockIdx.x * threads + threadIdx.x;
if (tid % m < m / 2) {
int id = tid;
int id2 = id + m / 2;
float2 c;
c.x = __cosf((2.0 * M_PI * (tid % m)) / (1.0 * m));
if (isrev) {
c.y = -__sinf((2.0 * M_PI * (tid % m)) / (1.0 * m));
} else {
c.y = __sinf((2.0 * M_PI * (tid % m)) / (1.0 * m));
}
float2 u, t;
u = CplxAdd(ra[id], CplxMul(c, ra[id2]));
t = CplxAdd(ra[id], CplxInv(CplxMul(c, ra[id2])));
ra[id] = u;
ra[id2] = t;
}
}
void fft(float2* a, int n, int threads, bool isrev) {
size_t data_size = n * sizeof(float2);
float2* ra;
float2* da;
cudaMalloc((void**)&ra, data_size);
cudaMalloc((void**)&da, data_size);
cudaMemcpy(da, a, data_size, cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
int s = log2((float)n);
bit_reorder<<<n/threads, threads>>>(da, ra, n, s, threads);
// for (int i = 2; i < n; i *= 2) {
// for (int j = 0; j < n; j += i) {
// int k = i / 2;
// calc_fft<<<ceil(k/threads), threads>>>(ra, k, i, j, threads);
// }
// }
for (int i = 2; i <= n; i *= 2) {
calc_fft<<<ceil(n/threads), threads>>>(ra, i, threads, isrev);
}
// float2* result;
// result = (float2*)malloc(data_size);
cudaMemcpy(a, ra, data_size, cudaMemcpyDeviceToHost);
if (isrev) {
for (int i = 0; i < n; ++i) {
a[i].x /= (float)n;
a[i].y /= (float)n;
}
}
cudaFree(da);
cudaFree(ra);
}
|
59c9371b599ff6500eaf5f02421e81a39b19b157.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
/*
* This sample implements a separable convolution filter
* of a 2D signal with a gaussian kernel.
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <cutil.h>
////////////////////////////////////////////////////////////////////////////////
// Common host and device functions
////////////////////////////////////////////////////////////////////////////////
//Round a / b to nearest higher integer value
int iDivUp(int a, int b){
return (a % b != 0) ? (a / b + 1) : (a / b);
}
//Round a / b to nearest lower integer value
int iDivDown(int a, int b){
return a / b;
}
//Align a to nearest higher multiple of b
int iAlignUp(int a, int b){
return (a % b != 0) ? (a - a % b + b) : a;
}
//Align a to nearest lower multiple of b
int iAlignDown(int a, int b){
return a - a % b;
}
////////////////////////////////////////////////////////////////////////////////
// GPU convolution
////////////////////////////////////////////////////////////////////////////////
//Global macro, controlling innermost convolution loop unrolling
#define UNROLL_INNER
#include <convolutionSeparable_kernel.cu>
////////////////////////////////////////////////////////////////////////////////
// Data configuration
////////////////////////////////////////////////////////////////////////////////
//Image width should be aligned to maximum coalesced read/write size
//for best global memory performance in both row and column filter.
#ifdef __DEVICE_EMULATION__
//Reduce problem size to have reasonable emulation time
const int DATA_W = iAlignUp(256, 16);
const int DATA_H = 256;
#else
const int DATA_W = iAlignUp(8192, 16);
const int DATA_H = 1024;//CHANGE NUMBER OF ROWS HERE!
#endif
const int DATA_SIZE = DATA_W * DATA_H * sizeof(float);
const int KERNEL_SIZE = KERNEL_W * sizeof(float);
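// KERNEL_W, KERNEL_RADIUS, KERNEL_RADIUS_ALIGNED, ROW_TILE_W and the d_Kernel
// constant array are not defined in this file; they are presumably provided by
// convolutionSeparable_kernel.cu, included above.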
//////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////// filtering on the gpu wrapped for labview, matlab, etc /////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////
extern "C" __declspec(dllexport) void gpuFilter(float* h_Result, int dataW, int dataH)
//void gpuFilter(float* h_Result, float* h_Data, int dataW, int dataH)
{
////////////////////////// kernel init /////////////////////////////////////
//declare and allocate for kernel on host
float *h_Kernel, kernelSum = 0;
h_Kernel = (float *)malloc(KERNEL_SIZE);
//build the kernel
for(unsigned int i = 0; i < KERNEL_W; i++){
float dist = (float)(i - KERNEL_RADIUS) / (float)KERNEL_RADIUS;
h_Kernel[i] = expf(- dist * dist / 2);
kernelSum += h_Kernel[i];
}
for(unsigned int i = 0; i < KERNEL_W; i++)
h_Kernel[i] /= kernelSum;
//copy host kernel to device
CUDA_SAFE_CALL( hipMemcpyToSymbol(d_Kernel, h_Kernel, KERNEL_SIZE) );
free(h_Kernel);//free the host kernel memory
////////////////////////// end kernel init /////////////////////////////////////
////////////////////////////////filtering procedure/////////////////////////////
//declare and initialize device variables
float *d_Data, *d_Result, *d_Temp;
CUDA_SAFE_CALL( hipMalloc( (void **)&d_Data, DATA_SIZE) );
CUDA_SAFE_CALL( hipMalloc( (void **)&d_Result, DATA_SIZE) );
CUDA_SAFE_CALL( hipMalloc( (void **)&d_Temp , DATA_SIZE) );
//transfer the data
CUDA_SAFE_CALL( hipMemcpy(d_Data, h_Result, DATA_SIZE, hipMemcpyHostToDevice) );
//setup for the call to the device
dim3 blockGridRows(iDivUp(DATA_W, ROW_TILE_W), DATA_H);
dim3 threadBlockRows(KERNEL_RADIUS_ALIGNED + ROW_TILE_W + KERNEL_RADIUS);
//call the device function
hipLaunchKernelGGL(( convolutionRowGPU), dim3(blockGridRows), dim3(threadBlockRows), 0, 0, d_Result, d_Data, dataW, dataH);
//obtain the result
CUDA_SAFE_CALL( hipMemcpy(h_Result, d_Result, DATA_SIZE, hipMemcpyDeviceToHost) );
//clean up device memory
CUDA_SAFE_CALL( hipFree(d_Data) );
CUDA_SAFE_CALL( hipFree(d_Result) );
CUDA_SAFE_CALL( hipFree(d_Temp) );
//////////////////////////// end filtering procedure ////////////////////////////////
}
//////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////
int __stdcall DllMain(void)
{
return 0;
}
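// A hypothetical caller for the exported gpuFilter() (the GPUFILTER_DEMO guard,
// main() and the test pattern are illustrative only). It exercises the implied
// contract that h_Result is a DATA_W x DATA_H float buffer used both as input
// and as output of the row filter.
#ifdef GPUFILTER_DEMO
int main(void)
{
    float *img = (float *)malloc(DATA_SIZE);
    for(int i = 0; i < DATA_W * DATA_H; i++)
        img[i] = (float)(i % 256);      // arbitrary test pattern
    gpuFilter(img, DATA_W, DATA_H);     // row-filtered result written in place
    printf("img[0] = %f\n", img[0]);
    free(img);
    return 0;
}
#endif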
| 59c9371b599ff6500eaf5f02421e81a39b19b157.cu | /*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
/*
* This sample implements a separable convolution filter
* of a 2D signal with a gaussian kernel.
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <cutil.h>
////////////////////////////////////////////////////////////////////////////////
// Common host and device functions
////////////////////////////////////////////////////////////////////////////////
//Round a / b to nearest higher integer value
int iDivUp(int a, int b){
return (a % b != 0) ? (a / b + 1) : (a / b);
}
//Round a / b to nearest lower integer value
int iDivDown(int a, int b){
return a / b;
}
//Align a to nearest higher multiple of b
int iAlignUp(int a, int b){
return (a % b != 0) ? (a - a % b + b) : a;
}
//Align a to nearest lower multiple of b
int iAlignDown(int a, int b){
return a - a % b;
}
////////////////////////////////////////////////////////////////////////////////
// GPU convolution
////////////////////////////////////////////////////////////////////////////////
//Global macro, controlling innermost convolution loop unrolling
#define UNROLL_INNER
#include <convolutionSeparable_kernel.cu>
////////////////////////////////////////////////////////////////////////////////
// Data configuration
////////////////////////////////////////////////////////////////////////////////
//Image width should be aligned to maximum coalesced read/write size
//for best global memory performance in both row and column filter.
#ifdef __DEVICE_EMULATION__
//Reduce problem size to have reasonable emulation time
const int DATA_W = iAlignUp(256, 16);
const int DATA_H = 256;
#else
const int DATA_W = iAlignUp(8192, 16);
const int DATA_H = 1024;//CHANGE NUMBER OF ROWS HERE!
#endif
const int DATA_SIZE = DATA_W * DATA_H * sizeof(float);
const int KERNEL_SIZE = KERNEL_W * sizeof(float);
//////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////// filtering on the gpu wrapped for labview, matlab, etc /////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////
extern "C" __declspec(dllexport) void gpuFilter(float* h_Result, int dataW, int dataH)
//void gpuFilter(float* h_Result, float* h_Data, int dataW, int dataH)
{
////////////////////////// kernel init /////////////////////////////////////
//declare and allocate for kernel on host
float *h_Kernel, kernelSum = 0;
h_Kernel = (float *)malloc(KERNEL_SIZE);
//build the kernel
for(unsigned int i = 0; i < KERNEL_W; i++){
float dist = (float)(i - KERNEL_RADIUS) / (float)KERNEL_RADIUS;
h_Kernel[i] = expf(- dist * dist / 2);
kernelSum += h_Kernel[i];
}
for(unsigned int i = 0; i < KERNEL_W; i++)
h_Kernel[i] /= kernelSum;
//copy host kernel to device
CUDA_SAFE_CALL( cudaMemcpyToSymbol(d_Kernel, h_Kernel, KERNEL_SIZE) );
free(h_Kernel);//free the host kernel memory
////////////////////////// end kernel init /////////////////////////////////////
////////////////////////////////filtering procedure/////////////////////////////
//declare and initialize device variables
float *d_Data, *d_Result, *d_Temp;
CUDA_SAFE_CALL( cudaMalloc( (void **)&d_Data, DATA_SIZE) );
CUDA_SAFE_CALL( cudaMalloc( (void **)&d_Result, DATA_SIZE) );
CUDA_SAFE_CALL( cudaMalloc( (void **)&d_Temp , DATA_SIZE) );
//transfer the data
CUDA_SAFE_CALL( cudaMemcpy(d_Data, h_Result, DATA_SIZE, cudaMemcpyHostToDevice) );
//setup for the call to the device
dim3 blockGridRows(iDivUp(DATA_W, ROW_TILE_W), DATA_H);
dim3 threadBlockRows(KERNEL_RADIUS_ALIGNED + ROW_TILE_W + KERNEL_RADIUS);
//call the device function
convolutionRowGPU<<<blockGridRows, threadBlockRows>>>( d_Result, d_Data, dataW, dataH);
//obtain the result
CUDA_SAFE_CALL( cudaMemcpy(h_Result, d_Result, DATA_SIZE, cudaMemcpyDeviceToHost) );
//clean up device memory
CUDA_SAFE_CALL( cudaFree(d_Data) );
CUDA_SAFE_CALL( cudaFree(d_Result) );
CUDA_SAFE_CALL( cudaFree(d_Temp) );
//////////////////////////// end filtering procedure ////////////////////////////////
}
//////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////
int __stdcall DllMain(void)
{
return 0;
}
|
3c546fd0871138f5ac3fd03dd5616ff2d2076585.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
#include "../common/book.h"
__global__ void kernel(int a,int b,int *c){
*c=a+b;
}
int main(void)
{
int c;
int *dev_c;
HANDLE_ERROR(hipMalloc((void**)&dev_c,sizeof(int)));
hipLaunchKernelGGL(( kernel), dim3(1),dim3(1), 0, 0, 2,7,dev_c);
//printf("Hello,World!\n");
HANDLE_ERROR(hipMemcpy(&c,dev_c,
sizeof(int),
hipMemcpyDeviceToHost));
printf("2+7=%d\n",c);
hipFree(dev_c);
return 0;
}
| 3c546fd0871138f5ac3fd03dd5616ff2d2076585.cu | #include<iostream>
#include "../common/book.h"
__global__ void kernel(int a,int b,int *c){
*c=a+b;
}
int main(void)
{
int c;
int *dev_c;
HANDLE_ERROR(cudaMalloc((void**)&dev_c,sizeof(int)));
kernel<<<1,1>>>(2,7,dev_c);
//printf("Hello,World!\n");
HANDLE_ERROR(cudaMemcpy(&c,dev_c,
sizeof(int),
cudaMemcpyDeviceToHost));
printf("2+7=%d\n",c);
cudaFree(dev_c);
return 0;
}
|
79aaedbd159308ca8e5a9f26132df0e4d92b65f6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include"CudaHelper.cuh"
#include"CudaInterface.hpp"
typedef unsigned char uchar;
typedef unsigned short ushort;
extern int currCudaDeviceID;
#define RELU(x) fmaxf(x, 0.0f)
#define L2 0
#define L3 1
#define L4 2
#define L5 3
#define L6 4
#define L7 5
#define L8 6
#define L9 7
#define CHANNEL1TO8(n, Level) \
tl * HDNL##Level##kernelsL1[n * 9 + 0] + tc * HDNL##Level##kernelsL1[n * 9 + 1] + tr * HDNL##Level##kernelsL1[n * 9 + 2] + \
ml * HDNL##Level##kernelsL1[n * 9 + 3] + mc * HDNL##Level##kernelsL1[n * 9 + 4] + mr * HDNL##Level##kernelsL1[n * 9 + 5] + \
bl * HDNL##Level##kernelsL1[n * 9 + 6] + bc * HDNL##Level##kernelsL1[n * 9 + 7] + br * HDNL##Level##kernelsL1[n * 9 + 8] + HDNL##Level##biasL1[n]
#define CHANNEL8TO8(n, Level) \
tl1.x * HDNL##Level##kernelsL[L][n * 72 + 0 * 9 + 0] + tc1.x * HDNL##Level##kernelsL[L][n * 72 + 0 * 9 + 1] + tr1.x * HDNL##Level##kernelsL[L][n * 72 + 0 * 9 + 2] + \
ml1.x * HDNL##Level##kernelsL[L][n * 72 + 0 * 9 + 3] + mc1.x * HDNL##Level##kernelsL[L][n * 72 + 0 * 9 + 4] + mr1.x * HDNL##Level##kernelsL[L][n * 72 + 0 * 9 + 5] + \
bl1.x * HDNL##Level##kernelsL[L][n * 72 + 0 * 9 + 6] + bc1.x * HDNL##Level##kernelsL[L][n * 72 + 0 * 9 + 7] + br1.x * HDNL##Level##kernelsL[L][n * 72 + 0 * 9 + 8] + \
tl1.y * HDNL##Level##kernelsL[L][n * 72 + 1 * 9 + 0] + tc1.y * HDNL##Level##kernelsL[L][n * 72 + 1 * 9 + 1] + tr1.y * HDNL##Level##kernelsL[L][n * 72 + 1 * 9 + 2] + \
ml1.y * HDNL##Level##kernelsL[L][n * 72 + 1 * 9 + 3] + mc1.y * HDNL##Level##kernelsL[L][n * 72 + 1 * 9 + 4] + mr1.y * HDNL##Level##kernelsL[L][n * 72 + 1 * 9 + 5] + \
bl1.y * HDNL##Level##kernelsL[L][n * 72 + 1 * 9 + 6] + bc1.y * HDNL##Level##kernelsL[L][n * 72 + 1 * 9 + 7] + br1.y * HDNL##Level##kernelsL[L][n * 72 + 1 * 9 + 8] + \
tl1.z * HDNL##Level##kernelsL[L][n * 72 + 2 * 9 + 0] + tc1.z * HDNL##Level##kernelsL[L][n * 72 + 2 * 9 + 1] + tr1.z * HDNL##Level##kernelsL[L][n * 72 + 2 * 9 + 2] + \
ml1.z * HDNL##Level##kernelsL[L][n * 72 + 2 * 9 + 3] + mc1.z * HDNL##Level##kernelsL[L][n * 72 + 2 * 9 + 4] + mr1.z * HDNL##Level##kernelsL[L][n * 72 + 2 * 9 + 5] + \
bl1.z * HDNL##Level##kernelsL[L][n * 72 + 2 * 9 + 6] + bc1.z * HDNL##Level##kernelsL[L][n * 72 + 2 * 9 + 7] + br1.z * HDNL##Level##kernelsL[L][n * 72 + 2 * 9 + 8] + \
tl1.w * HDNL##Level##kernelsL[L][n * 72 + 3 * 9 + 0] + tc1.w * HDNL##Level##kernelsL[L][n * 72 + 3 * 9 + 1] + tr1.w * HDNL##Level##kernelsL[L][n * 72 + 3 * 9 + 2] + \
ml1.w * HDNL##Level##kernelsL[L][n * 72 + 3 * 9 + 3] + mc1.w * HDNL##Level##kernelsL[L][n * 72 + 3 * 9 + 4] + mr1.w * HDNL##Level##kernelsL[L][n * 72 + 3 * 9 + 5] + \
bl1.w * HDNL##Level##kernelsL[L][n * 72 + 3 * 9 + 6] + bc1.w * HDNL##Level##kernelsL[L][n * 72 + 3 * 9 + 7] + br1.w * HDNL##Level##kernelsL[L][n * 72 + 3 * 9 + 8] + \
tl2.x * HDNL##Level##kernelsL[L][n * 72 + 4 * 9 + 0] + tc2.x * HDNL##Level##kernelsL[L][n * 72 + 4 * 9 + 1] + tr2.x * HDNL##Level##kernelsL[L][n * 72 + 4 * 9 + 2] + \
ml2.x * HDNL##Level##kernelsL[L][n * 72 + 4 * 9 + 3] + mc2.x * HDNL##Level##kernelsL[L][n * 72 + 4 * 9 + 4] + mr2.x * HDNL##Level##kernelsL[L][n * 72 + 4 * 9 + 5] + \
bl2.x * HDNL##Level##kernelsL[L][n * 72 + 4 * 9 + 6] + bc2.x * HDNL##Level##kernelsL[L][n * 72 + 4 * 9 + 7] + br2.x * HDNL##Level##kernelsL[L][n * 72 + 4 * 9 + 8] + \
tl2.y * HDNL##Level##kernelsL[L][n * 72 + 5 * 9 + 0] + tc2.y * HDNL##Level##kernelsL[L][n * 72 + 5 * 9 + 1] + tr2.y * HDNL##Level##kernelsL[L][n * 72 + 5 * 9 + 2] + \
ml2.y * HDNL##Level##kernelsL[L][n * 72 + 5 * 9 + 3] + mc2.y * HDNL##Level##kernelsL[L][n * 72 + 5 * 9 + 4] + mr2.y * HDNL##Level##kernelsL[L][n * 72 + 5 * 9 + 5] + \
bl2.y * HDNL##Level##kernelsL[L][n * 72 + 5 * 9 + 6] + bc2.y * HDNL##Level##kernelsL[L][n * 72 + 5 * 9 + 7] + br2.y * HDNL##Level##kernelsL[L][n * 72 + 5 * 9 + 8] + \
tl2.z * HDNL##Level##kernelsL[L][n * 72 + 6 * 9 + 0] + tc2.z * HDNL##Level##kernelsL[L][n * 72 + 6 * 9 + 1] + tr2.z * HDNL##Level##kernelsL[L][n * 72 + 6 * 9 + 2] + \
ml2.z * HDNL##Level##kernelsL[L][n * 72 + 6 * 9 + 3] + mc2.z * HDNL##Level##kernelsL[L][n * 72 + 6 * 9 + 4] + mr2.z * HDNL##Level##kernelsL[L][n * 72 + 6 * 9 + 5] + \
bl2.z * HDNL##Level##kernelsL[L][n * 72 + 6 * 9 + 6] + bc2.z * HDNL##Level##kernelsL[L][n * 72 + 6 * 9 + 7] + br2.z * HDNL##Level##kernelsL[L][n * 72 + 6 * 9 + 8] + \
tl2.w * HDNL##Level##kernelsL[L][n * 72 + 7 * 9 + 0] + tc2.w * HDNL##Level##kernelsL[L][n * 72 + 7 * 9 + 1] + tr2.w * HDNL##Level##kernelsL[L][n * 72 + 7 * 9 + 2] + \
ml2.w * HDNL##Level##kernelsL[L][n * 72 + 7 * 9 + 3] + mc2.w * HDNL##Level##kernelsL[L][n * 72 + 7 * 9 + 4] + mr2.w * HDNL##Level##kernelsL[L][n * 72 + 7 * 9 + 5] + \
bl2.w * HDNL##Level##kernelsL[L][n * 72 + 7 * 9 + 6] + bc2.w * HDNL##Level##kernelsL[L][n * 72 + 7 * 9 + 7] + br2.w * HDNL##Level##kernelsL[L][n * 72 + 7 * 9 + 8] + HDNL##Level##biasL[L][n]
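// CHANNEL1TO8(n, Level) computes output feature channel n of the first 3x3
// convolution layer (one input channel -> eight feature channels) from the
// nine neighbourhood taps tl..br plus the layer bias. CHANNEL8TO8(n, Level)
// does the same for the 8-in/8-out layers: the eight input channels arrive
// packed as two float4 values per tap (tl1/tl2 .. br1/br2), and L selects
// the layer's weights and bias in HDNL<Level>kernelsL / HDNL<Level>biasL.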
#define RUNKERNEL(Level, type) \
    hipLaunchKernelGGL((conv1To8HDNL##Level), dim3(dimGrid), dim3(dimBlock), 0, stream, inTex, surf1, param->orgW, param->orgH); \
    hipLaunchKernelGGL((conv8To8HDNL##Level), dim3(dimGrid), dim3(dimBlock), 0, stream, surf1, surf2, param->orgW, param->orgH, L2); \
    hipLaunchKernelGGL((conv8To8HDNL##Level), dim3(dimGrid), dim3(dimBlock), 0, stream, surf2, surf1, param->orgW, param->orgH, L3); \
    hipLaunchKernelGGL((conv8To8HDNL##Level), dim3(dimGrid), dim3(dimBlock), 0, stream, surf1, surf2, param->orgW, param->orgH, L4); \
    hipLaunchKernelGGL((conv8To8HDNL##Level), dim3(dimGrid), dim3(dimBlock), 0, stream, surf2, surf1, param->orgW, param->orgH, L5); \
    hipLaunchKernelGGL((conv8To8HDNL##Level), dim3(dimGrid), dim3(dimBlock), 0, stream, surf1, surf2, param->orgW, param->orgH, L6); \
    hipLaunchKernelGGL((conv8To8HDNL##Level), dim3(dimGrid), dim3(dimBlock), 0, stream, surf2, surf1, param->orgW, param->orgH, L7); \
    hipLaunchKernelGGL((conv8To8HDNL##Level), dim3(dimGrid), dim3(dimBlock), 0, stream, surf1, surf2, param->orgW, param->orgH, L8); \
    hipLaunchKernelGGL((conv8To8HDNL##Level), dim3(dimGrid), dim3(dimBlock), 0, stream, surf2, surf1, param->orgW, param->orgH, L9); \
    hipLaunchKernelGGL((convTranspose8To1HDNL##Level<type>), dim3(dimGridout), dim3(dimBlock), 0, stream, surf1, outSurf, W, H);
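// RUNKERNEL expands to the full HDNL pipeline for one model level: conv1To8
// reads the input texture into surf1, the eight conv8To8 stages ping-pong the
// feature maps between surf1 and surf2 (layers L2..L9), and convTranspose8To1
// writes the upscaled result from surf1 to outSurf. inTex, surf1, surf2,
// outSurf, param, stream, dimGrid, dimBlock and dimGridout must be in scope
// at the expansion site.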
inline __device__ float clamp(float f, float a, float b)
{
return fmaxf(a, fminf(f, b));
}
static __device__ __constant__ const float HDNL0kernelsL1[9 * 8] =
{
0.0609, 0.1027, -0.0447,
-0.1423, 0.7196, 0.1803,
0.0842, 0.0696, 0.0082,
0.0089, 0.1540, -0.8589,
0.0448, 0.8659, -0.2420,
-0.0364, 0.0585, 0.0125,
-0.1937, 0.7259, 0.0119,
-0.8266, 0.4147, 0.0088,
-0.0453, -0.0451, -0.0182,
0.0264, -0.9422, 0.1258,
-0.0543, 0.1282, 0.7102,
-0.0106, 0.0386, -0.0141,
0.2054, -0.0393, 0.1494,
0.3106, 0.5722, 0.2640,
0.1708, -0.1640, -0.0212,
0.0558, -0.2887, -0.1666,
0.3123, -0.3097, -0.2281,
0.2880, 0.3001, 0.0526,
-0.0320, 0.0584, -0.0193,
-0.0135, 1.0649, -0.1246,
0.0283, -0.3030, -0.6378,
-0.0040, -0.9122, 0.0181,
0.0365, 0.8947, -0.0420,
-0.0199, 0.0217, 0.0060
};
static __device__ __constant__ const float HDNL0biasL1[8] =
{
-0.7577, -0.0210, 0.0292, -0.0189, 0.0223, 0.0340, 0.0150, -0.0044
};
static __device__ __constant__ const float HDNL0kernelsL[8][9 * 8 * 8] =
{
{
2.0611e-01, 6.6865e-02, -9.9123e-02,
8.5279e-02, -4.5549e-02, -2.9491e-02,
-1.0358e-01, -2.4844e-02, -8.1539e-03,
-1.1308e-01, -6.4228e-02, -8.8081e-02,
2.7810e-02, -1.6054e-01, -1.1985e-01,
-2.8679e-01, -1.7785e-02, 1.1559e-01,
2.1614e-02, -6.8870e-02, -2.4707e-01,
9.6867e-02, -1.6561e-01, 2.8281e-02,
-8.2469e-02, -9.8554e-02, -1.7147e-02,
3.3710e-01, 9.2126e-02, 3.6880e-02,
5.7004e-02, 4.0175e-02, 1.6116e-01,
2.5629e-01, 5.1154e-01, 2.4119e-02,
1.9495e-02, 2.6940e-01, -1.4050e-01,
5.0325e-02, -4.5920e-02, -1.3586e-01,
5.9458e-02, 1.3860e-01, -2.1065e-01,
-1.0744e-01, -1.5915e-01, -1.1528e-02,
-1.1470e-01, 6.3455e-02, -5.5558e-02,
-6.9920e-02, -3.0142e-02, -4.9059e-02,
3.6421e-01, 3.0252e-01, -1.3562e-01,
1.5238e-01, -1.9868e-01, -3.2644e-02,
-4.2849e-02, 1.3677e-02, 7.3854e-02,
7.6609e-02, -1.0121e-01, 3.6319e-02,
9.3536e-02, 6.0386e-02, 1.0086e-01,
-2.6630e-01, 2.5875e-02, -1.9225e-01,
4.0687e-02, 1.1005e-01, 9.9578e-03,
1.6939e-01, 5.0872e-01, 8.9876e-02,
6.9561e-02, 1.1910e-01, -1.8091e-02,
-3.5739e-02, -7.5300e-02, -1.6788e-02,
3.0316e-02, 1.5942e-01, -9.0878e-02,
-6.3737e-02, 2.6141e-02, 8.8040e-03,
3.4954e-03, -6.6707e-02, 1.4551e-01,
7.6258e-02, 1.4893e-01, -1.5255e-01,
6.2442e-02, 2.2166e-01, 7.5327e-02,
5.4785e-02, -1.4503e-02, -1.5188e-03,
1.6748e-01, -5.2731e-03, -1.9900e-02,
4.4786e-02, -1.0669e-01, 1.3192e-01,
1.9961e-02, -8.1015e-02, -3.2264e-02,
1.0544e-01, 1.8844e-01, 7.4274e-03,
6.6729e-02, -7.8318e-02, 3.0775e-02,
-8.6109e-03, 7.4977e-02, 9.4079e-02,
-1.2726e-01, -2.9664e-01, 7.8153e-03,
-4.8413e-02, -1.8450e-01, -7.1065e-02,
-8.7609e-02, -7.7192e-02, 5.0919e-02,
-1.4021e-01, 3.5696e-01, 1.2079e-02,
-2.0318e-02, -1.8827e-02, 3.9084e-02,
-2.8654e-02, -6.4166e-02, 5.4889e-02,
8.2689e-02, 8.4463e-02, 2.2339e-02,
1.0805e-01, -1.2566e-01, 1.7109e-01,
-6.1338e-02, -3.4043e-02, 4.0473e-02,
6.3821e-02, 1.7626e-01, -5.8112e-02,
-9.5002e-02, 1.3327e-02, 1.2242e-01,
4.9008e-02, -4.3678e-02, 2.2362e-02,
-7.7903e-02, -3.8252e-02, -5.2271e-02,
-1.8884e-02, -1.2859e-01, 4.1172e-02,
-3.1181e-02, 3.2348e-02, -4.9081e-02,
-6.7966e-02, -2.4896e-02, -6.5323e-02,
8.0742e-02, 2.6093e-01, -2.4638e-01,
-8.0881e-02, -2.9643e-02, -7.9627e-02,
1.4020e-01, 2.1575e-01, 8.1244e-03,
2.1561e-01, -2.9305e-01, -2.5535e-02,
-8.5538e-02, -1.4456e-01, -7.5664e-02,
-3.9921e-02, 4.0659e-02, 1.7812e-01,
1.1580e-01, 5.6628e-02, 9.0008e-02,
-2.2384e-02, -1.9788e-02, -4.0547e-02,
1.0070e-01, 2.9581e-01, 1.9936e-01,
-1.1957e-01, -8.6508e-02, -8.2543e-04,
-5.2879e-02, 1.5486e-01, 1.0829e-02,
1.4716e-01, 3.4257e-01, -3.2058e-03,
-2.1687e-02, 5.8641e-02, -6.3806e-02,
-3.2607e-02, 7.3328e-02, -6.4738e-03,
-1.0031e-01, -1.7698e-01, -9.4201e-02,
-3.3644e-02, -3.5860e-01, -9.3200e-02,
-7.4142e-02, 5.5001e-02, 4.3741e-02,
-2.2447e-03, 1.1941e-01, -1.6135e-02,
-1.4764e-02, -1.0194e-02, 3.2540e-02,
-1.0588e-01, -2.3000e-01, -1.1557e-02,
-9.0254e-02, 2.3352e-01, -1.3622e-01,
-1.9256e-03, -5.3372e-02, 1.0314e-01,
-2.0100e-02, 1.0700e-01, 1.6108e-01,
2.8422e-02, 2.7909e-01, 3.8342e-01,
1.4025e-02, 9.0965e-02, 2.0218e-01,
3.3562e-03, 7.6652e-02, 4.5974e-02,
-1.3617e-02, -1.4014e-01, -1.9253e-02,
1.1020e-01, -1.9678e-01, 6.7123e-02,
-3.3294e-02, -1.3006e-01, -1.0111e-01,
5.5813e-02, 2.1127e-01, 2.0248e-02,
-9.6386e-04, -5.2497e-03, 1.1134e-01,
2.8910e-02, 1.2229e-01, 1.8439e-01,
1.6413e-02, 1.5870e-01, -1.1616e-01,
-1.6032e-03, -6.8258e-03, -2.1883e-02,
1.2052e-01, -2.1982e-02, -1.3088e-01,
2.8664e-02, -5.0670e-02, 2.2927e-01,
2.0461e-02, 7.7250e-03, -2.6630e-02,
-9.0406e-02, -1.4174e-01, 9.8969e-02,
-6.6573e-02, -2.4425e-01, -3.5126e-02,
9.3859e-02, 1.9058e-01, -1.6569e-01,
-4.9163e-03, 7.4149e-02, 6.3345e-02,
-1.7888e-02, -9.1876e-02, 1.3728e-01,
-9.6098e-02, -3.4814e-02, -1.0862e-02,
4.8031e-03, 2.5206e-01, 8.0316e-02,
1.5102e-01, 4.1236e-02, 2.2339e-01,
2.8500e-01, 1.5106e-01, 9.6321e-04,
-6.0741e-02, 3.5759e-02, -1.8829e-01,
-1.1295e-03, -6.2322e-02, 8.4974e-01,
-3.9817e-02, -2.0666e-01, 2.2961e-01,
3.6857e-02, -2.0211e-02, -9.3342e-02,
2.0827e-02, 6.8874e-02, -6.0287e-02,
-6.9724e-02, 1.4423e-01, -7.6017e-02,
1.4718e-02, 1.8990e-01, 1.1789e-01,
-1.5018e-01, -2.3071e-01, 1.7511e-01,
-7.7605e-02, 5.0621e-02, -1.0381e-01,
8.6845e-02, -1.2410e-01, -4.4669e-01,
2.7930e-02, -5.4713e-02, -7.7923e-02,
8.6000e-02, -2.6371e-02, -8.6541e-02,
-1.1521e-01, 1.4389e-01, 5.0507e-02,
-1.6618e-02, -2.5150e-01, -4.9759e-02,
7.7166e-02, 4.5033e-03, -5.4649e-02,
2.8548e-03, -2.8078e-03, 8.1129e-02,
-4.5973e-02, 3.6740e-03, 2.0746e-01,
-9.8191e-02, 1.2807e-01, 8.1950e-03,
1.4240e-01, 1.5104e-01, 6.9624e-02,
2.2309e-01, 2.5688e-01, 9.4766e-02,
6.2560e-02, 7.1347e-02, 4.1432e-02,
-3.1829e-02, 1.5207e-01, 2.0575e-02,
-1.2506e-01, 2.9274e-01, 9.4712e-02,
-2.0520e-01, 4.9894e-04, 5.6171e-02,
-4.1567e-03, 6.6753e-02, -1.5767e-01,
6.3768e-02, 8.3008e-02, -3.5639e-01,
4.4660e-02, 2.6996e-01, -6.4014e-02,
8.5475e-02, 1.7854e-02, -6.4079e-02,
1.8760e-01, 1.5285e-01, -3.5614e-02,
1.0747e-02, -3.1330e-01, -4.8664e-02,
7.2150e-02, 1.7570e-01, 1.6716e-01,
6.2431e-02, 2.3755e-01, 2.8554e-01,
3.5791e-02, 2.8185e-01, 1.5810e-01,
-4.0886e-02, 1.8833e-02, -8.2903e-03,
1.3994e-02, -1.0846e-01, 3.5315e-02,
-6.2674e-02, 6.2806e-02, 2.2168e-02,
-3.6236e-01, -2.5326e-01, 5.6331e-02,
9.8762e-02, 3.8049e-01, 5.9885e-02,
-3.0541e-02, 7.9855e-02, -5.8639e-02,
1.1104e-03, 1.7147e-02, 3.3115e-02,
-3.3663e-02, 7.4615e-02, 6.4211e-02,
-7.3441e-02, -1.5568e-01, 7.6546e-02,
6.1802e-02, -1.5300e-01, -1.8209e-02,
-9.2786e-03, 1.6622e-01, 1.1354e-01,
9.5865e-03, -2.4226e-02, -1.4750e-03,
-5.5294e-02, -1.1839e-01, 3.8867e-03,
1.7262e-01, 4.2743e-01, 6.8970e-02,
-2.0232e-01, -1.4564e-01, 2.3025e-02,
-2.6139e-03, -1.6907e-02, 1.1693e-01,
-9.4871e-03, 3.8488e-02, -4.8351e-02,
-9.2171e-02, 4.8227e-02, 9.7378e-02,
-1.0292e-01, -1.2084e-01, -9.6676e-02,
1.8103e-02, 3.0658e-01, -7.7755e-02,
-2.4362e-02, -1.9862e-01, -6.9665e-02,
8.2944e-03, -1.4680e-01, -1.7371e-02,
-1.6534e-01, 2.5752e-01, 1.1129e-01,
-9.4151e-02, -1.3225e-01, 1.5933e-01,
9.0723e-02, 5.5469e-02, -1.4091e-01,
8.3404e-02, 1.3741e-01, -3.5438e-02,
3.2681e-02, 2.8491e-02, 1.4278e-02,
2.3789e-01, -2.3687e-03, -5.3264e-03,
-1.1161e-01, 1.9351e-02, 5.0832e-02,
8.2246e-03, 2.9892e-02, -3.7197e-02,
4.8236e-02, 1.6945e-01, 1.3673e-01,
1.1236e-01, 7.2318e-01, -4.1618e-02,
2.7494e-01, 1.0081e-01, -8.5399e-03,
-5.6151e-02, 8.1212e-02, -7.5770e-02,
2.7872e-02, 9.4644e-02, 1.1175e-02,
-6.1539e-02, 7.7395e-02, -3.2495e-02,
-5.1640e-02, 2.1028e-03, 1.5825e-02,
-1.1004e-01, 2.3153e-01, -6.1653e-02,
-2.6497e-02, 5.9461e-01, 4.0865e-02,
-1.9956e-02, 7.9328e-02, -1.7002e-02,
-5.5930e-03, 5.2015e-02, 7.7945e-04,
1.0136e-02, -9.0111e-02, -1.1175e-01,
-3.1781e-02, 1.4686e-01, -7.5718e-03,
1.1036e-02, 2.4618e-01, 8.5951e-02,
3.4775e-02, -1.2184e-01, 1.8010e-01,
-3.6781e-02, -1.3912e-01, -4.9172e-02,
3.3064e-02, 5.0582e-01, 1.0713e-02,
-1.2934e-02, -1.7697e-01, -1.4954e-01,
2.2229e-02, -5.8568e-03, -5.0186e-02,
1.9648e-02, -1.1302e-01, 1.5629e-02,
-3.5015e-02, 9.5032e-02, -2.9677e-02,
9.5173e-02, -3.0330e-02, -3.7652e-02,
-2.6097e-03, 7.4723e-01, -7.6234e-03,
-3.8826e-02, 1.0191e-01, 3.6589e-03,
-2.6503e-02, -1.1133e-01, -2.2029e-02,
-1.9101e-01, -2.1108e-01, -7.4371e-02,
-7.9349e-02, -1.0405e-01, 5.0315e-02
}
,
{
-4.2606e-02, -8.9001e-02, -6.4006e-02,
1.1132e-01, 7.6609e-02, 8.6417e-02,
7.6477e-03, -1.6416e-02, -8.2094e-02,
1.0779e-01, 2.1837e-01, 1.8094e-01,
-2.6306e-02, -1.2452e-01, 1.2662e-02,
3.1633e-02, 1.8717e-02, 3.1043e-02,
4.0927e-02, 5.0311e-02, 1.1648e-01,
2.2429e-01, 2.0757e-01, 4.3662e-03,
3.6341e-02, -4.7637e-02, 8.3645e-02,
-8.9260e-03, 1.8507e-02, 7.9069e-02,
-1.9411e-01, -8.6847e-02, -3.6639e-03,
4.0328e-02, -3.6821e-02, -8.5387e-02,
5.8173e-02, 5.9991e-02, -3.1398e-02,
1.5818e-01, 3.0861e-01, -2.3818e-02,
1.2176e-01, 6.7520e-02, 8.9401e-02,
-2.8859e-02, -1.2237e-01, -1.0625e-01,
3.1675e-02, 1.4172e-01, -1.4373e-01,
1.4653e-02, 1.0205e-01, 6.2557e-02,
-8.7292e-02, -2.1255e-02, 3.6830e-02,
-5.4417e-02, 3.0501e-01, 1.6897e-01,
-2.2187e-02, -8.9609e-02, -2.2830e-02,
4.9846e-02, 3.3395e-01, -3.1561e-02,
-1.3191e-02, 4.2663e-01, -6.9727e-02,
1.4570e-02, -4.0002e-02, 5.6394e-02,
-8.2547e-02, 1.9249e-01, 1.5591e-01,
1.4536e-01, -1.0409e-01, 1.2382e-01,
1.8189e-01, 9.2917e-02, -1.4394e-01,
-5.6260e-02, -2.7043e-01, 1.5392e-02,
-1.4305e-02, 1.1131e-01, -8.5913e-02,
7.7914e-02, -6.5484e-03, -1.8375e-01,
-1.4059e-01, -5.7339e-01, -3.9073e-02,
-1.1701e-01, -3.1806e-02, 7.7726e-02,
2.1688e-02, 9.9297e-02, 3.8224e-02,
7.9884e-02, 5.2461e-02, 1.0318e-01,
4.0054e-02, 1.4695e-01, 1.2577e-01,
-1.8790e-03, -4.9421e-02, 2.3235e-02,
-8.9820e-02, -1.6994e-01, -1.5986e-01,
2.3436e-01, -1.5346e-01, 1.5014e-02,
-3.9139e-02, -7.9388e-02, -4.9057e-02,
-1.1193e-01, -2.5705e-01, 1.1995e-01,
5.7929e-02, 2.4988e-01, -4.9406e-03,
-3.9363e-02, -1.1691e-02, -1.2236e-03,
-2.0521e-01, 2.1901e-01, 1.5957e-01,
2.1062e-01, -1.4157e-01, -3.4340e-01,
3.8520e-02, -2.0820e-01, 2.4570e-03,
1.7211e-01, 2.0214e-01, 1.3821e-01,
-7.1520e-02, 1.4847e-01, -1.3820e-01,
-2.4712e-02, -1.5925e-02, 1.7403e-02,
-3.7515e-02, 3.0461e-02, -2.7543e-02,
8.6148e-02, -6.1486e-02, 1.2610e-02,
2.9748e-03, 1.1778e-01, 2.9032e-02,
-2.1706e-02, -2.2406e-02, 2.6769e-02,
-3.6965e-02, 2.2180e-01, -4.0929e-02,
-3.2629e-03, 8.3419e-02, -1.4587e-01,
-1.3909e-02, -2.0166e-02, -1.0029e-01,
7.6360e-02, 8.0819e-02, -1.0933e-01,
-5.8919e-02, 2.4745e-02, 3.7375e-02,
-1.1333e-02, 1.4747e-02, -7.8958e-02,
-3.1535e-02, 1.7403e-01, 1.3946e-02,
-3.2038e-02, 5.1151e-02, -6.1063e-02,
-8.6472e-03, -6.9689e-02, 5.6846e-03,
5.7914e-02, -1.9818e-01, -7.5321e-02,
8.7453e-02, 7.8354e-02, 2.1997e-02,
-4.7606e-02, 1.3915e-01, 1.1653e-01,
9.6050e-02, 4.0099e-01, 1.5631e-01,
3.1492e-02, 2.4797e-01, 6.8716e-02,
-6.2664e-03, 9.1754e-02, -5.7244e-03,
1.3538e-01, 1.5366e-01, 9.4916e-02,
-4.2115e-02, -3.6585e-01, -1.4559e-01,
9.1550e-02, -5.4007e-02, 6.7482e-02,
-1.8687e-01, 3.2120e-01, 5.1031e-03,
-6.1205e-02, -5.1780e-02, 1.6442e-02,
-1.2316e-02, -1.3907e-01, -1.4446e-01,
-2.7899e-01, -8.5969e-02, -1.0870e-01,
-2.6157e-01, 8.9532e-02, 3.0958e-02,
-1.5393e-01, -4.2781e-02, -2.0951e-01,
2.0328e-01, 4.5317e-01, -3.0467e-02,
-6.1346e-02, 1.0381e-01, -1.3719e-01,
-9.8572e-02, -1.4035e-01, -1.9431e-02,
2.5542e-02, 3.2609e-01, 1.7983e-03,
-1.0800e-01, -2.9022e-02, 6.2691e-03,
2.8937e-02, -1.3483e-01, -4.1655e-02,
2.0172e-01, 1.4283e-02, 9.6200e-02,
1.9027e-02, 3.1240e-01, -2.9553e-02,
6.2776e-02, 1.3845e-01, 4.5834e-02,
-2.3854e-01, -4.0267e-02, 1.5634e-02,
-1.9246e-01, -3.2332e-02, 3.2442e-03,
-6.1880e-02, -8.8192e-02, -6.0172e-02,
2.5002e-01, 1.5148e-01, 6.4459e-02,
-2.1022e-01, -8.3893e-02, 6.9554e-03,
7.0244e-02, -2.9551e-02, 1.6481e-02,
-3.1036e-02, -2.0026e-01, -8.4748e-02,
-1.3108e-01, -1.3784e-01, 9.4900e-02,
-2.1256e-01, -4.1767e-02, 8.4665e-02,
-4.0235e-01, 1.0604e-01, -3.1827e-02,
-4.9825e-02, -9.1267e-04, 1.5527e-02,
-6.5729e-03, -1.8932e-02, -3.4591e-02,
1.1066e-01, 9.3979e-02, 2.6059e-02,
-1.2395e-01, -2.4768e-01, -1.6304e-01,
8.8329e-03, -2.1606e-02, -4.0878e-02,
-1.5581e-02, -1.4829e-02, -1.5959e-02,
-1.0463e-04, -4.2903e-03, -4.6657e-02,
2.2995e-02, 1.7917e-02, -9.1404e-02,
-1.2326e-01, 1.4582e-01, -7.0959e-02,
-1.8058e-02, -8.5228e-02, 4.2799e-02,
-2.2829e-03, 8.6577e-02, -1.1909e-01,
-1.8061e-01, 1.1166e-01, -8.2255e-02,
-1.3190e-01, 7.7123e-02, 2.3224e-02,
1.8661e-02, 2.4461e-02, 3.6060e-02,
-4.5224e-02, -1.7672e-01, 1.6080e-01,
-4.2175e-01, -2.2557e-01, -1.0719e-01,
-2.9506e-02, 9.5020e-02, -6.6465e-02,
-7.2627e-02, 3.1236e-01, 5.5764e-02,
-2.8789e-01, -1.8915e-01, 9.0825e-02,
-5.8618e-02, 6.4082e-02, 4.8461e-03,
-5.9405e-02, 3.2644e-01, -7.1278e-02,
-1.8084e-01, 2.0858e-02, -9.3690e-03,
-7.6565e-03, -9.6854e-02, 7.6121e-03,
1.4791e-01, 4.5612e-01, 1.9889e-02,
-5.5498e-02, -1.1266e-01, 2.2790e-02,
-3.8821e-02, -1.5780e-02, 1.2549e-02,
-3.8232e-02, -2.8870e-01, 2.6216e-02,
1.0375e-01, -2.9621e-02, 1.8479e-03,
5.0207e-02, 1.5189e-01, 1.2533e-01,
1.8298e-01, -1.2870e-01, 3.0681e-01,
-1.9571e-02, -8.6302e-02, 9.1121e-02,
1.0113e-01, -1.8362e-01, 3.2642e-02,
1.7034e-01, -3.1077e-01, -4.8737e-02,
5.9144e-02, 5.6052e-03, 3.2360e-02,
-9.0123e-02, 7.7996e-02, 3.6297e-02,
-3.4389e-01, 1.1841e-01, -2.0900e-02,
9.4930e-02, -9.1504e-02, -4.5308e-02,
3.7723e-03, -3.7580e-02, -6.6410e-02,
5.2501e-02, -1.2530e-01, 3.5944e-02,
3.8378e-02, 9.5188e-02, 2.1952e-03,
-2.4333e-02, 2.7977e-01, 5.6961e-02,
-3.0605e-03, 8.3684e-02, 4.4848e-03,
-7.8935e-02, -1.9544e-01, -5.3311e-02,
-2.6595e-02, 1.2278e-01, -3.1659e-02,
-1.0103e-02, 4.7763e-01, 2.5359e-02,
8.1397e-02, 3.0548e-01, 9.7097e-02,
3.6232e-02, -1.1091e-01, 1.2841e-01,
1.9277e-01, 2.9322e-01, -1.6740e-01,
1.2107e-01, -6.2883e-02, 4.0603e-02,
-1.5750e-01, -8.6183e-02, -1.4194e-01,
1.1932e-01, -3.9175e-01, -5.4495e-02,
-1.4001e-02, -2.0594e-01, -8.2683e-02,
8.6156e-02, 2.1499e-02, 2.2080e-01,
5.5703e-02, -3.6307e-01, 8.3129e-02,
8.9280e-02, -3.5897e-02, 1.6106e-01,
9.1171e-02, -3.1102e-01, 1.2425e-01,
1.0278e-01, -3.1014e-01, -6.9138e-02,
8.0839e-02, -3.6183e-02, 1.0341e-01,
-1.8334e-01, -5.3700e-02, 2.3336e-01,
-1.4464e-01, -5.0320e-01, -2.9836e-02,
-1.7225e-01, -3.9499e-01, -1.7321e-01,
1.7510e-01, 1.7897e-01, -2.6518e-01,
2.3638e-01, 5.0270e-01, -4.9731e-03,
2.2603e-01, 2.5317e-01, 2.4079e-01,
-1.3159e-01, 1.5638e-01, 1.2480e-01,
-6.2164e-02, 7.9458e-02, -9.4804e-02,
8.5690e-03, 7.4971e-03, 8.6630e-02,
-1.3148e-02, 6.8660e-02, -7.4230e-03,
2.9702e-02, 1.2036e-01, 9.5504e-02,
-3.2694e-03, 8.6722e-02, -6.2433e-02,
3.2527e-01, 3.2087e-01, -9.4429e-05,
1.3556e-01, -7.0413e-02, 2.9383e-02,
2.0617e-02, 3.3218e-02, 4.4898e-02,
-4.8260e-01, -2.1329e-01, 1.5890e-02,
-2.6600e-01, -8.8519e-02, -4.3800e-02,
-1.7299e-01, -2.0757e-01, -2.6658e-01,
6.9707e-02, -4.4700e-02, 6.5570e-02,
2.3992e-01, 1.5078e-01, 2.8713e-02,
-9.1197e-02, 1.9765e-02, -1.8751e-02,
-9.9277e-02, -3.1437e-01, 4.0730e-02,
2.4208e-02, -8.8322e-02, -1.6245e-01,
1.3037e-02, -3.4708e-02, -4.4285e-02,
-1.3592e-01, -1.3575e-01, -7.4546e-02,
1.4670e-01, -1.3366e-01, 2.1553e-03,
8.1235e-03, -1.2068e-01, -5.7287e-02,
1.8015e-01, 2.1390e-01, 8.6923e-03,
2.8833e-01, 6.6345e-02, 1.4578e-01,
2.2338e-01, 2.6453e-01, -2.9112e-02,
1.4018e-01, -9.2824e-02, -2.2795e-02,
1.2360e-01, 2.2527e-01, -1.1817e-01,
-3.8872e-02, -1.9982e-02, -7.7514e-02,
1.7744e-03, 3.1736e-02, 4.5882e-02,
-2.5222e-02, 2.4298e-01, -3.8596e-02,
1.2545e-02, 3.1872e-02, 7.1925e-02,
7.9782e-02, -1.5533e-01, -1.4619e-02,
-1.2223e-01, -1.8631e-03, -9.8832e-02,
-1.6815e-02, -8.1440e-02, 6.8038e-02
}
,
{
2.3898e-02, 1.2411e-02, -3.2770e-02,
-2.6029e-01, 3.2690e-01, -1.8246e-01,
1.1224e-02, 8.0193e-02, -5.0412e-02,
-9.3849e-02, 2.0325e-02, 2.6309e-02,
1.2266e-02, 1.7698e-01, 2.7049e-01,
1.2918e-01, 2.0190e-01, 2.7352e-01,
-7.2100e-02, 1.3357e-01, -1.3702e-01,
2.2527e-01, 1.5821e-01, -2.3104e-01,
1.0182e-02, -1.5499e-01, 7.1906e-02,
1.5865e-01, 7.0950e-02, -6.3336e-02,
2.2661e-01, -4.2997e-01, -4.2013e-01,
1.7549e-02, -1.3142e-01, -3.1663e-01,
1.3617e-01, 1.4229e-01, -1.0707e-02,
-1.0986e-02, 2.8816e-01, -3.6239e-01,
2.2579e-02, -1.4332e-02, 7.1339e-03,
-1.4357e-01, -9.7608e-02, 1.4646e-01,
-5.3856e-02, 3.3898e-01, -2.4936e-01,
-2.9500e-02, 2.1799e-02, 1.1901e-02,
3.6996e-02, 2.1291e-02, 3.2150e-02,
9.8375e-02, 2.4476e-01, 2.2896e-01,
1.8392e-01, -7.4510e-02, -1.0152e-01,
4.4757e-02, -4.8053e-03, -6.7254e-02,
-4.8370e-02, -7.8975e-02, -3.6007e-01,
-3.8160e-02, 8.7707e-02, -1.4986e-01,
-8.7544e-03, -4.3522e-02, 7.3822e-02,
-1.4523e-01, 1.1433e-01, 4.4109e-02,
-1.6025e-03, 2.5459e-02, -9.3562e-02,
-2.9192e-02, -1.0975e-01, -5.0943e-02,
-1.1215e-01, 1.9907e-01, 7.9934e-02,
3.7066e-02, 3.0796e-01, -1.4034e-01,
-8.2315e-02, -2.0182e-02, -1.2824e-02,
-4.8007e-03, 1.2655e-01, -2.5157e-02,
2.7796e-02, -4.3032e-02, 2.5397e-02,
6.9377e-02, 2.3642e-01, 1.2713e-01,
2.7878e-02, -1.5325e-01, -1.4871e-01,
1.5800e-02, -4.5935e-02, 1.7370e-01,
4.8058e-02, -1.8725e-01, -6.7048e-03,
-1.3932e-01, -6.0768e-02, -1.6976e-01,
-2.1189e-02, 1.0311e-02, -2.2970e-02,
-7.0546e-03, 7.9481e-02, 1.2146e-02,
4.2666e-02, 3.5383e-01, 1.4381e-01,
5.4384e-02, -9.3862e-02, 4.8870e-03,
2.1141e-02, -6.6826e-02, -1.8526e-01,
1.3309e-01, 3.3452e-01, 1.1058e-02,
-1.6967e-02, 1.1094e-01, 5.3230e-02,
3.0409e-02, -4.7613e-02, -1.7737e-01,
-1.6678e-02, -7.8644e-02, 1.1743e-01,
7.3322e-02, -1.1354e-01, -1.5737e-02,
-1.2397e-03, -1.4685e-02, -1.0192e-02,
1.6045e-01, 3.6331e-02, 1.2219e-01,
1.3123e-01, 5.7578e-02, 1.0291e-01,
1.7424e-01, 1.0688e-01, 1.4263e-01,
8.9942e-02, -2.7141e-02, 3.1238e-02,
-4.0240e-02, -1.0930e-01, -2.1276e-01,
1.0357e-01, 5.7673e-02, 1.0356e-02,
-2.0864e-01, -1.9405e-01, 2.5094e-01,
-4.8277e-03, -1.3758e-01, 1.1562e-01,
-1.0358e-01, 2.0631e-01, -9.1445e-03,
-1.7602e-01, 1.0200e-01, 3.0032e-02,
-1.1495e-02, -4.5077e-02, -6.4748e-02,
-2.3072e-02, -3.2342e-02, 1.4503e-02,
-3.7052e-02, -1.2206e-01, 5.5395e-02,
2.8331e-02, -4.2812e-03, 6.9807e-02,
4.3593e-02, -6.7373e-03, 1.2760e-02,
3.2896e-03, -2.4007e-01, -5.2920e-02,
2.5193e-02, -2.1480e-01, 8.4654e-02,
2.2642e-02, 8.2132e-02, -2.3864e-02,
-2.9726e-01, 8.0405e-02, -1.3190e-02,
-1.1310e-01, -4.4342e-01, -6.3536e-02,
-6.7090e-02, 1.1797e-01, 1.5315e-01,
7.7829e-02, -1.4494e-01, 1.0233e-01,
9.7059e-02, 1.2772e-01, -2.4394e-02,
-2.6179e-02, 2.6721e-02, 1.1707e-02,
-4.8024e-02, -2.3366e-01, -1.6978e-01,
-2.4402e-01, -2.8572e-01, -2.4053e-02,
-2.7451e-03, 7.1959e-02, 4.4706e-02,
-1.9900e-01, 2.1353e-01, 1.0625e-01,
4.0246e-01, 4.2323e-01, 3.4046e-02,
-1.6943e-01, -2.0221e-01, -1.6369e-01,
1.3882e-01, 2.1717e-01, -1.3581e-01,
1.3975e-01, 1.1980e-01, 1.8888e-02,
-1.8110e-01, -2.6143e-01, -1.0109e-01,
5.5844e-02, -1.2175e-01, 3.4447e-02,
8.9688e-02, 2.4641e-01, 2.3287e-01,
-5.8259e-02, -1.3656e-01, -1.3936e-02,
-8.3429e-03, 2.3026e-01, 1.2302e-01,
-2.2969e-02, 6.0932e-02, 3.4749e-02,
1.2910e-01, 2.4008e-01, 1.8908e-01,
-5.8776e-02, 3.8121e-01, 8.1312e-02,
9.1175e-02, -1.8729e-02, -4.6156e-02,
3.7493e-02, -3.5877e-02, -9.9651e-03,
1.5864e-01, 1.3611e-01, 6.7880e-02,
2.2216e-01, 9.3697e-02, 7.4782e-02,
-1.0861e-01, -2.5824e-01, 6.6455e-02,
9.2238e-02, -2.3448e-01, -3.4057e-01,
-2.9658e-01, 9.4698e-03, 1.9315e-01,
-5.2396e-02, 1.2310e-01, -5.2917e-02,
-4.3708e-03, 1.9560e-01, -2.4309e-02,
-6.7388e-02, -8.8839e-02, -2.0907e-02,
4.6550e-02, 3.4119e-02, 6.0977e-02,
-1.0054e-02, 1.4411e-01, 1.5622e-01,
1.7401e-02, 2.5685e-01, -9.1853e-03,
-4.4530e-02, -1.8623e-01, -8.4557e-02,
9.5962e-02, 2.6491e-01, 1.7854e-01,
-2.0547e-02, -1.2023e-01, -7.6897e-02,
-1.3418e-01, -1.4960e-01, 1.6292e-01,
-1.7275e-01, -6.0181e-02, -2.7034e-02,
-7.4189e-02, -3.5566e-02, 1.3995e-01,
3.0758e-02, 3.3476e-02, 6.9837e-03,
-6.1089e-02, -9.6021e-02, 7.1716e-03,
1.0389e-01, 4.7963e-02, 9.5921e-02,
4.4569e-02, 1.2230e-01, -1.4417e-01,
-1.2825e-02, 3.1980e-01, -3.5905e-01,
-1.2557e-01, -7.5283e-02, -1.2343e-01,
1.9791e-01, 7.9003e-02, 3.1163e-02,
1.0969e-01, 1.6839e-01, -2.5816e-01,
-1.2617e-01, 1.3686e-01, -2.1078e-01,
-2.1870e-02, -1.8378e-01, -2.8893e-01,
-8.2523e-02, -3.0475e-02, 9.6007e-02,
1.0669e-01, -1.4581e-03, 3.2441e-01,
-8.1872e-03, 1.1690e-02, -4.0179e-02,
-1.0835e-01, 3.6112e-01, -4.5990e-02,
-1.2355e-01, -1.3372e-01, 3.8136e-02,
-9.1530e-03, 3.5432e-02, 4.3950e-02,
-8.6859e-02, 1.5887e-01, 1.2796e-02,
1.3554e-02, -1.5669e-01, -1.4371e-02,
-4.6609e-02, 1.7114e-01, -7.8284e-02,
1.7611e-01, 4.1204e-01, 9.3281e-02,
1.1420e-01, 1.2951e-01, -7.6025e-02,
-5.4831e-02, 9.7574e-02, 3.2839e-02,
3.8475e-02, -6.0247e-02, -2.9627e-02,
-2.4367e-02, 1.3143e-02, 4.7017e-02,
2.3800e-02, -2.4046e-02, -5.7044e-02,
2.7280e-02, 7.8573e-01, 1.0079e-02,
6.4100e-02, 5.1584e-02, 7.9653e-03,
-8.9480e-02, -1.6207e-01, -8.9418e-02,
-3.5589e-02, 3.5903e-01, -1.8381e-01,
9.2356e-02, 8.8046e-02, -5.0229e-02,
1.8609e-02, 1.1243e-01, 5.2599e-02,
-1.3374e-02, -3.3097e-01, 6.5346e-02,
2.6760e-01, -1.0281e-01, 1.1607e-02,
7.6576e-03, -3.5957e-02, 3.1924e-02,
-7.0088e-02, 9.1241e-02, 1.2827e-02,
3.7165e-02, 7.0273e-03, -7.3945e-04,
-6.5406e-03, 7.2666e-02, -5.7348e-02,
-1.9100e-01, -7.4449e-02, -1.2496e-01,
1.5299e-01, -8.8047e-02, -2.1810e-02,
-3.0241e-02, -7.4310e-03, -8.7682e-02,
-2.2479e-02, 9.6008e-02, -8.4539e-02,
-2.8915e-02, 1.7538e-01, -3.7735e-02,
-9.8463e-03, -6.9618e-02, -2.6095e-01,
9.9950e-02, 5.0534e-01, -1.8812e-01,
-1.1986e-01, 7.1166e-02, -2.4769e-02,
8.8529e-02, 9.8348e-02, 2.1136e-02,
-9.0337e-03, 1.3679e-01, -1.2115e-01,
-6.2478e-03, 1.1436e-01, -3.4610e-02,
-2.7350e-02, 1.0702e-01, 1.6220e-02,
1.0912e-02, 1.0953e-01, 8.6762e-02,
2.9348e-03, -2.2035e-02, 1.2376e-01,
7.0102e-02, -1.0945e-01, -1.6640e-01,
-3.9916e-03, -2.6658e-02, -9.7031e-02,
-3.0047e-02, 1.6631e-03, -5.5031e-02,
-7.9624e-02, 1.9976e-01, 1.9582e-01,
2.1377e-01, 3.5835e-01, 1.7012e-01,
-9.7751e-02, 4.9143e-01, 1.0988e-01,
8.4055e-02, -7.3187e-03, -9.8808e-02,
5.0590e-02, -8.9291e-02, -6.6857e-02,
9.6737e-02, -3.0699e-01, 2.2889e-01,
2.6727e-40, -5.2704e-40, -4.5038e-40,
-3.3108e-40, 5.2330e-40, -1.2724e-40,
-3.2957e-40, -5.8613e-40, 2.1618e-40,
-4.3882e-40, -3.3950e-40, 5.9372e-40,
2.7277e-40, -1.3741e-40, -3.3597e-40,
5.0687e-40, 4.7873e-40, -3.2116e-40,
-6.1388e-40, -6.0790e-40, -5.2667e-40,
-5.6524e-40, -6.1696e-40, -5.9796e-40,
1.5824e-40, -5.2002e-40, -5.8960e-40,
-5.9860e-40, 3.6419e-40, 2.9975e-40,
-5.8988e-40, 3.3994e-40, -5.0611e-40,
3.6410e-40, 2.9550e-40, 4.7468e-40,
2.7503e-40, -3.4103e-40, 6.0339e-40,
-1.7691e-40, 6.7170e-41, 1.7101e-40,
2.7166e-40, 4.3023e-40, 2.7735e-40,
-3.1937e-40, -4.9247e-40, -6.2495e-40,
5.2938e-40, -3.3702e-40, 1.4976e-41,
1.4031e-40, -4.6995e-40, -5.2409e-40,
2.5460e-40, 2.6670e-40, -4.5339e-40,
4.2896e-40, -5.7141e-40, -1.7003e-40,
2.3597e-40, 1.3748e-40, 4.6163e-40,
4.0680e-41, -6.1642e-40, 2.7304e-41,
5.2250e-40, -3.9481e-40, -6.1808e-40,
1.9462e-40, 2.6005e-40, -2.7281e-40
}
,
{
1.3625e-02, -8.5594e-02, -1.9901e-01,
-6.4636e-02, -1.9030e-02, 4.1963e-02,
-7.5507e-02, -2.4474e-01, -4.2621e-02,
2.8195e-02, 7.3102e-02, -9.3331e-02,
7.7093e-02, 1.7800e-01, -7.6451e-02,
2.8565e-02, -1.3540e-01, -1.9169e-01,
-1.8583e-02, 3.0135e-02, 8.1094e-03,
-1.2835e-01, -1.8041e-01, -8.9020e-02,
-8.2731e-02, 3.7861e-02, -9.4014e-02,
4.6595e-02, 2.2052e-02, -1.5867e-01,
-1.0937e-02, 1.0030e-01, -1.3018e-01,
-9.1844e-02, -1.7508e-01, 2.2087e-01,
-9.3080e-02, 9.8069e-02, -7.0154e-02,
-6.6063e-02, -2.2142e-01, 4.1058e-01,
-6.5947e-02, -5.4662e-02, 9.9412e-02,
-5.1938e-02, 3.0932e-03, 1.8126e-01,
3.6701e-02, -3.0349e-01, 9.9839e-02,
2.5810e-02, 2.3644e-01, -2.4461e-01,
2.1054e-01, 1.5630e-01, -1.9587e-01,
5.0146e-02, -1.8844e-02, 3.6675e-01,
-4.0389e-03, 3.1596e-01, 3.6771e-03,
-2.2256e-40, 1.4272e-40, -2.0732e-40,
5.5913e-40, -6.0538e-40, 1.2791e-40,
4.5825e-41, 4.1080e-41, -1.8211e-40,
2.2687e-01, -5.8992e-02, 4.7796e-03,
6.0603e-01, 2.7961e-01, 1.5973e-02,
2.3035e-01, 1.3031e-01, -9.9280e-03,
-4.7235e-02, 5.1773e-02, -4.8586e-02,
-1.4510e-01, -1.7336e-01, 1.0981e-01,
-2.0303e-01, -1.6008e-02, -1.8524e-03,
-2.3440e-01, -3.2373e-02, -6.7911e-02,
-1.6256e-01, 1.2316e-01, 2.7859e-02,
8.5089e-04, -3.7401e-02, -1.8672e-02,
-1.0418e-01, -7.8407e-02, -1.8413e-02,
8.2834e-02, 2.3128e-01, 3.2983e-02,
3.1099e-02, -6.4485e-02, -8.1659e-02,
1.9152e-01, -1.9609e-02, 2.7364e-02,
1.0458e-02, -1.2507e-01, 4.1334e-02,
-4.6215e-02, 5.6944e-02, 2.1477e-02,
-1.4934e-01, -6.8383e-02, 2.7957e-02,
-3.6846e-01, 4.8766e-01, 6.4000e-02,
-3.9621e-02, -8.1667e-03, 4.5997e-02,
-6.1391e-02, 1.2976e-02, -3.2152e-02,
7.5767e-02, 1.2931e-01, -2.3498e-02,
4.0320e-02, 1.3876e-02, 1.1022e-02,
-6.2401e-41, 5.8564e-40, 3.9473e-40,
-5.6890e-40, -2.6022e-40, -2.9841e-40,
-4.2456e-40, -1.1546e-40, 4.4955e-40,
-4.2969e-02, -1.0995e-01, 1.3021e-01,
1.0142e-01, 5.2225e-01, -5.5486e-02,
-7.2349e-02, 8.5470e-02, 2.3438e-02,
-1.0690e-01, -1.4370e-01, -1.2632e-01,
2.8754e-02, 1.1662e-01, 5.6515e-02,
-1.5726e-01, -1.4945e-01, -4.4956e-02,
1.6574e-01, -5.6894e-02, -2.0851e-01,
8.1498e-03, -2.5441e-01, -1.4412e-01,
-1.0959e-02, -2.5811e-02, 8.8934e-02,
6.3594e-02, -9.3314e-02, 7.8247e-02,
4.6795e-02, -2.2774e-01, 7.1041e-02,
1.4830e-01, 1.9911e-01, 5.1978e-02,
7.4936e-02, 2.3104e-02, 6.3928e-02,
-1.3118e-02, 6.7544e-02, 7.9514e-02,
2.2335e-02, -9.9442e-02, 6.8070e-03,
2.4395e-02, -3.3576e-02, 5.5508e-02,
-4.0872e-02, 5.4501e-02, -5.7051e-02,
8.6621e-03, -1.5361e-01, 1.2630e-01,
-2.2344e-01, 1.3335e-01, -1.1688e-01,
-2.4232e-01, 3.3319e-01, -1.2580e-01,
-2.2169e-02, 2.0594e-01, 2.6521e-02,
4.1883e-40, -3.4540e-40, 4.9152e-40,
-1.5711e-40, 3.3927e-40, -5.5069e-40,
5.5831e-40, -5.2011e-41, 1.0351e-40,
1.7989e-01, 2.3787e-02, 5.7447e-03,
4.8748e-01, 3.0152e-01, 3.5517e-02,
2.2155e-01, 1.8812e-01, 3.0994e-02,
7.8657e-02, -7.1135e-02, -5.8293e-02,
-1.4220e-01, 1.6004e-02, -2.5180e-02,
-1.6811e-01, -2.3441e-01, 1.4810e-02,
5.3140e-02, -1.2904e-01, -1.5105e-02,
5.4525e-02, -1.5418e-01, 6.6507e-02,
8.3947e-02, -1.1975e-01, 5.3902e-02,
8.0834e-02, -2.4321e-01, -1.0282e-03,
3.1276e-03, 3.2495e-01, -1.3238e-02,
4.5285e-02, 5.8777e-02, -1.3231e-01,
-6.0928e-03, 8.7145e-02, 6.2031e-02,
-5.3919e-01, -6.8810e-02, -1.0755e-01,
-2.2571e-02, 2.6237e-02, -6.8731e-03,
-6.6771e-02, -2.0586e-01, 4.7722e-02,
-3.4968e-01, 3.0912e-01, 2.4487e-01,
-4.9537e-02, -5.2779e-04, 6.7840e-02,
1.7583e-02, 3.3222e-02, -5.7070e-02,
-2.3250e-01, 1.4470e-01, -4.9895e-02,
3.3147e-02, 8.6319e-02, 4.4719e-02,
-6.9454e-41, 2.0308e-40, -1.1977e-40,
5.9045e-40, -2.6129e-40, 4.8298e-40,
4.7288e-40, 6.0736e-40, 2.2462e-40,
-4.0294e-02, -9.1437e-03, -2.4926e-02,
-2.1269e-01, 1.1602e-01, 1.4383e-02,
5.1456e-02, 6.9047e-02, 1.6519e-02,
6.3737e-02, -9.0181e-02, 7.0716e-02,
7.0061e-02, 7.9046e-02, -4.3925e-02,
7.4396e-02, -5.2797e-02, 3.8125e-02,
7.5999e-02, -5.1307e-02, 2.4326e-03,
-3.1716e-02, -1.2567e-01, -3.3898e-02,
8.4925e-02, -5.2404e-02, 2.8535e-02,
9.6844e-03, 4.6980e-02, 3.8552e-02,
-5.7110e-02, 3.2163e-02, 1.5219e-02,
6.6905e-02, -2.7934e-02, 1.4184e-03,
-2.4239e-02, -8.6317e-03, -2.3295e-03,
-2.3065e-02, 1.0076e-01, 2.1562e-03,
-1.3647e-02, -3.4262e-02, 2.5777e-02,
7.6601e-02, 1.3654e-01, 2.1458e-03,
1.4542e-01, 3.6310e-01, 1.6266e-01,
-5.8465e-02, 4.3751e-02, 1.9227e-02,
9.1783e-03, -5.9547e-02, -1.8234e-02,
-5.3399e-02, 1.9218e-01, -4.6238e-02,
-1.9052e-01, 1.4635e-02, 2.9536e-02,
1.4621e-40, -5.5132e-40, -4.6215e-40,
4.3948e-40, -2.7285e-40, -5.5709e-40,
1.9428e-41, -4.0333e-40, -5.4469e-40,
9.3126e-02, -1.3236e-01, 9.9350e-02,
-1.3308e-01, 3.5030e-01, 9.2221e-02,
1.1783e-01, 1.6648e-01, -7.9150e-02,
2.2654e-01, -1.2546e-01, -1.2354e-01,
-1.6457e-01, -6.0740e-02, -3.1069e-02,
-8.3203e-02, -1.8064e-01, 4.6900e-02,
1.2059e-01, -1.0569e-01, -7.1196e-02,
-9.2991e-02, -1.7587e-01, 1.3100e-03,
-1.5492e-01, -1.3849e-01, 1.2245e-01,
-5.5276e-02, -9.7867e-02, 3.5550e-02,
-6.0264e-02, 4.7760e-02, 6.0242e-02,
-5.4096e-03, 2.4646e-01, 6.3592e-01,
5.8559e-02, 6.1117e-02, 8.0334e-02,
-4.4582e-03, -1.2028e-01, 8.7394e-02,
-2.5880e-02, -1.2206e-01, 1.2199e-01,
4.1990e-02, -1.3283e-01, 4.9047e-02,
-4.9532e-02, 2.7688e-01, -4.6064e-03,
-2.8812e-03, -2.4404e-01, 5.8614e-02,
-1.4262e-01, -1.2810e-03, -1.2060e-01,
-8.3595e-02, 5.6532e-02, -7.7556e-02,
-1.3364e-01, -1.3883e-01, -1.2335e-01,
-1.3273e-40, 6.5184e-41, -4.6946e-40,
-4.0031e-40, -1.2807e-40, -3.1584e-40,
1.3009e-40, 2.4187e-40, -1.4202e-40,
-8.8844e-03, 1.0101e-03, -6.0190e-02,
-1.8851e-01, -7.6662e-02, -1.4562e-01,
2.9983e-02, -8.1533e-02, 1.1256e-02,
1.0205e-01, 6.7850e-02, -1.0911e-01,
-1.2846e-01, -5.4605e-02, 6.2182e-02,
-1.0797e-01, -5.1281e-02, -1.2036e-02,
-8.1693e-02, -7.0432e-02, 1.6990e-01,
-1.7329e-01, -2.2084e-01, -3.0977e-02,
8.2771e-02, -3.3089e-01, -1.4842e-01,
1.9576e-02, -1.5953e-01, -1.0348e-01,
6.6014e-02, 6.0094e-01, -6.9891e-04,
7.4969e-02, -1.4250e-01, 4.3221e-02,
1.6796e-02, -6.8125e-03, 4.7028e-02,
-3.3421e-01, -2.2987e-01, 4.2936e-02,
9.3985e-04, 9.0827e-02, 2.4211e-01,
-8.1571e-02, -1.0276e-01, 1.9092e-01,
2.1112e-01, 2.6837e-02, -2.5822e-01,
-1.3290e-01, 1.6135e-01, -2.7672e-02,
3.4465e-01, -8.3286e-03, -6.1936e-02,
2.7406e-01, -6.8357e-02, 1.7426e-01,
-9.0872e-02, 1.2999e-01, 7.2366e-02,
3.0944e-40, -1.2808e-40, 2.9336e-40,
5.5561e-42, 3.0978e-40, 1.0027e-40,
-1.5881e-40, -2.9858e-40, 3.1599e-41,
-9.1935e-02, -2.2666e-04, -6.2821e-02,
-1.8605e-01, 3.0238e-01, 3.2759e-02,
-5.0771e-02, 1.4585e-02, -1.0872e-01,
2.5511e-02, -9.3394e-02, 1.4810e-02,
-6.2906e-02, 9.2472e-02, 1.2845e-02,
-2.9041e-01, -9.6489e-03, -2.7277e-02,
-6.9896e-02, -1.1645e-01, -5.9870e-02,
-2.8037e-02, -2.2649e-01, 5.1781e-02,
-1.4588e-02, 4.8753e-02, -2.8256e-02,
-1.6462e-02, 8.0795e-02, 3.6222e-02,
8.0392e-02, 3.0118e-01, 2.0021e-01,
1.0394e-01, 6.4196e-01, 4.9545e-01,
2.1242e-02, -1.2514e-01, 1.0066e-01,
-4.7676e-02, -2.0736e-02, -5.6951e-03,
-8.3021e-02, 4.6763e-02, 1.7551e-01,
2.0038e-02, 1.8084e-01, 1.3244e-02,
1.0280e-02, 2.8740e-01, 8.9837e-03,
-2.9437e-02, -3.7366e-01, -1.1861e-01,
-4.8248e-03, -1.2970e-01, -1.8680e-02,
1.8458e-01, 5.6509e-02, 1.2734e-01,
1.9423e-01, -3.6960e-01, -2.5555e-02,
6.7959e-41, -3.2251e-40, -3.0631e-40,
-4.0701e-40, 9.7399e-41, 2.2917e-40,
2.0169e-40, 5.7891e-40, -4.1286e-40
}
,
{
5.6253e-02, 1.0118e-02, -8.2749e-02,
-6.4074e-02, 4.0723e-02, 1.1657e-02,
-1.1560e-01, -3.5596e-03, -2.6713e-02,
-7.9090e-02, -2.9223e-01, 1.5759e-01,
6.8756e-02, 1.5738e-01, 1.5413e-01,
-6.1288e-02, -1.2536e-01, -1.5966e-01,
1.1165e-01, 5.0211e-02, -1.0338e-01,
-5.2364e-04, 1.7660e-01, -2.2504e-03,
-1.7697e-01, 1.8500e-02, 2.0693e-02,
-2.5907e-02, -1.4201e-01, 8.4467e-02,
1.1138e-02, 2.1769e-01, -4.2422e-01,
6.5046e-02, 2.6834e-02, 2.9047e-03,
-1.2130e-01, -5.1773e-01, -8.0393e-02,
3.0204e-02, 3.5952e-01, 1.6681e-01,
-9.4720e-04, 7.7291e-02, 8.3039e-02,
3.4689e-01, -1.2389e-01, -2.0666e-01,
-2.9650e-02, 1.1102e-01, -1.4782e-01,
3.2193e-02, -3.9862e-02, 1.6440e-02,
-8.4264e-02, 1.0192e-01, -6.4256e-02,
2.2950e-02, -6.6511e-02, -6.3814e-02,
4.3744e-02, -1.0557e-01, -1.2045e-02,
1.6330e-01, 6.6130e-01, 1.5497e-01,
1.7103e-01, 1.5073e-01, 1.7400e-01,
9.0985e-04, 1.0917e-02, -1.3322e-02,
-6.4273e-02, -6.2178e-02, -7.7223e-02,
-1.0332e-01, -2.1072e-01, -2.2843e-03,
3.2717e-02, -6.3754e-02, 5.0359e-02,
-5.2566e-02, 6.2090e-02, -1.5614e-02,
1.4570e-02, -1.0243e-01, 1.3091e-01,
-2.9988e-02, -7.5897e-02, -9.4541e-04,
-2.7999e-01, -4.7415e-03, 5.6419e-02,
7.0565e-02, -4.9273e-01, -1.2936e-01,
5.5685e-02, -5.8924e-03, -3.1967e-02,
8.8602e-02, 2.9337e-01, 1.3753e-01,
1.0063e-02, 1.6348e-02, 1.0063e-01,
3.6230e-02, 1.7968e-02, -1.1624e-01,
-2.2488e-02, 1.3474e-01, -1.1419e-01,
2.8576e-02, -7.4794e-02, -7.7261e-02,
5.8874e-02, -2.9448e-03, 6.0207e-02,
1.4642e-01, 1.2321e-01, -2.4936e-01,
2.2609e-02, -2.8171e-01, 1.1510e-01,
2.6056e-02, -2.7532e-02, -4.7505e-02,
-2.8762e-02, -1.2610e-02, -8.3766e-02,
-5.0992e-02, -5.7269e-03, -7.0981e-02,
-9.6191e-02, -9.2384e-02, -5.3328e-02,
2.3989e-01, 3.9819e-01, 1.8451e-01,
3.6888e-02, 1.1023e-01, 4.4804e-03,
-4.4140e-03, -4.8275e-03, 2.0018e-02,
-2.4346e-02, -6.5546e-02, -4.6065e-03,
2.2298e-01, 2.8810e-01, 1.4071e-02,
-1.7315e-01, -5.7961e-02, -9.9136e-02,
3.6456e-02, -1.5518e-02, 6.4490e-02,
4.6983e-02, 5.2743e-02, 3.0802e-01,
6.7940e-02, 5.8777e-03, 3.1155e-01,
9.9510e-02, 2.7974e-02, -6.6716e-02,
3.7042e-01, 2.0813e-01, -3.1581e-02,
7.9064e-02, -1.3699e-01, -4.4722e-02,
-8.4753e-03, 8.0676e-02, 1.5771e-01,
-1.1467e-01, 5.6269e-02, 1.1369e-01,
-1.4727e-02, 3.7263e-02, -2.0554e-01,
8.3383e-02, 4.5848e-02, -1.1732e-02,
4.5494e-02, -2.1406e-01, 6.0591e-02,
4.6503e-02, -1.0362e-01, 3.8794e-02,
-4.6633e-01, 1.4504e-01, 1.4999e-01,
2.9642e-01, -4.8807e-01, -1.6012e-01,
1.6708e-01, 9.5313e-02, -7.5981e-02,
-4.2655e-02, 9.2470e-02, -7.7242e-02,
-2.1021e-01, 1.2423e-01, 1.4967e-02,
-5.4129e-02, 7.4355e-02, -4.7068e-02,
-1.6048e-01, 9.8742e-02, 4.4282e-02,
-6.0187e-02, 1.9495e-01, 8.3291e-02,
-7.5190e-02, -6.8429e-02, 3.7391e-02,
5.1413e-04, 1.5098e-01, -1.1549e-01,
1.6875e-01, 1.8040e-01, -1.3162e-01,
7.7101e-02, 2.0816e-01, 7.6289e-02,
-1.7528e-02, 1.4408e-02, 3.7500e-02,
3.8647e-02, 1.6850e-01, 1.7535e-02,
-2.8205e-02, 1.0273e-02, 1.6688e-01,
4.3676e-02, 6.9895e-02, 8.1063e-03,
-2.6117e-01, -1.0920e-01, 5.2209e-02,
-5.2749e-02, -1.7062e-02, -9.6808e-02,
2.7324e-02, 9.1342e-02, -5.0968e-02,
1.0689e-01, 5.0565e-01, 4.6004e-01,
-6.6862e-03, 3.4162e-03, 3.3559e-01,
3.5084e-02, 1.9123e-02, 1.0073e-02,
1.6995e-01, 3.4099e-01, -4.0847e-01,
-5.5317e-03, 4.0230e-02, -2.0305e-01,
-8.9786e-02, 1.9667e-01, 3.8111e-02,
3.0607e-02, -1.9084e-02, -6.5114e-02,
8.5394e-02, -1.3992e-01, 1.4988e-02,
-1.5926e-02, -9.1200e-03, -7.2328e-02,
1.3548e-01, 7.1040e-01, -9.4208e-02,
2.5411e-03, -7.2159e-02, 1.0848e-01,
-8.9029e-02, -8.6339e-02, -2.7546e-02,
6.0378e-02, 2.8401e-01, -6.6550e-02,
-3.0486e-02, 5.0307e-02, -1.1084e-02,
2.9732e-02, 9.9960e-02, -7.7408e-02,
3.4940e-01, -5.6048e-01, 2.9053e-02,
-2.6991e-02, 4.9637e-02, -3.9322e-02,
-1.0418e-02, 1.0931e-01, -6.1609e-02,
3.6057e-02, 9.3866e-02, -1.0339e-01,
-1.8572e-02, -2.0889e-02, -7.4531e-02,
-7.3236e-02, -4.5908e-02, 2.2705e-02,
-1.5148e-02, 2.1735e-01, 2.2477e-02,
-3.4153e-02, -2.6939e-02, -5.0167e-03,
6.6774e-02, 2.0168e-01, -7.5083e-02,
5.6608e-02, 2.2799e-01, -3.7473e-01,
-7.2336e-02, 4.4329e-02, -3.6747e-02,
3.5355e-02, 1.8671e-01, -4.0167e-02,
1.2871e-01, 3.5050e-01, 1.8090e-01,
-6.2429e-02, 6.2184e-02, 6.8804e-02,
-8.0164e-02, -2.4387e-02, -5.0309e-03,
1.0089e-01, -3.0008e-02, 1.7251e-02,
-9.4662e-03, -1.4760e-02, 7.3434e-03,
7.3290e-02, 2.2546e-02, -2.9015e-02,
7.9944e-02, -2.6972e-01, 7.1349e-02,
-1.7026e-02, 1.1461e-01, -4.1288e-02,
-5.3732e-02, -2.4618e-01, -1.2890e-02,
8.6133e-02, 1.9503e-01, 8.2202e-02,
-1.0060e-03, -4.5931e-04, -1.8789e-02,
-4.0843e-02, -7.8149e-03, -6.1464e-02,
-7.9364e-02, -5.9647e-02, -5.4059e-03,
1.9553e-01, -2.4079e-01, -7.9538e-03,
5.3620e-02, 1.4198e-01, 6.5651e-03,
2.3512e-02, -2.6609e-02, -4.6435e-02,
1.2499e-02, 5.1079e-02, -2.2713e-02,
-7.1554e-02, 1.0608e-01, 5.8972e-02,
1.8638e-01, -2.1053e-01, -6.4009e-02,
1.0851e-01, 7.2187e-02, 8.9722e-02,
-4.5365e-04, 1.0826e-01, -6.4141e-02,
-2.3874e-02, -4.6307e-02, -2.7813e-02,
1.8385e-02, 9.4687e-02, 6.8374e-02,
9.4526e-02, 1.4432e-02, 1.5937e-01,
1.1292e-01, -3.4274e-01, -1.0813e-01,
-7.4636e-03, 3.7101e-02, 3.7226e-02,
3.7079e-02, -3.9169e-02, -3.7752e-02,
-7.9021e-02, 8.5978e-02, 1.0958e-02,
-5.8576e-02, 5.5931e-02, 4.8301e-02,
-1.3402e-01, -3.3809e-01, -4.4369e-02,
1.4262e-01, 6.5254e-02, -3.3366e-01,
1.2416e-02, -9.0492e-02, -5.8205e-02,
-1.4886e-01, 4.0598e-02, -1.4219e-01,
2.0223e-03, -2.8673e-01, -3.3622e-01,
1.9191e-02, -2.2104e-02, 1.9048e-02,
6.0021e-02, 2.2520e-01, -5.3972e-02,
1.6226e-01, -2.1918e-01, -5.2117e-02,
-6.2363e-03, 2.0266e-01, -7.3323e-03,
1.1137e-01, -1.9300e-02, -5.4983e-02,
-1.8338e-01, 6.2511e-01, -1.7909e-01,
1.7003e-01, 1.7902e-01, 5.4462e-02,
5.6847e-02, -7.4696e-02, -1.1354e-02,
1.0544e-01, -1.4918e-01, 4.8208e-02,
-5.6262e-02, -2.3303e-01, -2.9916e-02,
-3.3261e-02, 1.3287e-01, 1.9831e-02,
-1.3907e-01, -1.6180e-01, -7.2323e-03,
-5.1689e-02, 6.3121e-02, -1.4480e-01,
1.1143e-01, 4.9625e-02, -5.4369e-02,
-3.9247e-01, 2.3412e-01, -3.6726e-02,
-1.1468e-02, 3.4045e-02, 6.6454e-02,
-5.0103e-02, 6.1740e-02, 4.2922e-03,
1.7669e-01, -8.1250e-03, 6.3694e-03,
-6.7723e-02, 7.4576e-02, 1.0113e-02,
1.1264e-01, -4.4691e-02, -5.3575e-02,
3.4691e-02, -1.2201e-02, -8.4221e-02,
2.3677e-01, 3.9073e-01, 2.4710e-02,
-8.4580e-02, -1.0747e-01, -6.5695e-02,
1.5386e-01, 1.4041e-01, 6.9961e-03,
2.6138e-02, 2.3149e-02, -1.8820e-02,
-3.3541e-02, 3.2089e-02, -1.8916e-02,
1.0564e-01, -7.5319e-02, -5.4282e-02,
-6.9388e-03, -2.0873e-02, 5.6100e-02,
2.3524e-02, -6.4296e-02, 5.8950e-02,
-3.1415e-03, -4.1203e-02, 1.0781e-01,
1.7848e-02, -2.9535e-02, -1.6412e-02,
-4.6649e-02, 8.1277e-02, -5.9918e-02,
8.1522e-02, -9.2037e-02, 8.1039e-03,
-6.5541e-02, 5.1811e-02, -1.4380e-03,
5.0419e-02, 9.3091e-03, -2.8054e-02,
-3.0979e-02, -2.5366e-02, 3.5265e-02,
-3.7730e-02, 5.7574e-02, 3.4683e-02,
4.8819e-03, -2.9519e-02, 3.7740e-02,
6.4546e-02, -3.7272e-01, -8.5393e-02,
-3.0223e-02, -7.7899e-02, 2.7365e-03,
2.2282e-02, -3.3440e-02, 1.9048e-02,
2.3275e-02, -2.1153e-02, -2.0385e-02,
-4.6245e-02, 2.2443e-02, -3.0206e-02,
-2.5302e-02, -1.1418e-02, 4.8228e-02,
5.8367e-02, -4.3062e-02, 2.2814e-02,
-4.6279e-02, 5.0052e-02, 2.2961e-02,
-5.4984e-02, 1.4773e-01, -2.5546e-02,
3.3025e-02, -1.0138e-01, 6.3886e-02,
1.2403e-02, 1.6215e-02, 1.0783e-02
}
,
{
2.5042e-02, -5.3266e-02, 3.8484e-02,
3.7189e-03, 1.0493e-01, 1.4459e-01,
-3.7442e-02, -1.5744e-01, 1.9957e-01,
-1.9203e-02, 1.6256e-02, 4.2906e-03,
-3.1637e-02, 5.0287e-01, -6.9504e-02,
1.4677e-03, -8.9984e-02, -9.0376e-02,
4.0578e-02, 2.4004e-02, 3.4044e-03,
7.5916e-02, -1.3564e-01, -9.0296e-02,
3.4156e-02, 7.2494e-02, -2.0037e-02,
-6.4614e-02, -1.7301e-03, -3.3444e-02,
-2.7950e-01, 7.1351e-01, 4.2825e-02,
2.4797e-02, 5.4162e-04, -8.9676e-02,
3.8002e-02, -2.7692e-02, -1.7757e-02,
1.9356e-01, 1.9598e-02, -1.0862e-01,
2.5734e-02, 1.1703e-02, -7.3912e-02,
-6.0213e-04, 1.6024e-01, -6.4591e-03,
3.1779e-02, -3.1049e-01, 1.2684e-02,
-1.0098e-01, -1.8839e-01, 5.1387e-02,
5.2004e-02, 3.1489e-01, 5.9716e-01,
-7.2238e-02, 3.4332e-01, -2.0655e-01,
1.1013e-03, -5.0328e-02, -4.6118e-02,
9.4442e-04, 2.7964e-02, 1.7672e-02,
-8.6022e-02, -3.8280e-02, 2.8017e-04,
3.3824e-02, -6.7883e-02, 1.0529e-02,
-6.5982e-02, 1.1385e-01, 3.0091e-03,
1.2330e-01, 6.1876e-01, 5.7145e-02,
-4.3835e-02, -6.8186e-01, -1.0917e-01,
3.2006e-02, -2.0627e-03, -6.9043e-02,
7.2219e-02, -3.2393e-01, -2.6657e-02,
1.3523e-02, 1.8099e-01, 4.9168e-02,
7.1367e-02, 9.8283e-02, 1.0425e-01,
2.2286e-01, -5.9374e-01, 1.0014e-01,
6.5700e-02, 1.3618e-02, -7.4045e-02,
1.0481e-01, 3.0734e-02, 1.0431e-02,
-2.1314e-01, -7.2817e-02, 1.2036e-01,
-5.4180e-02, 1.0500e-01, 2.7821e-02,
-5.0657e-02, 8.7702e-02, 7.0234e-02,
9.0349e-02, 1.4905e-01, 1.1612e-01,
5.9924e-02, 2.4928e-01, 1.7078e-01,
-5.9110e-02, -7.4252e-02, 9.8241e-03,
-1.2006e-01, 1.3879e-01, -1.4322e-02,
-7.5463e-02, 1.4407e-02, -6.9202e-03,
7.0279e-02, 1.7065e-01, -2.5150e-01,
-2.6289e-02, 3.8421e-01, -2.2051e-01,
-2.8918e-02, 4.0074e-02, -7.1296e-02,
1.0357e-01, -1.8885e-01, 2.3780e-02,
-1.8884e-01, -4.3326e-01, -1.1465e-01,
3.3497e-02, -1.3462e-01, -3.4127e-02,
-1.2731e-02, 5.4326e-02, -2.6581e-02,
5.1753e-02, 6.8200e-03, 4.3246e-03,
-6.9963e-02, -1.5618e-01, 2.5192e-01,
2.2890e-02, 6.1421e-02, 5.2832e-02,
-9.8369e-02, -1.1452e-01, 1.7420e-01,
2.0392e-01, -1.1322e-01, 9.8462e-02,
-3.3547e-02, -2.8993e-01, 7.0080e-02,
8.2478e-02, -1.9881e-01, 1.2849e-01,
-2.7802e-01, -1.5621e-01, 6.2712e-02,
1.3028e-02, 1.4716e-01, 2.0434e-02,
-4.4071e-01, 3.8359e-01, -1.6655e-03,
-2.0297e-01, 1.5631e-01, 7.7086e-02,
9.6714e-03, -5.5842e-03, 7.9155e-03,
1.4525e-01, -3.2228e-01, 1.1454e-01,
1.4527e-01, -3.0399e-02, -6.7043e-02,
9.4233e-03, -1.1296e-02, -1.0927e-01,
7.9300e-02, 5.5286e-02, -1.1558e-01,
3.8173e-01, -5.4351e-02, -1.7890e-01,
5.4882e-02, 1.5119e-01, 1.8363e-01,
-8.8223e-02, -9.0083e-02, 4.8221e-01,
4.0890e-02, 5.6429e-02, -2.8538e-01,
1.2102e-02, -1.8177e-02, -3.1643e-03,
-6.9064e-02, 3.1853e-04, -7.0113e-02,
9.7308e-02, 1.0691e-01, -6.5919e-02,
-1.4536e-40, -1.7049e-40, -2.6781e-40,
4.5792e-40, 1.4489e-40, 1.3645e-40,
-5.8774e-40, -2.2505e-40, -4.7571e-40,
3.3670e-40, 1.5398e-40, -3.3819e-40,
2.6303e-40, -1.9434e-40, -5.5555e-40,
-4.3830e-40, -2.8750e-40, -3.0788e-41,
5.6364e-40, 3.1307e-40, -2.3064e-41,
2.8909e-40, -5.8115e-40, 2.9852e-41,
-1.9273e-40, -7.5503e-41, -6.0335e-40,
5.8073e-40, 2.9252e-40, -1.3038e-40,
5.2260e-40, 3.8172e-40, -2.0389e-40,
-2.1905e-41, 1.8473e-40, -2.9226e-40,
2.9957e-41, 2.6068e-40, 6.1324e-40,
-4.3013e-41, 5.1421e-40, -4.1157e-40,
2.1416e-41, -1.6614e-40, -3.0843e-42,
-4.3402e-40, 2.8507e-40, 1.1560e-40,
3.8826e-40, -3.0797e-40, -6.0685e-40,
5.4170e-40, -6.1858e-40, 9.3049e-41,
-1.9491e-40, -1.9211e-40, -6.2723e-40,
3.9906e-40, 1.2356e-40, 3.8682e-40,
2.8630e-40, 6.2303e-40, 5.3034e-40,
-4.1904e-40, 4.8916e-40, -3.6125e-40,
-5.5393e-40, -2.4980e-40, -6.1877e-40,
2.7289e-40, -1.8348e-40, -5.6663e-40,
2.5152e-02, -3.2878e-02, 2.1626e-02,
1.9879e-01, 2.9080e-02, -3.0331e-03,
-2.3380e-01, -2.3578e-02, 1.1871e-01,
-3.1824e-02, -5.5095e-02, 3.1338e-02,
-3.2199e-02, -4.3820e-01, 4.1391e-02,
-4.1207e-02, 3.7475e-01, -1.8548e-01,
-1.4460e-02, -8.7834e-02, -3.2343e-02,
2.4023e-01, 7.1916e-01, -1.8559e-01,
-6.7635e-03, -9.4409e-02, -1.7890e-02,
-5.8334e-02, 1.8886e-01, 6.1547e-02,
-2.6152e-01, 6.6722e-01, -1.2486e-01,
-4.8128e-02, 1.0510e-01, -4.2619e-02,
3.0101e-03, 9.6380e-02, 6.6140e-02,
1.0201e-01, -2.3240e-01, -1.8356e-01,
4.0019e-02, 2.2985e-01, -1.2980e-01,
-1.1400e-01, -1.9221e-01, -3.4158e-02,
2.2871e-02, -6.8684e-01, -1.0856e-02,
2.6311e-02, 2.5422e-02, -1.5190e-02,
3.2182e-02, -5.6346e-02, 3.2655e-02,
-1.6912e-02, 8.4264e-02, -7.9521e-02,
1.2788e-03, -7.1110e-02, 8.6585e-02,
-4.2829e-02, 1.0778e-01, -6.8129e-02,
5.8156e-03, -2.3998e-01, 1.9052e-01,
-4.1855e-02, 1.0140e-01, -1.7139e-02,
5.2301e-40, -2.9923e-40, 3.8688e-41,
3.1575e-40, 1.1504e-40, 5.5655e-40,
-3.4499e-40, 2.3050e-40, -6.3766e-41,
1.3282e-40, 4.5849e-40, 3.5308e-40,
-2.6657e-41, 5.9829e-40, 3.2791e-40,
-2.8348e-40, 2.5810e-40, 5.5791e-40,
4.2613e-40, 3.2607e-40, -2.0789e-40,
-3.9054e-40, -2.5608e-40, -2.7638e-40,
4.5027e-40, 2.7065e-40, -4.5593e-40,
1.6336e-40, -2.0391e-40, -5.9017e-41,
-7.9899e-41, -2.9870e-40, 5.6390e-40,
-2.5560e-41, -1.9786e-40, 9.4700e-41,
-7.4049e-41, -2.3902e-40, -2.8497e-40,
-1.8912e-40, -1.5589e-40, 5.5463e-40,
-2.1782e-40, -1.9532e-40, -2.3785e-40,
2.7539e-40, 4.0214e-40, 2.0732e-40,
7.0120e-41, -4.4200e-40, 7.3787e-41,
2.6452e-40, 1.1970e-40, 2.8298e-40,
5.2721e-40, 1.9304e-40, -3.8489e-40,
-3.9759e-40, 2.6184e-40, 1.2594e-40,
1.5831e-40, 3.7179e-40, -3.4915e-40,
-1.7681e-40, -6.9657e-41, -4.0746e-40,
8.0894e-41, 1.6950e-40, -1.0574e-40,
-1.0590e-40, 2.8466e-41, -2.7558e-40,
-5.4027e-40, 4.4355e-41, -3.2144e-40,
-4.8838e-41, -3.8595e-40, 2.5064e-40,
4.0365e-40, -1.0195e-40, 4.8356e-40,
4.4499e-40, -4.4871e-40, -2.4561e-40,
4.1687e-40, 5.2239e-40, -5.7603e-41,
-1.5211e-40, -3.5768e-40, 3.6385e-40,
1.6089e-40, 4.1624e-40, 4.5114e-40,
1.6438e-40, -3.6331e-40, 6.4961e-41,
5.0899e-40, 6.1036e-40, 2.4828e-40,
5.8681e-40, -5.7259e-40, -1.5371e-40,
5.2654e-40, 4.7412e-40, -2.0265e-40,
-4.8621e-41, 4.9497e-40, 3.0176e-40,
4.2235e-40, 4.5381e-40, 4.6501e-40,
-1.6124e-40, -1.9449e-40, 5.1497e-40,
-1.2891e-40, -1.6549e-40, 4.8348e-40,
-2.0735e-40, 1.3423e-41, -4.4109e-40,
-5.4218e-40, -1.1537e-40, -1.1664e-40,
5.6006e-40, 3.4109e-40, -3.1434e-40,
3.4969e-40, -5.3459e-40, 3.9245e-41,
2.4028e-40, 5.7774e-40, -6.2973e-40,
1.8802e-40, -4.6258e-41, -5.0716e-40,
3.4962e-40, -6.2313e-41, -2.7290e-40,
-5.2709e-40, -3.2225e-40, 2.4245e-40,
-3.6300e-40, -2.0794e-40, 4.0541e-40,
-3.5157e-02, 6.8337e-02, 1.6149e-02,
-5.8650e-03, 6.0605e-01, 3.1738e-02,
9.3306e-02, 2.1499e-01, 1.3609e-01,
6.4043e-02, -1.0253e-02, -6.2813e-04,
4.6828e-02, -3.9619e-01, -9.2633e-03,
-8.1752e-02, 9.9083e-02, 4.4296e-03,
7.1594e-02, 3.9860e-02, 8.1088e-02,
1.7750e-01, -1.2381e-01, 1.4476e-01,
2.3416e-02, 1.2819e-01, 1.0816e-02,
5.5296e-02, 5.5199e-02, -2.1253e-02,
1.7214e-01, 2.0542e-01, -3.7859e-03,
1.2831e-01, 3.2087e-02, -5.1851e-02,
-2.3686e-02, 1.2271e-01, -1.6009e-02,
-2.0176e-01, 7.4757e-01, -3.4526e-02,
-4.7055e-02, -3.7099e-01, -1.9216e-01,
-8.8030e-02, -2.5853e-02, -1.7087e-02,
-2.0533e-01, 1.5214e-01, -1.8639e-03,
-1.1236e-01, -2.4612e-01, 6.3094e-02,
2.3829e-02, -5.0078e-03, 5.3854e-02,
-9.6934e-03, 3.7047e-02, 4.7325e-01,
5.6975e-03, -8.6108e-02, 6.5569e-02,
-3.9768e-03, 2.0580e-02, -4.1931e-02,
6.9577e-02, -1.0416e-01, -2.5037e-03,
-1.9198e-02, 6.2027e-02, -1.0833e-02
}
,
{
-5.3430e-40, 2.5717e-41, 5.7504e-40,
7.1679e-41, 6.2076e-40, -8.4201e-41,
-4.2111e-40, 3.4851e-40, 1.3009e-40,
3.3016e-40, -7.6473e-41, -1.8392e-40,
2.2773e-41, 1.2087e-40, 1.1565e-40,
6.5190e-41, 2.0075e-40, 2.5796e-40,
5.0575e-40, -2.6261e-40, -2.5486e-40,
-3.9886e-40, -6.0644e-40, 2.9264e-40,
8.9627e-41, -3.0550e-40, -2.3456e-40,
-4.8855e-40, -4.8867e-40, -5.0492e-40,
-1.0706e-40, 5.3827e-40, -1.6413e-40,
1.4714e-40, -3.4024e-40, -4.4881e-40,
3.2361e-40, 2.0858e-40, 3.8836e-40,
2.0949e-40, 5.9633e-40, -1.7878e-41,
-4.1980e-40, -4.4383e-40, 2.7859e-40,
7.0317e-42, -8.9973e-41, 5.8700e-41,
1.8411e-40, -3.6097e-42, 2.7362e-40,
5.4341e-40, 6.0305e-40, 5.9004e-40,
5.2692e-40, -6.3449e-41, 1.2075e-40,
7.5297e-41, 8.9267e-41, 4.9139e-40,
-1.4609e-40, 3.1821e-41, 2.3288e-40,
3.1748e-41, -3.8052e-40, -2.4322e-40,
-5.7959e-40, 6.1966e-40, 3.4964e-40,
-5.6776e-40, -6.8327e-41, -3.3777e-41,
-5.9108e-02, 3.5468e-02, -2.8772e-02,
6.8602e-01, 1.4232e-01, 1.1954e-02,
-3.8234e-02, 7.1837e-02, -1.8832e-02,
4.7972e-02, 1.1623e-02, -2.1687e-03,
-4.9744e-01, 2.7751e-01, 1.7862e-02,
7.4286e-02, 3.1309e-03, 1.1030e-03,
-6.1084e-01, -8.5679e-03, 9.4956e-03,
-4.5246e-01, -1.2126e-01, -3.7368e-02,
2.5624e-02, 1.2087e-02, -1.5431e-02,
6.0313e-40, 1.8404e-40, -7.2006e-41,
6.0697e-40, -9.1199e-41, 5.8965e-40,
5.4830e-40, 1.3014e-40, 1.5585e-41,
-3.6027e-02, -6.3004e-03, 1.5237e-02,
6.0743e-01, 9.2523e-02, -4.7370e-03,
3.4407e-02, -8.3823e-02, 1.6898e-02,
5.7527e-40, -5.0621e-40, -2.9035e-42,
3.8199e-40, -2.2913e-40, -5.0895e-40,
4.0079e-40, 5.1744e-40, -3.3006e-40,
6.1448e-40, 1.2347e-40, -3.1673e-40,
7.3214e-41, 5.2143e-40, -2.6071e-40,
1.6109e-40, -2.0298e-40, 9.5817e-41,
6.9876e-02, -2.9290e-02, 3.2294e-03,
-4.2632e-01, 1.5789e-01, 3.6809e-02,
2.1220e-02, 1.6531e-04, 6.8502e-03,
-6.5221e-02, 8.8059e-02, 5.7934e-03,
-1.7280e-01, 1.5303e-01, 1.7663e-01,
-1.2908e-01, -1.1749e-01, 5.7887e-02,
1.0685e-01, 2.2763e-01, 3.3796e-02,
1.7629e-01, 3.8882e-01, 6.3540e-02,
6.4707e-02, 1.0046e-01, -8.1911e-02,
-3.9718e-03, 4.6416e-02, 4.7357e-02,
7.3694e-02, -1.6444e-01, 2.4784e-02,
-3.0808e-03, 2.7399e-02, -2.9216e-04,
2.4428e-40, -3.0160e-40, 2.3184e-40,
-4.9114e-40, 5.6685e-40, -3.6020e-40,
2.2618e-40, -2.8145e-40, 2.1149e-40,
2.3559e-02, -8.6949e-02, -3.8350e-02,
-2.9547e-01, 7.0187e-01, -8.3979e-02,
-2.8576e-02, -1.6538e-01, -5.2465e-02,
-1.6016e-40, -1.4760e-40, -2.1977e-40,
4.3180e-40, 4.1724e-40, -1.2969e-40,
-1.3023e-40, -1.0095e-40, -1.5965e-40,
-4.0721e-40, -4.1747e-40, -4.3706e-40,
-4.2838e-40, -4.5507e-40, -4.6023e-40,
-3.7435e-40, -3.9889e-40, -4.2249e-40,
-1.2429e-01, -3.5062e-01, -1.1418e-01,
-4.0787e-02, 6.1690e-01, -1.0085e-01,
1.6098e-02, 8.5100e-02, -1.1621e-02,
3.0709e-40, -4.4880e-40, -2.7530e-41,
-1.2649e-40, -5.3936e-40, 5.0995e-41,
4.4003e-40, -2.1211e-40, -6.6422e-43,
-1.8989e-40, -3.6631e-40, 4.1392e-40,
-3.9057e-40, -5.5599e-40, 6.9979e-41,
3.8983e-40, 5.6737e-41, 2.3997e-40,
-9.4862e-41, 2.4256e-40, -3.7040e-40,
1.6374e-40, 3.5439e-42, -1.0385e-40,
3.6145e-40, -2.4342e-41, -3.0115e-40,
-6.0009e-40, -5.2386e-41, -1.2504e-40,
2.9237e-40, -1.2290e-40, -1.1502e-40,
-3.5887e-40, -6.1810e-40, -1.6289e-41,
2.5438e-41, 5.1229e-40, -2.4915e-40,
1.3516e-40, 3.3553e-40, 8.5831e-41,
-8.5122e-41, 3.7625e-41, 2.5507e-40,
-1.5828e-40, 2.1991e-40, -1.5628e-40,
-5.3110e-40, 5.1395e-40, -5.8162e-40,
-3.1571e-40, -5.5139e-40, 1.2299e-40,
4.8855e-40, -9.3940e-41, -6.2534e-40,
-3.3275e-40, -2.4982e-40, -1.2956e-40,
-6.0047e-40, -1.8712e-41, -7.3274e-42,
-2.8519e-40, 3.5541e-40, 2.4485e-40,
-8.1435e-41, -2.7091e-40, 7.1206e-41,
-5.9519e-41, -2.5552e-40, -3.6189e-40,
7.7038e-02, -1.6317e-02, -2.4118e-02,
-4.3086e-02, -2.1512e-01, 1.2288e-01,
1.8237e-01, -1.5438e-01, -1.1346e-01,
-4.6141e-02, -4.0750e-02, -5.6414e-04,
-1.5640e-01, -3.4506e-01, -1.4441e-02,
-2.0278e-01, -3.1403e-01, -6.2542e-02,
-1.9622e-02, 1.6348e-02, 6.9859e-03,
-9.3142e-02, 1.0368e-02, -5.6585e-02,
8.4213e-02, 1.0776e-01, -1.0315e-01,
8.7873e-41, -5.3947e-40, 1.1714e-40,
7.5534e-41, -1.1871e-40, -5.4012e-40,
3.8269e-41, -1.4913e-40, -3.1802e-40,
-3.4707e-02, 1.2518e-02, 9.4679e-03,
1.2254e-01, 1.9394e-01, 2.6530e-02,
2.2413e-01, -1.6298e-01, -6.1446e-02,
-1.1042e-42, -2.7255e-40, -5.5067e-40,
3.8272e-40, 4.9956e-40, -3.2074e-41,
2.8351e-40, 4.2501e-40, 3.9389e-41,
6.1941e-40, -4.8790e-40, -3.4137e-40,
2.2577e-40, -5.7183e-40, -8.6861e-41,
5.7021e-40, -3.2349e-40, 1.9655e-40,
9.1180e-02, 5.6665e-02, -6.5437e-04,
1.1759e-01, 2.7517e-01, 1.9143e-01,
9.7905e-02, 6.6707e-02, 8.6535e-02,
8.8717e-03, 3.0913e-02, 6.6909e-03,
-8.1791e-02, -4.7883e-01, 7.4920e-02,
4.5843e-01, -1.0410e-01, 1.6655e-01,
-4.7094e-03, 3.4769e-02, -1.3291e-02,
-8.5570e-03, -4.0038e-01, 1.8418e-01,
-1.4696e-01, 3.2279e-01, 2.5712e-02,
-2.6207e-01, -4.6150e-02, -6.4099e-02,
-3.2623e-01, -1.8984e-01, -5.7891e-02,
-2.2088e-01, -4.2042e-02, -2.5307e-02,
1.0260e-40, 5.0443e-40, 7.5150e-41,
1.4402e-40, -5.1952e-40, -5.3810e-40,
6.2240e-40, 1.8661e-40, -8.2983e-41,
7.1850e-02, 4.8770e-02, -1.5081e-02,
4.8072e-01, 2.5477e-01, 3.8197e-02,
2.6011e-01, 2.4610e-01, -3.6167e-02,
3.8901e-40, 1.6760e-41, 2.8471e-40,
3.1983e-40, 1.2460e-40, -4.3961e-40,
3.9187e-40, 2.7818e-40, -9.1501e-41,
-2.3320e-40, -1.9998e-40, -2.8132e-40,
-2.9552e-40, -3.9643e-40, -5.1375e-40,
-1.6686e-40, -5.3138e-40, -2.6988e-40,
2.5623e-02, 2.6942e-02, 2.4342e-02,
-9.9084e-02, 5.2974e-01, -6.7983e-02,
-2.2454e-01, 1.1507e-01, 2.0364e-02,
3.4852e-01, -3.1091e-01, 8.1154e-02,
-3.2205e-01, 1.7103e-01, 2.4162e-01,
-2.6892e-03, 2.4142e-02, 5.5540e-02,
-4.5753e-02, -5.0097e-01, 1.7503e-01,
1.4058e-01, 1.1311e-01, 1.5945e-01,
-5.3975e-02, 5.2326e-02, -6.2382e-02,
9.4114e-02, -5.6812e-01, -1.2081e-01,
-8.5809e-02, -9.8661e-03, -2.3064e-02,
-1.6453e-03, -1.8328e-02, 2.4282e-03,
1.5943e-40, 4.6894e-40, -6.2730e-40,
3.8054e-40, -3.7914e-41, -1.4429e-40,
1.6925e-40, 5.1566e-41, -1.7909e-40,
-3.7920e-02, 2.4698e-01, 5.0019e-02,
-1.4246e-02, 2.8739e-01, -5.4704e-02,
7.9436e-02, -2.7838e-02, -3.4191e-02,
-3.3565e-40, 2.1368e-40, 6.7346e-42,
5.6681e-40, -5.5776e-40, -2.7705e-40,
-2.2966e-40, 1.1692e-40, -2.5187e-40,
4.4806e-40, -4.8424e-40, -9.1436e-41,
-4.3250e-40, -2.0721e-40, -2.0050e-40,
-5.1061e-40, 2.6405e-40, -3.0913e-40,
-1.2078e-01, 3.1948e-01, 1.0082e-02,
-1.0781e-02, 8.0720e-02, -4.6330e-02,
-1.8084e-02, -2.2846e-02, -5.5861e-03,
-3.2400e-02, -1.7329e-01, -2.7995e-02,
-5.3680e-02, 4.1310e-01, -9.4691e-02,
7.6938e-02, -4.9596e-02, 1.9649e-01,
3.2594e-02, 1.1544e-01, -1.8501e-02,
7.0248e-02, -6.9838e-02, -5.4278e-02,
-2.9317e-02, -1.4890e-01, 7.8661e-02,
3.7685e-02, 5.9594e-02, 8.9527e-02,
2.2957e-01, -2.9681e-01, -1.6329e-01,
-1.3206e-01, -4.3808e-02, 3.8854e-02,
1.7529e-40, -3.8429e-41, 1.4443e-40,
-4.0829e-40, -2.5643e-40, -5.4821e-40,
1.6827e-40, -1.1628e-40, 2.2441e-40,
5.2451e-02, 1.0179e-01, 4.8487e-02,
-2.1020e-01, -4.4345e-01, -8.7642e-02,
7.0958e-02, 1.9934e-01, -2.1090e-02,
-3.0795e-41, 2.7921e-40, 2.8491e-40,
-2.1154e-40, 9.8876e-41, -8.8824e-41,
2.6552e-40, 2.5767e-40, -3.8369e-40,
6.1348e-40, -3.4170e-40, -1.7109e-40,
-3.3080e-40, 5.4199e-41, -1.7512e-40,
1.8363e-40, -4.4080e-40, -2.5508e-40,
-4.0716e-02, -2.8531e-01, 3.9981e-02,
2.2278e-02, 5.6661e-01, -8.3890e-02,
-7.7331e-02, -9.3843e-02, 1.5584e-02
}
,
{
-3.6751e-40, -5.4562e-41, 6.1860e-40,
8.9003e-41, 5.5262e-40, 3.9537e-40,
-2.1258e-42, -3.1069e-40, -7.6225e-41,
-1.2220e-02, -8.6886e-02, 1.0714e-02,
1.1656e-02, -7.3635e-02, 5.9427e-02,
4.8518e-03, 1.3543e-01, 1.4668e-02,
-1.7505e-02, -2.0691e-02, -1.4507e-02,
2.6157e-02, 7.4109e-02, 1.2822e-02,
-1.9737e-02, -4.9281e-02, 8.5962e-03,
5.6236e-40, 2.4616e-40, 1.6384e-40,
-3.9469e-40, -1.7094e-40, 1.9285e-40,
-1.3634e-40, -1.5785e-40, 6.4184e-41,
-1.2752e-02, 2.3150e-02, -5.3355e-03,
-5.9667e-02, -3.9580e-01, -7.0033e-02,
-2.2612e-02, 1.9176e-02, 1.0588e-02,
8.0027e-04, 3.2242e-01, -2.2566e-02,
8.7850e-03, -2.4025e-01, 4.6123e-02,
-1.9038e-02, -8.5750e-03, -4.8153e-03,
-1.3049e-03, -5.7771e-03, 9.6437e-03,
3.2477e-02, 2.4482e-01, 4.0580e-02,
1.3194e-02, -4.6602e-01, -6.6163e-02,
-1.0647e-01, 7.3328e-02, 2.5871e-02,
-7.0883e-02, -9.2725e-02, -1.5185e-02,
1.1804e-02, 1.7784e-03, -4.4099e-03,
-4.9226e-40, -1.3081e-40, -3.5969e-40,
4.3539e-40, -2.9631e-40, 2.3531e-41,
5.6191e-40, 6.1545e-41, -1.1112e-40,
-1.1880e-02, -3.1884e-02, -2.0850e-02,
-6.8633e-03, 1.6422e-01, 1.0281e+00,
3.5887e-03, 2.1180e-01, -1.0094e-01,
-1.5103e-02, -4.9074e-02, -1.7702e-02,
7.2119e-02, 3.3199e-02, -9.7082e-04,
5.5383e-02, 1.0343e-01, 2.5156e-02,
2.9049e-40, -1.6397e-40, -8.8848e-41,
-6.2827e-40, 8.1281e-41, 5.2909e-40,
-4.1132e-40, 1.5751e-40, 1.5400e-40,
-7.3765e-02, -4.9723e-02, 4.9357e-02,
-2.4207e-02, -1.0291e-01, -1.4001e-03,
-1.2751e-02, 4.2805e-03, 1.8934e-03,
2.6862e-02, 1.1634e-01, 4.5666e-02,
-4.7351e-03, -4.1593e-01, 3.6082e-02,
1.1446e-02, -5.2026e-03, 1.8672e-02,
-7.0960e-04, -6.7877e-03, 9.6674e-03,
-4.9952e-03, 8.8664e-02, -2.7707e-02,
8.5309e-02, 5.5513e-02, -7.6230e-02,
3.6354e-02, 9.7794e-02, 1.1687e-02,
2.6847e-02, 3.2565e-01, -8.7710e-03,
-2.0372e-02, -1.9090e-02, -3.2566e-03,
-5.5592e-40, 7.4408e-41, 3.5576e-40,
2.7758e-40, 4.5458e-41, -6.2347e-40,
9.9739e-41, -1.6078e-40, -5.2900e-40,
1.1500e-02, -3.0675e-01, -3.0079e-02,
1.5080e-02, -2.4292e-01, 1.2736e-01,
-1.9513e-02, -1.9376e-02, -8.5960e-02,
-1.0241e-01, -2.1312e-02, -3.1999e-02,
-6.3598e-02, 1.5187e-01, 1.2279e-01,
1.5695e-03, 1.1376e-01, 5.2648e-03,
2.6415e-40, 3.0508e-40, 3.6407e-41,
-1.4403e-40, 2.8942e-40, -1.0089e-40,
2.2362e-41, 1.9843e-40, -1.5509e-40,
1.3269e-01, -3.1031e-01, -4.4091e-02,
4.6385e-03, 2.1411e-02, 5.7141e-02,
2.0724e-02, -3.5406e-02, 2.5717e-03,
-5.5922e-02, 7.1404e-01, -2.9852e-02,
1.3041e-02, 3.9373e-02, -2.4515e-01,
4.4278e-03, 2.1557e-02, -8.4940e-03,
1.3677e-02, -3.5183e-02, 1.2391e-02,
-9.2405e-02, 2.9650e-01, 6.9695e-02,
-3.3125e-02, 3.4700e-01, 1.4552e-01,
2.7357e-02, 5.2133e-01, -5.7571e-02,
2.7580e-02, 1.0381e-01, 1.3678e-02,
4.9260e-03, -4.4419e-02, 7.0651e-04,
2.9472e-40, -5.2892e-40, -3.6567e-40,
4.9403e-40, -6.2132e-40, -6.2920e-40,
-1.5156e-40, -3.6134e-40, 5.2432e-40,
-5.0427e-03, -2.8247e-03, -5.3734e-02,
-1.5918e-02, 1.8325e-01, -1.7834e-01,
-5.1774e-03, 8.0009e-02, 5.6296e-03,
3.1480e-02, 2.0665e-02, 2.7806e-04,
7.3085e-02, 7.7660e-01, 1.1979e-01,
1.9979e-02, 1.6629e-01, 2.3216e-02,
-5.9701e-40, 9.5583e-41, 1.8231e-40,
-3.3216e-40, -4.1253e-40, -3.3326e-40,
1.7131e-40, 2.9588e-40, -2.2520e-40,
-1.3337e-01, -4.2777e-01, -1.3569e-01,
2.9915e-02, -2.7016e-01, -3.7454e-03,
-1.3574e-02, -3.6298e-02, -1.6571e-02,
4.2530e-02, -4.2299e-02, 1.4320e-01,
1.4371e-02, -1.1289e-01, -3.8829e-02,
5.1689e-03, 1.5804e-02, 1.6125e-03,
-3.4601e-03, -7.2087e-03, -5.5514e-04,
4.4568e-02, 1.3621e-01, -4.3811e-02,
1.1350e-02, -2.8417e-01, 3.1553e-02,
-7.8854e-02, -2.0316e-01, 7.7746e-03,
-1.1437e-02, 2.1557e-01, -1.9479e-02,
-1.3511e-02, -2.0339e-02, -1.0276e-02,
-8.8977e-41, 5.9533e-40, -3.1413e-40,
-3.1892e-40, 5.5204e-40, -5.0634e-40,
-2.4932e-41, 4.3474e-41, 6.2961e-40,
4.7864e-03, 5.7125e-02, -1.5468e-02,
-3.9614e-03, -2.9042e-02, 2.8347e-01,
-1.0133e-02, 8.2745e-02, -1.0450e-01,
5.9537e-03, 1.4050e-02, 1.9802e-04,
2.4964e-02, 1.3077e-01, -4.7314e-02,
6.2744e-03, -1.9068e-01, 5.2593e-02,
-2.0550e-40, -2.4231e-40, 3.3927e-40,
-3.9609e-41, 2.2262e-40, 1.8866e-40,
2.0788e-40, -1.8012e-40, -1.9375e-40,
-4.7530e-03, -1.2315e-01, 8.2373e-03,
-9.2412e-02, 1.7156e-01, 1.1176e-02,
-1.4081e-02, 1.4694e-02, -1.9475e-02,
-1.5269e-02, -3.8430e-02, -7.4717e-02,
3.3361e-02, -1.1956e-01, 4.2304e-01,
-2.9924e-03, -3.3035e-02, -3.6560e-02,
-1.2386e-02, 6.3762e-03, -3.7047e-02,
1.3839e-02, -3.6358e-02, 4.3609e-02,
-8.3692e-03, 4.5794e-01, -3.0761e-01,
2.2287e-02, 2.5360e-02, -6.1253e-03,
-1.8992e-02, -4.0078e-01, 7.3821e-02,
5.6517e-03, 4.2348e-02, -2.5642e-02,
5.5659e-40, -6.1219e-40, 4.1493e-40,
5.7719e-42, -3.7181e-40, -3.3260e-40,
-4.8241e-41, 5.2207e-40, -1.2199e-40,
-1.2074e-02, 1.7647e-01, 1.1882e-02,
6.4764e-03, -2.3742e-01, -1.8033e-01,
2.5866e-02, 6.5985e-02, 3.7191e-02,
5.1047e-02, -3.0457e-02, 1.2531e-02,
-1.3252e-01, 1.2593e-01, -6.3717e-02,
4.0794e-02, -1.4786e-02, 1.7139e-02,
2.4343e-40, -1.7451e-40, 2.0169e-40,
-5.5166e-40, 2.4201e-40, -2.5701e-40,
2.9947e-40, 2.9321e-40, -1.6015e-40,
-3.6598e-02, -1.8520e-03, -1.6999e-01,
-8.6806e-02, -7.7266e-02, -9.6042e-02,
-2.1342e-02, 2.5793e-02, -7.2541e-03,
3.0667e-02, -2.6287e-01, 3.0592e-02,
-4.5559e-02, -1.4716e-01, 2.0932e-01,
-5.8472e-03, -1.0023e-02, 1.2134e-02,
-1.3284e-02, 2.0538e-02, -5.4476e-04,
5.8096e-02, -1.4790e-02, -2.0158e-02,
-3.9654e-02, -2.2069e-01, -1.5089e-01,
-1.8966e-01, -1.6834e-01, 9.8934e-02,
8.2326e-02, 7.5585e-02, -1.7188e-02,
-1.4985e-02, 2.1823e-02, -7.7015e-03,
1.8353e-40, 4.8298e-40, -2.0568e-40,
-3.7196e-40, -5.7237e-40, 1.0648e-40,
9.4960e-41, 3.0411e-40, 1.3294e-40,
-1.4884e-02, 4.9767e-02, -3.0288e-02,
8.9874e-03, -1.0290e-01, 3.1344e-01,
5.9735e-03, -2.0813e-01, -6.6145e-03,
1.6592e-02, 3.0529e-05, -1.0180e-02,
-4.8683e-02, 1.4025e-01, 2.9237e-02,
-2.3334e-02, -9.6638e-02, -1.0268e-02,
-4.9497e-41, -5.6377e-40, -2.0142e-40,
2.1230e-40, 1.6067e-40, 3.4830e-40,
-4.9031e-40, -3.0290e-40, -2.9060e-40,
3.4053e-02, -8.9560e-02, -4.4479e-02,
4.2128e-02, 6.9253e-02, -7.1096e-03,
4.2358e-02, -1.7215e-02, 9.0389e-03,
1.8129e-02, -1.4785e-01, 1.1267e-01,
-7.1637e-02, 5.5595e-01, -1.0569e-02,
1.8481e-02, -4.7556e-02, -1.1185e-02,
-1.1766e-02, -8.5959e-03, -3.0046e-02,
-2.1081e-03, 1.1518e-01, -8.4419e-02,
-7.5829e-02, 1.8199e-01, -9.7726e-03,
3.6473e-02, 1.8761e-01, 4.9495e-03,
-6.9640e-02, -2.8775e-01, 3.6149e-02,
9.6345e-04, 1.3967e-02, -6.0015e-03,
2.9861e-40, 3.9190e-40, 5.3741e-40,
3.8059e-40, 4.7113e-40, 5.9498e-40,
-5.0640e-40, -4.1610e-40, 6.2009e-40,
-2.3464e-03, -7.3888e-02, 3.4701e-02,
-5.2257e-04, 3.8444e-02, -5.3735e-01,
-1.7970e-03, 9.0298e-02, 5.3151e-02,
-2.6033e-02, 1.2973e-02, 4.9147e-03,
2.3005e-02, 1.7045e-01, 2.4715e-02,
2.7981e-02, -8.4662e-02, -9.4778e-03,
5.3019e-40, -2.1800e-40, 1.5281e-40,
-1.0282e-40, 1.8040e-41, 1.3929e-40,
-5.9679e-40, -5.2958e-40, 1.4429e-40,
3.4325e-02, -1.7240e-01, -4.9645e-02,
-2.4341e-02, 5.2652e-02, -1.1188e-02,
-3.6336e-03, 4.2148e-04, 3.3086e-03,
5.5059e-03, 1.7744e-01, -2.8681e-02,
-3.4868e-03, -1.4569e-01, 1.6508e-02,
4.6766e-03, -1.7963e-02, -2.6397e-03,
4.3618e-03, -4.2793e-03, -4.7820e-04,
-4.2795e-02, 2.0070e-01, 3.8402e-02,
5.0586e-02, 2.1910e-01, -3.4381e-02,
5.7625e-02, 4.2314e-01, -1.9732e-02,
3.4811e-02, -2.3033e-01, 1.1477e-02,
-7.3744e-03, 1.9112e-02, 4.2251e-03
}
};
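// Per-layer biases for the HDNL0 model: 8 convolution layers with
// 8 feature-map channels each, one bias per channel (shape [8][8]).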
static __device__ __constant__ const float HDNL0biasL[8][8] =
{
{
0.0272, -0.5743, -0.0333, -0.0334, 0.0082, -0.0263, -0.0048, -0.0167
}
,
{
-0.0239, -0.0385, 0.0026, 0.0288, -0.0225, 0.0082, -0.0191, -0.0185
}
,
{
-5.8305e-03, -8.6574e-02, 4.2228e-02, -4.3500e-02, -8.1892e-04, 3.3171e-03, -1.1582e-02, -4.1205e-40
}
,
{
-0.0053, 0.0053, -0.0114, -0.0127, -0.0039, -0.0426, 0.0053, -0.0017
}
,
{
-0.0046, -0.0104, -0.0087, -0.0040, 0.1077, 0.0347, -0.0165, 0.7296
}
,
{
8.7612e-02, 5.9126e-01, 4.6709e-03, -1.1559e-39, 2.3381e-02, -1.2136e-40, -5.6040e-39, 3.7100e-02
}
,
{
-3.3246e-39, -1.4536e-02, -6.3362e-02, 8.5347e-41, 7.9956e-02, 3.0679e-04, -1.0257e-02, -1.2037e-02
}
,
{
-0.0006, 0.0117, 0.0083, 0.0686, -0.0046, 0.0015, -0.0076, 0.0079
}
};
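// Final layer of the HDNL0 model: 4 * 8 = 32 weights. With 8 input feature
// maps, this shape is consistent with a 1x1 convolution producing the 4
// sub-pixel outputs of a 2x pixel-shuffle upscale (inferred from the
// dimensions; the layout is not stated in the source).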
static __device__ __constant__ const float HDNL0kernelsL10[4 * 8] =
{
0.4908, -0.0457,
-0.1716, -0.2115,
-0.0015, -0.3152,
0.3045, 0.0330,
-0.2981, 0.0912,
0.0122, 0.2281,
0.3331, 0.2853,
0.2210, 0.2611,
0.2364, 0.0792,
0.2885, -0.7122,
-0.3715, 0.1404,
-0.0260, 0.2144,
0.2378, 0.1570,
-0.5734, 0.2077,
-0.0851, 0.2771,
0.0415, -0.1858
};
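// First layer of the HDNL1 model: 9 * 8 = 72 weights, i.e. eight 3x3
// kernels applied to a single input channel (the luma plane in this family
// of upscalers) to produce 8 feature maps.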
static __device__ __constant__ const float HDNL1kernelsL1[9 * 8] =
{
-6.6326e-02, -2.2316e-01, 4.2471e-02,
1.7064e-02, -6.8305e-01, -1.5978e-01,
6.7568e-01, 3.2212e-01, 8.3561e-02,
-4.6649e-01, -6.8789e-02, 5.3455e-01,
-5.0941e-01, 7.0657e-02, 4.5647e-01,
-2.3657e-02, 3.5302e-02, -1.8316e-02,
-2.0316e-01, 4.7021e-02, -2.2313e-01,
5.3465e-02, 7.0750e-01, 9.1366e-02,
-2.8566e-01, -2.0521e-02, -7.1786e-02,
4.8186e-02, -9.3429e-02, 2.4493e-03,
3.4654e-01, 7.2625e-02, 1.6615e-01,
3.2101e-01, 3.2923e-01, -9.8548e-02,
1.1916e-02, 2.0413e-01, -1.8920e-02,
6.0858e-02, 8.3548e-01, 1.4060e-01,
-9.1827e-01, -2.4551e-01, -4.6118e-02,
-5.2737e-02, 4.3151e-01, 1.7027e-01,
2.6647e-01, 5.5240e-01, 3.4745e-03,
5.3495e-02, -4.7059e-02, -2.6593e-02,
1.5691e-01, 4.7332e-01, 2.6651e-03,
1.7997e-02, 4.1367e-01, 1.3239e-02,
4.6932e-02, 1.0278e-01, 1.0699e-02,
-3.4319e-02, -7.6373e-01, -9.7022e-02,
-1.4160e-01, 2.9567e-01, 6.6220e-01,
7.3508e-05, 1.2683e-01, -6.3442e-02
};
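// One bias per output feature map of the first HDNL1 layer.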
static __device__ __constant__ const float HDNL1biasL1[8] =
{
-0.0264, -0.0229, -0.3021, -0.2579, -0.0327, -0.0053, -0.7777, 0.0232
};
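// A minimal indexing sketch, not part of the original source: assuming
// HDNL1kernelsL1 is stored output-channel-major as [oc][ky][kx], the
// first-layer response of channel `oc` over the 3x3 neighbourhood of
// (x, y) in a single-channel float image `src` could be accumulated as
// below. Both the weight layout and the ReLU activation are assumptions
// typical of this model family, not facts taken from this file.
static __device__ __forceinline__ float HDNL1runFirstLayer(
    const float* src, int stride, int x, int y, int oc)
{
    float sum = HDNL1biasL1[oc];                 // per-channel bias
    for (int ky = 0; ky < 3; ky++)               // 3x3 window rows
        for (int kx = 0; kx < 3; kx++)           // 3x3 window cols
            sum += HDNL1kernelsL1[oc * 9 + ky * 3 + kx]
                 * src[(y + ky - 1) * stride + (x + kx - 1)];
    return fmaxf(sum, 0.0f);                     // ReLU activation
}

// Hidden layers of the HDNL1 model: 8 layers, each holding 9 * 8 * 8 = 576
// weights (3x3 kernels, 8 input feature maps, 8 output feature maps).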
static __device__ __constant__ const float HDNL1kernelsL[8][9 * 8 * 8] =
{
{
-7.8588e-41, -5.0770e-40, -2.3334e-40,
5.7174e-40, 6.9060e-41, 2.2264e-40,
-4.1631e-40, 4.5667e-40, -1.8115e-40,
-3.1000e-40, 3.1019e-40, 5.5423e-40,
-5.8518e-40, 2.1290e-40, -5.4579e-40,
-3.7753e-40, 3.6029e-40, -1.7875e-40,
4.2296e-40, 6.5672e-41, 1.4976e-40,
-3.1479e-40, -3.2881e-40, -5.9818e-40,
3.2053e-40, 3.0821e-40, 5.1321e-40,
-2.6557e-17, -3.8205e-17, -3.7077e-17,
-2.5168e-17, -3.4817e-17, -3.4186e-17,
-1.8056e-17, -2.3105e-17, -2.2581e-17,
5.9355e-40, 2.4052e-40, -1.0027e-40,
2.2060e-40, 3.4864e-40, -5.7403e-40,
4.6936e-40, -3.3951e-40, -4.7715e-40,
-9.7917e-11, -1.0331e-10, -9.6141e-11,
-1.0581e-10, -1.1173e-10, -1.0317e-10,
-1.0192e-10, -1.0681e-10, -9.8738e-11,
-1.0402e-29, -2.3233e-29, -1.7882e-29,
-1.4804e-29, -3.7821e-29, -3.0750e-29,
-1.0448e-29, -2.6740e-29, -2.1676e-29,
4.2124e-40, 2.5024e-40, 4.5312e-40,
-2.4880e-40, 2.9838e-41, -2.7215e-41,
-2.6347e-40, 1.5950e-40, 9.3734e-41,
-1.4936e-01, -1.0438e-01, 2.9827e-02,
1.4751e-02, -1.6854e-01, -8.8101e-02,
4.9228e-02, -3.0744e-02, -1.1512e-01,
-3.4996e-02, -2.5024e-02, -1.8880e-02,
3.0008e-02, 4.8689e-02, -1.3415e-01,
-9.1698e-03, -1.1019e-02, -5.0655e-02,
-6.6579e-02, -2.6447e-02, 1.9791e-02,
-4.1727e-02, 3.6433e-02, 3.1516e-02,
-5.7619e-02, 2.3401e-02, 3.0785e-02,
-3.3610e-02, 1.2263e-01, 2.4351e-02,
1.7148e-02, 1.7144e-01, 4.0305e-02,
8.7902e-03, -7.0077e-02, -1.0688e-01,
4.7460e-02, -1.4093e-03, -1.5911e-02,
-2.2978e-02, 9.9025e-02, 1.2867e-02,
3.4704e-02, 1.4672e-01, 7.9188e-02,
-4.4222e-02, -3.9480e-02, -1.9193e-01,
-3.1897e-02, 1.0776e-01, -5.2742e-02,
8.0377e-02, 2.5764e-01, -9.7330e-02,
-1.1593e-01, -5.3753e-02, -2.8918e-02,
6.7939e-02, 2.3963e-01, 2.0856e-01,
2.7964e-02, 2.7781e-01, 2.1859e-01,
-1.5196e-02, 9.6704e-03, -8.0136e-02,
8.9441e-02, 1.0314e-01, -2.0204e-02,
-3.3970e-02, -1.4562e-02, 3.4723e-02,
2.3357e-40, -1.4361e-40, 2.0498e-40,
-5.2355e-40, -6.0151e-40, -2.9264e-40,
1.9715e-41, 5.9793e-41, -1.3675e-40,
5.3771e-40, 6.5637e-41, -3.8471e-40,
-3.0820e-40, -1.7004e-40, -1.9371e-40,
-5.1159e-40, 7.3244e-41, 3.5861e-41,
2.8441e-40, 4.5248e-41, 1.9771e-40,
-2.4681e-40, 3.6054e-40, 3.3496e-40,
-6.5048e-42, -1.6001e-40, 4.8243e-41,
-1.0165e-08, -9.9140e-09, -9.6054e-09,
-1.0511e-08, -1.0256e-08, -9.9066e-09,
-1.0521e-08, -1.0320e-08, -9.9896e-09,
2.6042e-40, 4.2016e-40, 5.3537e-40,
1.4594e-40, 1.1344e-40, 3.5144e-40,
-2.5736e-37, -1.3591e-39, 2.1029e-40,
-3.1420e-07, -3.0309e-07, -2.9630e-07,
-3.1196e-07, -2.9967e-07, -2.9249e-07,
-3.1296e-07, -3.0086e-07, -2.9332e-07,
-6.1256e-12, -5.9283e-12, -5.6508e-12,
-6.5297e-12, -6.4118e-12, -6.0667e-12,
-6.8382e-12, -6.8547e-12, -6.5225e-12,
-5.0327e-26, -1.0795e-25, -1.8952e-25,
-2.4220e-26, -5.9067e-26, -1.1323e-25,
-2.1499e-27, -5.5342e-27, -1.0333e-26,
4.5039e-03, -1.3303e-02, 1.6183e-01,
6.5951e-02, -7.1353e-02, 1.7254e-01,
-1.8671e-03, 1.0593e-01, -3.6872e-02,
4.9102e-02, -2.4075e-03, 4.8194e-02,
-7.0892e-02, -1.8948e-01, -1.6586e-01,
-2.8102e-02, 2.0870e-02, 5.9228e-02,
1.2673e-02, 3.3908e-02, 4.8282e-02,
4.4369e-02, 5.6304e-02, 1.2225e-02,
4.1855e-02, 1.1990e-01, 6.3799e-02,
-7.3884e-02, 1.4153e-02, 9.5825e-02,
4.2850e-02, -3.5337e-02, 1.3615e-01,
-2.0900e-01, -2.2835e-02, -8.6987e-02,
-6.7793e-02, 1.3547e-01, -9.9666e-02,
3.5498e-02, 5.3725e-02, 1.1501e-01,
-1.2238e-01, 3.5354e-02, 7.4216e-02,
-3.5288e-02, 7.0111e-03, 2.4820e-02,
-1.0649e-02, 1.6715e-01, 1.2825e-01,
3.1145e-02, 1.2097e-01, -1.2073e-02,
-7.0603e-02, 5.5574e-02, -5.0025e-02,
-8.2885e-02, 1.0957e-01, 1.3311e-01,
2.9147e-02, -1.1849e-02, 8.9953e-02,
-3.2247e-02, -1.0747e-02, 9.1431e-03,
1.2114e-01, -5.9780e-02, 5.4821e-02,
-5.2592e-02, -6.9082e-02, -7.5981e-02,
-7.8533e-02, 1.3658e-01, 1.0923e-01,
-3.2530e-02, -2.1342e-01, -1.2200e-01,
-1.9196e-02, 1.0450e-01, -8.9044e-02,
-2.0110e-02, 6.1439e-02, -2.7405e-02,
6.0823e-02, -6.4268e-03, -9.1778e-03,
6.4877e-02, -6.1227e-02, -5.4466e-02,
9.6375e-02, 1.7519e-01, 5.0725e-03,
1.9159e-01, 3.9725e-01, 1.2851e-01,
-6.9197e-02, 4.9372e-02, -3.4221e-02,
1.1583e-01, 1.3389e-01, 2.9135e-01,
1.0290e-02, 1.1214e-01, 1.7560e-01,
-1.8048e-02, 8.4782e-02, 4.9925e-02,
-3.8447e-02, -1.3156e-01, -1.1072e-01,
1.8256e-01, 2.2831e-01, -1.6508e-01,
4.6781e-02, 1.4913e-01, -8.6956e-02,
5.1365e-04, 6.7873e-02, -3.4787e-03,
1.7689e-01, 1.8414e-01, 2.2286e-01,
1.2571e-01, 1.7687e-01, 1.5949e-01,
5.9904e-02, 1.6259e-01, 1.4313e-01,
2.2234e-01, 4.0943e-01, 3.1469e-01,
1.9799e-01, 4.3052e-01, 3.0510e-01,
1.2259e-01, -1.0778e-02, 6.2284e-03,
1.4508e-02, -6.9073e-02, 5.0998e-02,
5.2962e-02, -1.5291e-01, -1.0491e-02,
-8.6903e-02, -1.0430e-01, 3.0130e-02,
4.1691e-02, -1.2675e-01, -5.5169e-01,
8.9644e-02, 3.6910e-02, -1.5459e-01,
5.3656e-03, 6.7936e-02, 1.0793e-01,
-2.7424e-02, -1.7652e-01, -3.5776e-01,
2.4593e-02, -5.6237e-01, -5.9038e-01,
-9.4807e-02, -7.5681e-02, -3.6990e-02,
8.7385e-03, -5.7989e-02, -4.9573e-02,
-7.7422e-02, -1.1899e-01, -7.4023e-02,
9.1539e-03, -1.1760e-01, 4.6825e-02,
1.9901e-02, -3.9718e-02, 1.2997e-02,
4.2209e-02, -5.2119e-02, -1.2255e-01,
2.4262e-02, 5.3676e-02, -2.4767e-01,
-4.2933e-02, -2.2473e-01, -4.0310e-01,
-3.5160e-02, 1.9858e-01, -1.5943e-01,
1.3208e-01, -1.0493e-01, -6.7076e-02,
-2.5244e-01, 1.1175e-02, 2.5568e-01,
-3.3867e-01, 3.1953e-02, 5.9426e-01,
4.0551e-02, 4.4914e-03, -1.9348e-02,
-6.7386e-02, -1.5543e-01, -3.0883e-02,
8.9177e-02, -4.6432e-02, 6.8227e-02,
8.7784e-02, 3.6127e-02, -2.0375e-02,
4.5461e-02, -4.9071e-02, 9.9435e-02,
-2.5700e-01, -2.7706e-01, 6.2776e-02,
-6.9571e-02, -5.7888e-03, 9.3852e-02,
2.8490e-02, -2.7854e-01, 1.4209e-01,
1.5373e-02, -4.3503e-02, 9.6895e-02,
1.1682e-02, 1.5608e-01, 1.5844e-01,
5.8027e-02, 2.6632e-02, -8.5479e-03,
1.2836e-01, 2.0714e-01, 1.0228e-01,
1.4647e-02, 5.7609e-02, -1.6728e-02,
2.1212e-01, 3.2673e-01, 4.5670e-02,
-6.0844e-02, -1.1768e-01, -1.1233e-01,
5.0123e-04, 6.3947e-02, -1.8356e-01,
1.4091e-01, -2.1568e-02, 8.5933e-02,
-3.9406e-02, 8.2921e-02, -1.0601e-01,
4.1284e-02, -7.3138e-02, 1.7264e-01,
2.5883e-02, 5.2945e-01, 2.4510e-01,
2.7291e-03, 4.0173e-02, 7.8221e-03,
-3.5795e-02, -4.8631e-03, -2.2715e-01,
1.2330e-01, 7.1739e-01, -4.1725e-01,
7.5106e-02, 2.5267e-02, -2.8655e-01,
-7.8731e-02, -7.5747e-03, -5.5601e-02,
7.9764e-02, 1.0524e-01, 8.6742e-03,
2.1791e-02, 3.7304e-02, -1.1534e-01,
-1.2011e-01, -7.5160e-02, 1.3737e-02,
-2.9470e-01, 2.6613e-01, -2.3740e-02,
1.2957e-01, 1.4752e-01, -9.3655e-02,
2.9828e-02, 2.0664e-01, 1.9731e-02,
-8.0378e-02, -3.9481e-01, -1.5395e-01,
-5.7944e-02, -8.6343e-02, -5.4324e-02,
7.1664e-02, 1.5294e-01, -1.2112e-02,
2.1023e-02, 1.1945e-01, -7.2998e-02,
-1.1693e-02, -1.8818e-01, -9.8693e-02,
-6.7017e-02, 6.9767e-02, -5.0268e-02,
-9.1106e-03, 2.4267e-01, 6.0277e-02,
3.5269e-02, 7.7376e-02, 1.6642e-02,
-5.2600e-02, -1.8864e-01, -1.1195e-01,
3.2119e-01, -9.7913e-02, 1.4734e-01,
8.6988e-02, -5.3563e-03, -2.6136e-03,
-9.1528e-03, 2.8186e-01, -1.5933e-01,
4.8499e-02, 4.5189e-01, -1.6399e-01,
5.8164e-02, 6.3251e-02, -2.8738e-02,
2.0424e-01, -7.2819e-02, 2.1903e-02,
-3.5630e-01, 1.3171e-01, -7.6749e-02,
3.8848e-02, 1.7902e-01, -1.1902e-01,
-4.4221e-02, 1.5032e-02, 2.9078e-02,
-1.9738e-01, -1.4878e-02, 1.3315e-02,
1.3956e-02, 1.2856e-01, 7.0688e-02,
2.0933e-01, 1.7286e-01, 6.7601e-02,
5.5136e-01, 4.6866e-01, 1.8402e-01,
2.2362e-01, 2.4124e-01, 1.3167e-01
}
,
{
-5.2308e-12, -5.4024e-12, -5.0039e-12,
-5.4553e-12, -5.6928e-12, -5.2812e-12,
-5.0230e-12, -5.2150e-12, -4.9133e-12,
5.7994e-02, 1.0051e-01, -1.0618e-01,
6.8090e-02, 1.2789e-01, 1.1380e-01,
-1.5882e-01, 8.2323e-03, -9.1424e-02,
2.0132e-07, 2.0907e-07, 2.1344e-07,
2.1179e-07, 2.2018e-07, 2.2381e-07,
2.1095e-07, 2.1920e-07, 2.2150e-07,
2.9336e-02, 5.4427e-02, -1.2082e-01,
5.8399e-02, 2.2261e-01, 1.1165e-01,
-9.6098e-02, 8.3175e-02, -6.5909e-02,
1.2007e-01, 1.9776e-01, 7.7464e-02,
6.7018e-02, 3.6536e-01, 1.3796e-01,
6.0724e-02, 4.6161e-02, 2.3740e-01,
-2.1117e-02, -2.0200e-02, 9.3703e-02,
-4.6932e-02, -1.5910e-01, 8.8094e-02,
-5.6641e-02, -1.7146e-01, -1.0502e-01,
-2.5624e-01, 1.6049e-01, -3.3267e-02,
-2.3248e-01, 5.4036e-01, 1.0027e-01,
-2.1680e-01, -7.0096e-03, -1.0692e-01,
-4.8357e-02, 2.5107e-01, 4.8323e-02,
9.7245e-02, 5.5015e-01, -3.4641e-01,
1.2458e-02, -1.3626e-01, -4.1992e-01,
-2.1359e-40, -1.4250e-40, -4.7123e-40,
-5.9433e-41, 1.9903e-41, -1.7701e-40,
-5.9941e-40, -5.8562e-40, -5.0226e-40,
-2.6581e-40, 1.3006e-40, -1.4201e-40,
5.4264e-40, 2.3848e-40, 5.6412e-40,
-2.6378e-41, -5.7132e-40, -4.1343e-40,
-3.2848e-22, -3.6697e-22, -3.4147e-22,
-3.5780e-22, -3.9435e-22, -3.5989e-22,
-3.1212e-22, -3.4305e-22, -3.0670e-22,
-1.1749e-08, -1.1602e-08, -1.1494e-08,
-1.2125e-08, -1.1918e-08, -1.1718e-08,
-1.1779e-08, -1.1623e-08, -1.1559e-08,
-5.0237e-07, -4.9179e-07, -4.6744e-07,
-5.1967e-07, -5.0826e-07, -4.8421e-07,
-5.0226e-07, -4.9668e-07, -4.8019e-07,
5.6433e-41, -3.0514e-40, -5.4526e-40,
1.1125e-41, 2.9485e-40, 5.5282e-40,
3.0229e-40, 1.5915e-40, 5.3759e-40,
-6.1144e-27, -9.2380e-26, -2.4302e-25,
-9.3834e-25, -1.0289e-23, -1.9513e-23,
-4.3746e-24, -4.4359e-23, -7.0505e-23,
-8.1604e-36, -3.2928e-37, -2.2994e-40,
-3.9543e-37, -9.9513e-39, 7.4616e-41,
-4.0044e-39, 4.4392e-40, 4.8856e-40,
-3.3447e-40, -3.9935e-40, 2.4649e-40,
2.0207e-40, -3.0245e-40, -7.1986e-41,
6.2938e-40, -3.6922e-40, 1.5296e-40,
-6.4982e-41, 5.0849e-41, 5.7873e-40,
1.4327e-40, -4.2163e-40, 1.3807e-40,
2.8569e-40, 1.9139e-40, 3.2985e-40,
-5.4410e-40, 2.3070e-40, 2.1690e-40,
-1.5964e-40, -2.2781e-40, 5.6766e-40,
2.2533e-42, -2.5532e-40, -5.5822e-40,
5.7249e-40, 5.3555e-40, -4.9107e-41,
1.7538e-40, -1.2312e-40, 5.0077e-40,
6.1500e-40, 1.9980e-40, 6.2953e-40,
-7.5314e-23, -9.4299e-23, -7.1342e-23,
-8.5139e-23, -1.1237e-22, -9.0478e-23,
-6.2038e-23, -8.5180e-23, -7.3015e-23,
5.0613e-40, 1.5224e-40, -1.8977e-40,
2.4108e-41, -5.1771e-40, 6.2317e-40,
1.0465e-40, 2.8816e-41, 6.2500e-40,
3.5727e-40, 4.2717e-40, -3.5900e-40,
-4.4831e-40, 3.4260e-40, -4.8293e-40,
-2.4133e-40, 3.1140e-40, -2.0777e-40,
-2.2906e-41, 3.5923e-40, -4.4443e-40,
-4.6615e-40, -2.1123e-40, 4.5700e-40,
-4.6360e-40, -3.6052e-40, -3.4319e-40,
-3.6575e-40, -3.5707e-40, -3.0530e-41,
4.2531e-40, -1.2255e-40, -3.9607e-40,
3.5903e-40, -5.4630e-40, -3.1460e-40,
2.8820e-40, 4.9460e-40, 6.1461e-40,
8.9118e-41, -4.6579e-40, -2.4172e-40,
-5.5474e-40, -8.1848e-41, -1.6910e-40,
-1.6272e-25, -1.8802e-25, -1.7229e-25,
-1.7850e-25, -2.0338e-25, -1.8235e-25,
-1.4715e-25, -1.6733e-25, -1.4681e-25,
-5.5471e-09, -5.6862e-09, -5.7043e-09,
-5.8727e-09, -5.9823e-09, -5.8983e-09,
-5.8040e-09, -5.8670e-09, -5.7388e-09,
-9.7253e-07, -9.7248e-07, -9.4623e-07,
-1.0149e-06, -1.0042e-06, -9.6709e-07,
-1.0139e-06, -9.9930e-07, -9.5295e-07,
-4.5042e-40, 2.6725e-40, 2.3181e-40,
-4.6274e-41, -1.1799e-40, 5.0685e-40,
-1.0765e-40, 3.3322e-40, -6.1905e-40,
-1.3653e-34, -3.4690e-33, -1.1578e-32,
-1.4444e-31, -2.1995e-30, -4.8668e-30,
-1.2965e-30, -2.0189e-29, -3.3962e-29,
-2.5057e-40, 7.2876e-41, 4.5731e-41,
-1.6525e-40, 5.0987e-40, -5.4683e-40,
8.1836e-41, 6.2722e-40, -3.1057e-40,
4.0987e-40, 3.5941e-40, 5.1680e-40,
5.5563e-40, 3.1011e-40, 4.7068e-40,
1.0426e-40, -1.0803e-40, 4.4867e-40,
-4.9675e-03, 1.5412e-01, -4.1930e-03,
-6.1089e-02, 2.0405e-01, 1.9587e-01,
3.8772e-02, 1.6894e-01, -2.6163e-02,
1.0839e-30, 1.8608e-30, 1.1386e-30,
1.4863e-29, 1.9422e-29, 1.1639e-29,
1.7504e-29, 2.2177e-29, 1.3629e-29,
6.4484e-02, 6.6296e-02, 2.2838e-01,
-1.0213e-01, 7.5883e-02, -1.7531e-01,
-1.4869e-01, 1.0736e-01, 1.4129e-01,
-2.8235e-02, -2.9232e-02, -9.3912e-02,
5.1317e-02, 9.0256e-02, -2.4669e-02,
-3.2465e-02, 5.8099e-02, 9.8402e-02,
-2.3135e-01, -1.3786e-01, 2.8581e-01,
-3.2410e-01, -2.6623e-01, 6.1583e-02,
1.8696e-01, 4.7251e-02, -2.3520e-01,
2.5630e-02, -1.2358e-01, -1.5735e-01,
-1.2198e-01, 5.1970e-01, 1.9976e-01,
-1.2515e-01, 9.8768e-02, 5.8917e-02,
-3.8569e-02, -9.2729e-02, -1.8982e-01,
1.1378e-01, 5.7195e-01, -1.8265e-01,
-3.5724e-02, -2.1379e-01, -2.2129e-01,
-5.1198e-40, -3.4709e-40, 6.2940e-40,
-2.2134e-41, -3.6133e-40, -2.7075e-40,
-5.9664e-40, -2.3937e-40, 3.0876e-40,
9.1814e-41, 9.5898e-41, -3.1892e-40,
3.1093e-40, 2.7935e-40, 1.7966e-40,
-2.3967e-40, 4.0806e-40, 6.2012e-40,
5.3771e-41, 6.1000e-40, -4.6695e-40,
5.9474e-41, -4.9675e-40, 5.7403e-41,
4.7091e-40, -5.0751e-41, 3.9864e-41,
-9.7756e-41, 2.7978e-40, -5.0791e-40,
-3.4321e-40, -7.0774e-41, -5.2651e-40,
2.8034e-40, -3.3452e-40, 1.9535e-40,
-6.2300e-40, -1.8372e-40, -1.9038e-40,
-5.6564e-40, -6.1257e-40, -1.0338e-40,
-1.7191e-41, -1.2843e-41, 5.0707e-40,
-4.4587e-40, 2.7128e-40, -1.4155e-40,
-5.7475e-40, -3.4612e-40, -4.7424e-40,
1.7235e-40, -6.0028e-40, -1.6342e-40,
-5.1072e-40, -2.4721e-40, -2.8477e-41,
2.6598e-40, -4.4078e-40, 4.1763e-40,
-3.3947e-40, -5.5626e-40, 4.9713e-40,
2.1733e-40, -2.9024e-40, -4.5514e-42,
-3.4873e-40, -1.0737e-40, -1.4297e-40,
2.8514e-40, 2.6283e-40, 2.2827e-40,
3.8908e-40, -4.2140e-40, 6.1433e-40,
-4.7825e-40, -3.0140e-40, -5.9563e-40,
1.5280e-40, 2.6156e-40, 5.0361e-40,
1.9497e-01, 2.3140e-01, -3.5244e-02,
1.6876e-01, -1.7646e-02, -2.0413e-01,
9.8052e-02, -6.7906e-02, -3.9834e-02,
-5.9252e-15, -6.7431e-15, -8.1865e-15,
-5.7350e-15, -6.6893e-15, -8.9833e-15,
-8.4106e-15, -1.0631e-14, -1.5948e-14,
8.9389e-02, 6.6460e-02, 6.8477e-02,
6.1099e-03, -8.7536e-02, 1.1792e-01,
-1.0079e-01, 1.5293e-01, 4.3945e-02,
1.0168e-01, 1.0281e-01, -7.9173e-02,
2.0855e-01, 1.7537e-01, -7.1000e-02,
-1.4157e-01, -3.8478e-02, -2.7478e-01,
2.2156e-01, -6.4262e-02, -7.2841e-02,
-3.2334e-01, 6.5591e-02, 1.1163e-01,
7.2151e-02, -1.6943e-01, 5.9049e-02,
-1.4813e-01, -2.0904e-01, -8.8010e-02,
-2.7215e-01, 5.7668e-01, 1.7618e-02,
-7.1365e-02, 1.2976e-01, -1.0169e-01,
-8.9229e-02, 3.3971e-02, 1.8295e-01,
1.7204e-01, 3.8082e-01, 3.7415e-02,
5.9309e-02, -4.9550e-04, 5.1555e-01,
-5.1006e-18, -5.6038e-18, -5.8724e-18,
-5.8910e-18, -5.8379e-18, -5.6311e-18,
-5.2596e-18, -5.1835e-18, -4.6300e-18,
6.4067e-02, 1.8889e-02, -1.0634e-01,
1.7316e-04, 1.9935e-01, -1.1854e-02,
-9.3669e-02, -1.1924e-01, -1.8981e-02,
1.7465e-08, 1.7340e-08, 1.7565e-08,
1.8234e-08, 1.8008e-08, 1.8017e-08,
1.9226e-08, 1.8956e-08, 1.8651e-08,
-1.7294e-01, -1.2200e-01, -4.9577e-02,
-3.5087e-02, -1.2526e-01, 9.3445e-03,
-7.4374e-02, -1.1350e-01, 2.7510e-03,
8.5153e-02, 4.2080e-02, -5.0111e-02,
1.2845e-01, 1.9630e-01, 1.0542e-01,
-1.0095e-01, 6.2631e-02, 8.8734e-02,
3.4836e-01, 5.4389e-01, -2.2360e-01,
5.1721e-01, 5.7094e-01, -6.7491e-02,
-3.5972e-02, 1.0590e-01, -2.2984e-01,
-1.5483e-01, -5.1271e-03, 4.9780e-02,
-1.3184e-01, 2.8028e-01, -1.1427e-02,
-3.4093e-02, -6.7622e-02, -1.2359e-02,
1.3184e-02, 1.2125e-01, -1.2502e-02,
9.2730e-02, -6.5974e-02, -1.6519e-01,
1.9546e-01, -1.5188e-01, -8.1752e-02
}
,
{
-3.4905e-04, -3.5739e-04, -3.2920e-04,
-3.8506e-04, -3.9121e-04, -3.5635e-04,
-3.7303e-04, -3.7698e-04, -3.4190e-04,
2.8622e-41, -1.2033e-41, 1.2609e-40,
-4.9379e-40, -5.1047e-40, 5.5085e-41,
-4.7002e-40, -5.0136e-40, -4.5629e-40,
-5.1095e-40, 1.8741e-40, 1.8435e-40,
4.1851e-40, -8.9558e-41, -9.6681e-41,
-1.8244e-40, 2.7992e-40, 1.8116e-40,
2.8655e-40, -3.0193e-40, 2.2293e-40,
1.6805e-40, 3.3049e-40, 6.9542e-41,
-3.3329e-40, 4.2212e-40, -1.3453e-40,
-8.4502e-15, -1.1099e-14, -9.4174e-15,
-9.8778e-15, -1.1768e-14, -9.4875e-15,
-6.7805e-15, -7.4561e-15, -5.8023e-15,
6.0452e-40, 6.9262e-41, 2.9300e-40,
-6.1511e-40, -4.1269e-40, 4.4012e-40,
1.3340e-42, -2.9020e-40, -4.5529e-40,
-1.2289e-22, -1.3972e-21, -5.5694e-21,
-1.7854e-21, -1.7743e-20, -5.6749e-20,
-6.8510e-21, -6.2353e-20, -1.6203e-19,
-5.0003e-07, -5.1950e-07, -4.7654e-07,
-5.5510e-07, -5.7995e-07, -5.2753e-07,
-5.3262e-07, -5.5802e-07, -5.0971e-07,
-1.4922e-02, -1.1926e-01, -1.9067e-02,
-2.6298e-03, 2.1756e-01, 3.0148e-02,
1.4372e-01, 3.5066e-02, -1.0184e-02,
-4.1698e-12, -4.8798e-12, -6.4033e-12,
-2.3169e-12, -2.7879e-12, -3.7276e-12,
-1.6177e-12, -2.0021e-12, -2.6440e-12,
-5.9514e-40, -4.4339e-40, -3.0315e-40,
3.5756e-40, 2.5390e-40, -1.2253e-40,
2.1417e-40, 4.0569e-40, 5.3962e-40,
-5.5825e-13, -6.8528e-13, -9.3486e-13,
-2.9163e-13, -3.6959e-13, -5.1183e-13,
-1.8703e-13, -2.4740e-13, -3.4019e-13,
-2.7137e-01, -4.5025e-01, 2.6405e-02,
-7.9580e-02, 5.0698e-01, -7.8794e-02,
-3.7540e-02, -7.1115e-03, -3.9741e-01,
-5.9910e-40, -5.5101e-40, 3.1274e-41,
-6.9384e-41, -4.9294e-40, -1.0818e-40,
-3.5484e-40, -4.7965e-41, -5.2508e-41,
4.1917e-01, -1.6207e-02, -6.8506e-02,
-2.7060e-02, 5.6162e-01, 1.6696e-01,
-1.7677e-03, 1.8842e-01, -6.0493e-02,
-3.0696e-01, -1.7293e-01, -8.7143e-02,
-1.6740e-01, 1.8861e-02, -1.7112e-01,
8.6594e-02, 3.0025e-01, -7.6141e-02,
1.1317e-02, 1.0678e-01, -5.1283e-02,
-1.2872e-01, 4.2580e-01, 4.9678e-02,
-2.8372e-01, -1.3479e-01, -7.3813e-02,
-1.7038e-15, -1.1156e-15, -7.3385e-16,
-2.6350e-15, -1.6234e-15, -1.0598e-15,
-7.7860e-15, -4.6981e-15, -3.0030e-15,
-3.0246e-40, -4.1596e-40, 2.9013e-40,
8.5195e-41, -2.2396e-40, -2.0322e-40,
-5.6200e-40, 2.4820e-40, 3.1309e-40,
-3.1822e-17, -1.6585e-17, -8.8616e-18,
-5.9907e-17, -2.9812e-17, -1.6126e-17,
-2.4410e-16, -1.2541e-16, -6.7867e-17,
1.5795e-01, -1.4429e-01, -6.0501e-02,
5.9113e-02, 3.4391e-01, 1.4165e-01,
5.2564e-02, -1.8209e-01, -6.8176e-02,
-7.7363e-41, 5.9969e-40, 5.9290e-40,
-7.4888e-41, -7.0945e-41, 5.3120e-40,
1.3612e-40, -4.6718e-40, -1.0677e-40,
-1.1498e-01, -1.2925e-02, 2.6735e-02,
-8.1469e-02, 2.9678e-01, 1.8971e-01,
2.0149e-02, 2.4207e-03, -1.2549e-01,
-6.6799e-02, -3.5900e-02, -5.6111e-02,
9.5181e-02, 2.1216e-02, 2.0477e-01,
8.5923e-03, 6.8615e-03, 3.8252e-02,
4.5098e-03, 2.1321e-01, 3.4612e-03,
3.5662e-01, 4.7532e-02, 2.5319e-01,
4.1275e-02, 1.7951e-01, 3.2239e-02,
-2.6628e-21, -7.7165e-22, -4.9086e-22,
-1.4320e-21, -2.7134e-22, -1.2712e-22,
-1.9648e-21, -3.4172e-22, -1.3895e-22,
-2.2836e-40, 3.2091e-40, -4.4396e-40,
2.9048e-40, 6.0866e-40, 3.7804e-40,
-3.0676e-40, -2.4897e-40, 4.9891e-40,
-1.8955e-28, -3.4994e-29, -1.2914e-29,
-4.7737e-29, -3.5212e-30, -6.4003e-31,
-8.2908e-29, -3.1692e-30, -3.6909e-31,
-9.3327e-02, 1.5314e-01, 1.0676e-01,
2.5979e-01, -6.6826e-01, 2.3727e-01,
1.4855e-01, 1.9205e-01, 8.8246e-02,
-5.5197e-40, 5.3162e-41, -5.2933e-40,
1.0846e-41, -5.8128e-40, -3.1273e-40,
-2.8408e-40, 1.6989e-40, 4.8221e-41,
7.8403e-02, 1.6407e-01, 7.9932e-02,
3.2253e-01, -2.6036e-01, -8.9727e-02,
-7.5145e-02, 1.5536e-02, -8.2710e-02,
-2.1608e-01, -4.4619e-01, -4.4470e-02,
-3.9430e-01, -8.2373e-01, -7.0646e-01,
-6.9004e-03, -4.9697e-01, -1.4212e-01,
-1.8932e-06, -1.8356e-06, -1.6373e-06,
-1.9427e-06, -1.9113e-06, -1.7028e-06,
-1.8843e-06, -1.8616e-06, -1.6818e-06,
-4.7452e-29, -4.4894e-29, -2.5364e-29,
-5.6268e-29, -5.4363e-29, -3.0876e-29,
-4.3808e-29, -4.2767e-29, -2.4573e-29,
3.8855e-40, 3.5152e-40, -4.8707e-40,
4.3606e-41, -1.7886e-40, 5.1970e-40,
6.2864e-40, 5.9972e-40, 2.2197e-40,
-2.1903e-37, -1.9174e-37, -7.0785e-38,
-2.7149e-37, -2.4810e-37, -9.5619e-38,
-1.8463e-37, -1.7136e-37, -6.7163e-38,
-2.9062e-30, -3.1324e-30, -1.0876e-30,
-2.7434e-30, -3.7036e-30, -1.2821e-30,
-6.8828e-31, -9.8708e-31, -3.7930e-31,
-6.3329e-41, -3.8604e-41, -2.8272e-40,
-3.3350e-40, -1.5210e-40, -4.2620e-41,
-1.7669e-41, 5.2291e-40, -3.3205e-40,
-3.0738e-25, -8.2305e-24, -2.1451e-23,
-1.4470e-24, -4.5131e-23, -1.2177e-22,
-4.2841e-24, -1.3077e-22, -3.5946e-22,
-8.5637e-08, -8.4715e-08, -7.7597e-08,
-8.7326e-08, -8.7480e-08, -8.0290e-08,
-8.4525e-08, -8.4963e-08, -7.8582e-08,
-5.8581e-27, -8.8483e-27, -8.1150e-27,
-7.4336e-27, -1.2036e-26, -1.1909e-26,
-6.6006e-27, -1.0685e-26, -1.0809e-26,
-5.6355e-40, -2.3469e-40, -3.5885e-40,
-2.0755e-40, 2.0377e-40, 3.2259e-40,
-5.3947e-40, 4.2747e-41, 4.8967e-41,
4.5073e-41, 5.0069e-40, 2.6114e-40,
-4.8225e-40, -4.8317e-40, -5.4316e-40,
-5.4335e-40, -5.2994e-40, 2.6295e-40,
-1.1702e-40, -2.3137e-41, -4.5405e-40,
-4.6797e-40, 6.5582e-41, 1.8111e-40,
6.1477e-40, -1.6827e-40, -2.0288e-40,
-2.4220e-41, 4.7774e-40, 5.1050e-40,
4.9844e-40, 5.6437e-41, 4.7749e-40,
-6.8037e-41, -5.5944e-41, -5.2248e-40,
-2.9382e-40, 2.3800e-41, 1.5850e-40,
-4.5290e-40, -5.2260e-41, 2.3726e-40,
-1.9232e-40, -2.3502e-40, -2.9736e-40,
-2.8081e-40, -5.2929e-40, -4.0786e-40,
-3.0303e-41, 3.1336e-40, -5.8450e-40,
-1.5091e-40, -2.7371e-40, -4.5927e-40,
-4.0985e-38, -6.9102e-38, -5.4450e-38,
-6.2744e-38, -1.1526e-37, -9.9374e-38,
-4.8587e-38, -9.1819e-38, -8.0593e-38,
-2.9266e-29, -4.5005e-29, -3.9891e-29,
-3.8505e-29, -6.3370e-29, -6.0017e-29,
-3.2761e-29, -5.4145e-29, -5.1812e-29,
3.3692e-40, 1.0044e-40, -6.6821e-41,
9.2910e-41, 6.2137e-40, -3.5625e-40,
1.8601e-40, 3.1653e-40, -1.1506e-40,
1.2093e-40, -5.7191e-40, 5.6828e-40,
-2.3177e-40, -2.1648e-40, 5.3642e-40,
4.8826e-40, 5.2760e-40, -4.9059e-40,
-2.0721e-40, 2.0122e-40, -5.9485e-40,
3.8843e-40, -6.0861e-41, -4.0542e-40,
-3.4308e-40, -4.2822e-40, -3.9605e-40,
-5.7429e-40, 4.9242e-40, -5.9141e-40,
4.6267e-40, -2.4953e-40, -2.9300e-40,
5.3466e-40, -5.2403e-40, 3.5178e-40,
-1.8309e-40, 2.9157e-40, -7.7367e-41,
-5.8922e-40, 3.2359e-40, -6.1293e-40,
6.1138e-40, 2.2121e-40, -5.0657e-42,
4.7910e-40, -1.4080e-40, 1.9220e-40,
-3.5670e-40, 3.4204e-40, -5.0215e-40,
1.1877e-41, 2.3114e-40, -4.7794e-40,
-3.6520e-40, 4.3222e-40, -5.2866e-40,
-6.0703e-40, -4.0896e-40, -1.2521e-40,
-4.1981e-40, 5.4404e-41, 3.3337e-40,
1.3733e-01, 1.8485e-01, 7.6179e-02,
8.1719e-02, 3.3343e-01, 2.9857e-02,
-4.2753e-03, 2.0957e-01, 1.8582e-02,
2.9948e-07, 3.3403e-07, 3.7619e-07,
3.4854e-07, 3.8224e-07, 4.1507e-07,
3.7511e-07, 4.0398e-07, 4.3743e-07,
-1.7150e-41, -2.4088e-41, -1.5593e-40,
6.3817e-41, 4.8004e-41, -1.1053e-40,
-2.5225e-40, -2.7111e-40, -4.2970e-40,
1.0496e-06, 1.0916e-06, 1.1376e-06,
1.1364e-06, 1.1756e-06, 1.2051e-06,
1.1762e-06, 1.2105e-06, 1.2358e-06,
1.0037e-02, 1.4957e-01, -4.9010e-02,
2.6877e-02, 1.9067e-01, -1.9339e-03,
-2.2081e-02, -1.5137e-01, -1.6088e-01,
1.6880e-41, -2.0352e-41, -4.1857e-42,
2.0926e-40, -2.1394e-41, -5.4341e-40,
4.6824e-40, 6.2682e-40, 4.9865e-40,
-3.2967e-01, -2.5981e-01, -1.3016e-01,
-2.6507e-01, 3.2282e-01, 4.3204e-01,
-7.0936e-02, 1.9800e-01, 9.4916e-02,
-1.0122e-02, 7.4127e-02, -7.1554e-02,
7.7869e-02, 1.5734e-01, 1.3287e-01,
-9.5431e-02, 1.0984e-01, -7.6759e-02
}
,
{
-5.5262e-40, 3.7699e-40, -1.4920e-40,
4.0064e-40, -2.0632e-40, -4.4801e-41,
-3.6749e-40, 5.9043e-40, -1.5942e-40,
-5.9219e-42, -4.1286e-40, -1.6920e-40,
-2.5927e-40, -4.5458e-41, 2.0990e-40,
-4.6860e-40, 5.0483e-40, 2.8004e-40,
-4.0641e-40, 6.0770e-40, -3.8297e-42,
5.7537e-40, 5.7772e-40, -1.0048e-40,
1.5945e-40, 3.9582e-40, -2.6190e-40,
-5.1046e-40, -5.5028e-40, 5.8786e-40,
-3.5033e-40, -1.2031e-40, -3.4156e-40,
3.0058e-40, 4.3043e-40, 5.9825e-40,
4.9197e-40, 2.5974e-40, -4.3461e-41,
-4.1935e-40, -1.6383e-41, -1.4680e-40,
-5.3501e-40, -2.6348e-40, 3.0631e-40,
-5.2019e-40, -4.4123e-40, 2.3984e-40,
-4.4682e-41, -4.6000e-40, -5.0418e-40,
-4.1263e-40, 4.5391e-40, 2.8844e-40,
5.2179e-40, -1.3188e-40, 5.1600e-40,
-2.2913e-40, -3.1127e-40, 5.4478e-40,
2.3395e-41, 5.4758e-40, 2.0998e-40,
-1.9914e-10, -2.0700e-10, -1.9815e-10,
-2.1098e-10, -2.1989e-10, -2.1131e-10,
-2.0797e-10, -2.1693e-10, -2.0860e-10,
-2.1061e-40, -2.1208e-40, -3.3698e-40,
3.2370e-40, 2.9276e-40, -3.6860e-40,
3.4752e-40, -2.0660e-40, -3.8183e-40,
-8.0136e-02, 1.3809e-02, 1.6846e-03,
3.7960e-02, 8.7557e-02, -3.5498e-01,
9.8165e-03, 9.8384e-02, 1.2395e-01,
-2.8751e-02, 9.9172e-02, 5.5841e-02,
-4.0383e-02, 1.0856e-01, -5.4339e-01,
1.3245e-02, -4.7642e-02, -1.0427e-01,
-7.4696e-03, 5.0806e-02, -1.7179e-01,
5.0303e-02, -4.0322e-01, 7.4760e-01,
-9.2342e-02, 1.1958e-01, -1.8871e-01,
3.7044e-40, -4.6951e-40, -1.9873e-40,
5.3289e-41, 2.7689e-40, -4.6994e-41,
-3.1404e-40, -5.9106e-40, 6.0436e-40,
-6.0294e-40, -3.6565e-40, -1.1884e-40,
5.5933e-40, -9.5741e-41, 4.4736e-40,
4.3267e-40, -4.9583e-40, 3.4437e-40,
-1.7432e-40, 1.4518e-40, 2.1033e-40,
-3.4667e-40, 1.7222e-40, -2.5651e-40,
-5.2517e-40, 2.8983e-41, -1.3832e-40,
-1.4153e-01, 9.4023e-02, -9.8526e-02,
2.0678e-01, 4.0842e-01, -1.1853e-01,
-1.4108e-01, -1.1005e-01, -8.1274e-02,
3.4336e-41, 1.5625e-40, 2.7213e-40,
-5.3447e-40, -3.7330e-40, -3.3637e-40,
-4.3563e-40, -3.7094e-40, 1.2820e-41,
-8.1700e-02, -1.8215e-01, -1.6011e-01,
-1.4203e-01, 5.3791e-02, -3.7663e-02,
-1.1705e-01, -1.2604e-01, -8.4890e-03,
-6.1578e-02, -3.3907e-01, 2.2344e-03,
1.5060e-01, -1.9199e-01, -5.5274e-02,
6.2300e-02, 9.1084e-02, 1.3788e-02,
4.9025e-02, 3.3738e-01, -1.8104e-01,
-2.5051e-01, 8.2363e-02, 2.0325e-01,
5.6988e-02, -1.5118e-01, 6.8897e-02,
-4.6233e-40, 1.2244e-40, -3.9802e-40,
5.8530e-40, -2.4162e-40, 4.6793e-40,
-4.8362e-40, 3.3071e-40, 1.7094e-40,
3.5249e-40, -4.8579e-40, 1.9374e-40,
6.2372e-42, 5.8402e-41, 3.2851e-40,
6.1488e-40, 1.8086e-40, -5.2451e-40,
-3.0723e-40, -5.6704e-40, -5.9899e-40,
-3.5975e-40, -1.3818e-40, -2.7285e-40,
2.4468e-40, 8.3606e-41, 1.8818e-40,
-2.3749e-01, -2.7008e-01, -1.5222e-03,
1.4806e-01, 9.0783e-02, 2.7170e-02,
1.8706e-01, 1.8162e-01, -1.1799e-01,
-1.9852e-40, -4.8879e-40, -3.1971e-40,
-1.0245e-40, 9.1421e-41, 5.3018e-40,
2.2240e-40, -1.4666e-40, -4.4259e-40,
1.1835e-01, -2.7624e-01, 1.1446e-01,
1.3574e-01, 4.3109e-01, 1.3227e-01,
3.2554e-02, 1.7139e-01, -1.1988e-01,
3.5376e-02, 8.9191e-02, 6.7643e-02,
-8.2716e-02, 2.4178e-01, 6.0818e-02,
-6.7722e-02, -3.3712e-02, 3.0664e-02,
-6.6948e-02, 2.2886e-01, 1.8143e-01,
1.8636e-01, -2.4800e-01, 1.7185e-01,
-6.5479e-03, 1.8828e-01, -7.4464e-02,
-2.8281e-30, -5.8969e-31, -2.3180e-31,
-1.6163e-30, -3.8426e-31, -1.6788e-31,
-1.9412e-30, -4.1995e-31, -1.7651e-31,
-2.0525e-40, 4.6680e-40, 5.9108e-41,
1.0336e-40, -5.7226e-41, -6.1906e-40,
-1.8693e-40, 5.5777e-40, 6.0898e-40,
-3.4735e-41, -3.2674e-40, -2.3864e-41,
-3.3596e-40, 3.3107e-40, 1.0843e-40,
5.1103e-40, 6.0598e-40, -3.6267e-40,
-4.5583e-03, -1.0635e-01, -7.4962e-02,
-1.2741e-01, 2.7234e-01, 1.0508e-01,
-2.1207e-01, 9.6720e-02, 3.4641e-02,
1.1304e-12, 1.1614e-12, 9.7086e-13,
1.3361e-12, 1.3697e-12, 1.1286e-12,
1.2620e-12, 1.2938e-12, 1.0680e-12,
-8.4197e-02, 6.3834e-02, 2.3157e-02,
-2.1280e-02, 2.9074e-01, 8.5883e-02,
-1.3695e-01, -1.6047e-01, -4.5834e-02,
-1.3848e-01, -6.6090e-02, -7.7201e-02,
-5.1963e-02, 6.0643e-02, -4.9932e-02,
1.1779e-01, 1.7521e-01, 3.0366e-02,
4.7601e-03, 4.3941e-02, -3.5985e-02,
1.7692e-02, -2.3705e-01, 2.1062e-01,
7.7174e-02, -7.6616e-02, 2.0102e-02,
-3.6353e-06, -3.5534e-06, -3.2461e-06,
-3.6813e-06, -3.6196e-06, -3.3222e-06,
-3.5581e-06, -3.5179e-06, -3.2504e-06,
-7.3892e-11, -7.2930e-11, -6.8104e-11,
-7.9244e-11, -7.7770e-11, -7.2319e-11,
-7.7297e-11, -7.5673e-11, -7.0195e-11,
-1.5180e-10, -1.5027e-10, -1.4244e-10,
-1.6013e-10, -1.5761e-10, -1.4940e-10,
-1.5682e-10, -1.5395e-10, -1.4553e-10,
-9.1167e-02, 1.2374e-01, -3.8304e-02,
2.2641e-01, 2.4855e-01, -4.3174e-02,
1.4364e-01, 1.8438e-01, 1.1617e-02,
6.1925e-40, 3.3333e-40, 1.8962e-40,
3.2481e-40, -1.7566e-40, -3.0456e-40,
2.7654e-40, 3.8422e-41, 4.9191e-40,
7.5657e-02, -1.0697e-03, 3.0319e-02,
-4.7642e-02, -9.4454e-02, -2.6543e-02,
-5.3129e-02, -1.9667e-01, -1.0851e-01,
-8.5909e-03, 1.2177e-01, 2.6434e-01,
2.4468e-02, 5.0484e-02, 3.4698e-01,
-1.4764e-03, 3.7374e-02, 1.2658e-01,
2.0602e-02, -2.4624e-02, 1.3741e-01,
1.8641e-02, 4.0484e-01, 3.2976e-01,
-4.4809e-01, -3.2104e-03, 1.6290e-03,
8.1306e-41, 2.0311e-40, 2.9683e-40,
-5.7636e-40, 4.4291e-40, 4.3356e-40,
-7.1797e-41, 4.5366e-40, 3.9953e-40,
-4.5418e-40, 4.1805e-40, -3.2458e-41,
-9.4881e-41, -8.6365e-41, -1.9294e-40,
7.1954e-41, -9.8565e-41, -5.5540e-40,
-5.3769e-40, 1.4094e-40, -1.5355e-40,
8.8038e-41, -3.6848e-40, -1.2237e-40,
-2.8267e-41, -1.7583e-40, -5.9647e-40,
1.0929e-01, 2.9895e-02, -1.4923e-01,
-1.1234e-01, -1.0514e-01, -1.3280e-02,
2.2255e-01, 6.4152e-03, -1.6309e-02,
-1.5899e-40, -7.2549e-41, -2.6734e-40,
-3.3842e-40, 3.3255e-40, 4.2694e-40,
5.2940e-40, 3.2455e-40, -3.7081e-40,
6.3639e-02, -3.3720e-02, -2.3453e-02,
1.9477e-01, 5.2267e-02, 1.8565e-02,
1.6048e-01, 2.7636e-01, 1.5930e-02,
1.7673e-03, 6.3646e-02, -1.5127e-02,
-3.7787e-02, -1.4037e-01, -3.6231e-02,
-1.5636e-02, -7.8742e-02, -2.4137e-02,
-5.0748e-02, 6.5641e-02, -2.5353e-03,
8.4955e-02, 7.4231e-01, 1.3795e-01,
-1.4552e-01, 2.0869e-01, 4.0739e-02,
-2.0015e-41, 5.2988e-40, 2.7578e-40,
4.1051e-40, 1.2834e-40, -3.4898e-40,
-1.1975e-40, 4.2374e-40, -3.0404e-41,
-6.3014e-40, 4.6330e-40, -4.4141e-41,
2.5442e-41, 5.7456e-40, 2.3848e-40,
-1.0788e-40, -5.0563e-40, -5.3638e-41,
3.5728e-40, 1.9752e-40, 6.1004e-40,
2.8189e-41, -6.2151e-40, 1.1807e-41,
6.5305e-41, 5.2028e-40, 1.3692e-40,
6.4391e-02, -1.3079e-01, -3.7980e-02,
-3.2362e-01, -3.7239e-01, -8.0182e-02,
-2.6787e-01, -3.1240e-01, -1.2798e-02,
-1.2072e-40, 5.3996e-40, -3.4352e-40,
-8.0996e-41, -3.0208e-40, 3.1848e-40,
-5.6407e-40, 2.4674e-41, -2.1055e-40,
-9.2897e-02, 1.8040e-01, -4.3269e-01,
-7.6669e-02, 4.3554e-01, -4.4870e-02,
-2.3249e-02, -1.1805e-01, 1.0507e-01,
-5.2540e-02, -3.6856e-01, 1.1246e-01,
-2.3632e-02, 1.3165e-01, -1.5380e-02,
-1.1467e-02, -5.3754e-02, -4.1619e-02,
-1.5635e-01, 3.8584e-01, -1.4434e-01,
1.7523e-01, 3.7253e-02, 4.9784e-01,
5.8484e-02, -8.4711e-02, -7.7498e-02,
-1.6956e-40, 5.4293e-41, -2.5140e-40,
-3.1995e-40, -4.8337e-40, 2.5539e-40,
-1.1449e-40, 1.9503e-40, -1.7368e-40,
5.4753e-40, 5.9720e-40, -4.7821e-40,
3.8830e-40, -3.1984e-40, -2.7163e-40,
-5.3411e-40, 7.2638e-41, 4.3186e-40,
4.6654e-40, -5.9540e-40, -2.8155e-40,
-1.4801e-40, -1.6945e-40, 1.9723e-40,
5.8380e-40, -6.1587e-40, 3.3667e-40,
-2.9327e-02, -4.2746e-02, -1.5018e-01,
8.6354e-02, 2.8140e-01, 1.2970e-02,
-2.0755e-01, 6.7548e-02, -3.6049e-02
}
,
{
9.5728e-41, 5.3991e-40, -1.3764e-40,
-2.0389e-40, 2.4254e-40, 3.3492e-40,
6.5289e-41, -3.0842e-40, 5.5850e-40,
7.7599e-02, 2.5043e-02, -1.4099e-02,
-3.3184e-02, 5.6863e-01, -2.7001e-02,
-5.2659e-02, 5.4713e-02, 2.3991e-03,
2.2010e-02, -3.9120e-02, -1.1558e-01,
9.1633e-02, 1.3070e-01, 1.2489e-01,
-4.4040e-02, -1.6324e-02, -4.9631e-02,
-7.3548e-02, -2.0492e-01, 1.4043e-01,
-6.0411e-02, 5.7710e-02, -3.6840e-02,
1.3173e-02, 2.3215e-03, 1.1820e-02,
2.5772e-02, -1.3436e-01, -5.9285e-02,
-9.3983e-02, 1.1545e-01, 1.1602e-01,
-1.8505e-02, 6.1498e-02, -1.3097e-02,
9.8690e-03, -2.1338e-02, -1.2175e-01,
1.7936e-02, -2.7811e-02, 6.7037e-02,
-5.1401e-03, 7.6421e-02, -1.0794e-01,
4.6409e-02, 3.4701e-01, 2.6587e-02,
8.4175e-02, 5.2712e-01, 6.8999e-02,
-8.0756e-02, 1.9648e-01, -8.4639e-02,
1.2818e-01, 4.0660e-02, 7.6715e-02,
8.7991e-02, 4.6556e-01, -4.0025e-02,
2.1251e-03, -8.3784e-03, 5.9859e-02,
1.9835e-40, -3.4675e-40, -7.9692e-41,
-1.4304e-40, 2.3927e-40, -5.9796e-40,
3.8209e-40, -6.3260e-41, -9.2501e-41,
3.2007e-01, 1.5800e-01, -1.9594e-02,
-4.5315e-02, 1.0536e-01, -8.0692e-02,
2.1185e-01, -3.1418e-01, -1.5257e-01,
8.6294e-02, -1.3398e-01, -1.0694e-01,
8.6084e-02, -1.2393e-03, 1.7549e-02,
-1.5504e-01, -1.3112e-01, -3.5905e-02,
-3.8190e-01, 3.8393e-01, 1.6587e-02,
1.5002e-01, 1.9586e-01, -2.6260e-01,
-4.0159e-02, -8.2891e-02, -1.7761e-01,
-1.8611e-01, -1.1241e-02, -4.2538e-02,
-5.7898e-02, 2.4583e-01, 4.1590e-02,
2.4890e-02, 7.9409e-03, -2.7418e-02,
6.6194e-03, -4.2441e-02, -1.1167e-01,
-1.3236e-01, -7.9642e-02, -6.0623e-02,
-4.7198e-03, 5.6904e-02, 1.2651e-01,
1.2925e-01, -5.9162e-02, -9.1949e-04,
1.8668e-02, -2.6361e-02, -7.1042e-03,
-4.3178e-02, 2.6050e-04, 4.4799e-02,
7.9674e-02, 2.7656e-02, 7.1211e-03,
1.1463e-01, 1.0765e-01, 7.6066e-02,
-8.0780e-02, -5.4875e-02, 1.5209e-02,
-3.7365e-13, -3.7819e-13, -3.5929e-13,
-4.0298e-13, -4.0881e-13, -3.9033e-13,
-3.9409e-13, -3.9950e-13, -3.8277e-13,
-1.7847e-02, -1.7537e-02, -3.7313e-03,
2.6531e-02, 7.5951e-02, -4.0134e-03,
1.7387e-02, 6.0044e-02, -9.0211e-02,
2.7091e-02, 8.8333e-02, 1.0619e-01,
5.0470e-02, 1.2406e-02, 1.5503e-01,
-1.5936e-02, -2.2422e-01, -2.4640e-02,
-8.2430e-03, -1.4097e-02, -6.2474e-02,
8.0534e-02, 1.8603e-01, -3.1725e-02,
-3.1621e-03, 2.0362e-03, -1.4002e-01,
-7.3799e-03, 1.5881e-01, 6.7195e-02,
4.5946e-02, 2.4358e-01, 1.4677e-01,
-7.4788e-02, 6.7297e-02, 9.0735e-02,
-8.4553e-03, -1.1877e-02, 4.4209e-02,
-1.4281e-02, -6.8849e-02, -4.1386e-03,
3.2286e-02, 4.7128e-02, -1.2988e-02,
-2.2990e-02, -8.9265e-02, 6.4050e-02,
-2.3354e-02, 1.3846e-01, -1.6256e-01,
-6.5661e-02, -2.8983e-02, -4.3497e-02,
1.0597e-02, -2.3534e-02, -2.6068e-02,
-7.8812e-02, 1.9502e-01, 6.8938e-03,
3.2025e-02, 2.3353e-02, 4.9225e-02,
-5.0273e-40, 1.2403e-41, 5.8127e-40,
3.2777e-40, -3.5740e-40, 4.9781e-40,
-2.4198e-40, -4.6311e-40, 1.3330e-40,
-3.0803e-01, 1.7804e-01, 1.0604e-01,
4.1405e-01, 1.9740e-01, -5.3067e-02,
2.3738e-01, -1.6828e-01, 1.5338e-01,
6.6857e-03, 1.8623e-01, -1.2126e-01,
-1.6323e-01, -1.2719e-02, -1.7743e-01,
-1.3612e-01, -3.4442e-02, -1.0552e-01,
-1.4560e-01, 1.8771e-01, 8.4508e-02,
5.8732e-02, -2.2378e-01, 1.2673e-01,
3.0455e-03, 3.8438e-02, -6.2235e-02,
1.9951e-02, 2.6963e-01, -1.8594e-01,
-8.6550e-02, -1.3097e-01, -3.5032e-02,
2.0423e-02, -9.0499e-02, 1.7130e-01,
-1.8592e-01, 6.6808e-02, -1.5768e-01,
-6.4402e-02, -1.2265e-01, 6.8487e-02,
1.9899e-02, 9.3376e-02, 7.8577e-02,
-1.3384e-01, -7.6429e-02, 1.7142e-02,
-1.2385e-01, -1.1821e-01, -1.2716e-03,
5.3770e-02, 1.4973e-01, 1.4762e-01,
-4.7688e-02, -1.1733e-01, -1.5032e-01,
-2.0699e-01, -9.4949e-02, -2.6374e-02,
4.4489e-02, 1.8376e-02, -7.6844e-02,
1.8831e-40, -2.6056e-40, -4.7602e-40,
-3.4079e-40, 1.5054e-40, 1.2387e-40,
2.3040e-40, 1.4644e-40, 5.6365e-40,
-2.0809e-02, 5.3674e-03, 1.7057e-03,
2.4160e-01, 4.1348e-01, 3.5215e-02,
8.2154e-02, 2.0431e-01, 1.0366e-01,
-1.5149e-02, 1.0521e-01, -4.1706e-02,
-5.0651e-02, 2.3615e-02, -9.3860e-02,
-1.0823e-01, -6.3645e-02, -1.1573e-01,
-2.4116e-02, 1.3546e-02, -1.0298e-03,
1.2102e-02, 2.2630e-02, 1.1375e-01,
1.3966e-02, 1.0754e-01, 1.6621e-01,
1.6213e-02, 2.0816e-01, 8.9441e-02,
-7.5452e-02, 3.4580e-03, -3.3317e-01,
5.0917e-02, 1.3898e-01, -1.0723e-01,
6.0473e-03, 8.9741e-02, -6.8206e-02,
-7.1770e-02, -3.5661e-01, -2.8935e-01,
-1.6324e-02, 2.5728e-02, -1.1281e-02,
-1.3390e-01, -9.3090e-02, 4.3366e-02,
4.8620e-02, 1.4917e-01, 1.6295e-01,
2.4123e-03, -7.6347e-02, -8.0226e-02,
6.0740e-03, 3.7065e-02, 4.5518e-04,
-1.3793e-01, 2.3848e-01, -1.1199e-01,
1.0422e-01, 1.1214e-01, 3.3457e-02,
-3.2827e-40, 5.9135e-40, 3.3773e-40,
-5.8903e-40, -5.9439e-41, 1.9973e-40,
-3.6141e-40, -4.7563e-40, -1.0222e-40,
7.3457e-02, -8.2031e-02, -2.9504e-02,
-5.3420e-02, 4.9697e-02, 7.6779e-03,
2.1180e-02, 1.1069e-02, -1.1940e-02,
1.7302e-02, 9.9063e-02, 4.8847e-02,
4.9513e-02, 2.4240e-01, 2.7174e-01,
2.7487e-01, 1.9410e-01, 3.1165e-01,
-6.7532e-03, -1.1608e-01, -5.0876e-02,
1.2107e-01, 3.1073e-01, 7.1681e-02,
-1.1411e-01, -1.7902e-01, 7.8898e-02,
-2.0117e-02, 3.6394e-01, 1.4546e-01,
-8.0861e-03, -4.3956e-02, -1.3473e-01,
5.1519e-02, -3.1122e-01, -4.6847e-02,
5.0405e-02, -1.0611e-02, -1.0557e-01,
-4.4346e-02, -1.4505e-01, 5.3977e-02,
-2.6288e-01, 1.8247e-02, -1.1606e-01,
1.0706e-01, -9.3675e-02, 1.1757e-01,
-5.0440e-02, -1.1784e-01, -4.0599e-02,
1.9618e-01, 9.9370e-02, 8.2258e-02,
2.6762e-02, -5.0740e-02, -1.8302e-02,
5.3340e-02, 6.5710e-02, 6.1552e-03,
-7.2158e-02, -3.5563e-02, 8.2140e-02,
3.1534e-40, 3.6427e-40, 3.0437e-40,
4.2856e-41, -4.7870e-40, 5.6317e-40,
-2.4673e-40, -6.9736e-41, 8.1050e-41,
1.4544e-01, 8.2490e-02, -9.2349e-03,
2.6124e-01, 2.7494e-01, -5.4946e-02,
1.8233e-01, 1.2428e-01, -6.7498e-03,
9.7639e-02, -6.2085e-03, 4.8154e-02,
2.7379e-02, -1.8443e-01, 4.0402e-02,
1.8893e-03, -5.2282e-03, 6.7548e-03,
-1.6559e-01, 9.7901e-02, -1.1869e-01,
-2.1287e-01, 4.1023e-01, -9.7379e-02,
-1.3767e-03, -1.6343e-01, -9.5059e-02,
-1.3547e-01, 2.0094e-01, 1.0102e-01,
-2.1311e-01, -1.5088e-01, 1.8175e-01,
4.6946e-02, -1.3963e-01, 1.0220e-01,
1.7536e-01, -2.4758e-01, -1.1481e-02,
6.1596e-02, -4.0352e-01, -1.4348e-01,
3.1690e-02, 1.7240e-01, 7.0780e-02,
9.9953e-02, -1.4154e-01, -8.3038e-02,
1.4527e-01, -2.1430e-01, -7.5840e-02,
1.6146e-01, 3.7508e-02, 5.3833e-02,
1.6723e-01, 1.7113e-01, -4.8512e-02,
2.1319e-01, 4.7031e-01, 1.1570e-01,
2.0330e-01, 2.4636e-01, 6.9924e-02,
-2.1165e-40, -1.9259e-40, -5.0990e-41,
-7.1298e-42, -4.2590e-41, 3.1709e-40,
4.1065e-40, -4.2585e-41, 3.4243e-40,
-1.0338e-40, 4.6039e-40, -3.3818e-40,
-3.9589e-41, 5.9574e-40, -5.8014e-41,
1.4505e-41, -3.5326e-40, -3.9806e-40,
4.2423e-40, -1.7055e-40, -4.9666e-40,
2.2853e-40, -2.4684e-40, -1.3794e-40,
-5.6764e-40, -1.7905e-40, -5.8915e-40,
-1.4755e-27, -2.0405e-28, -4.8677e-30,
-7.1151e-28, -9.7603e-29, -3.5264e-30,
-2.7455e-29, -5.7734e-30, -2.8633e-31,
-5.9960e-06, -5.9595e-06, -5.8686e-06,
-6.0381e-06, -6.0191e-06, -5.9605e-06,
-5.9849e-06, -5.9981e-06, -5.9654e-06,
-4.8277e-22, -7.0529e-22, -8.7179e-22,
-4.6334e-22, -6.3505e-22, -8.8438e-22,
-3.3883e-22, -4.2421e-22, -5.9002e-22,
-2.9574e-40, 4.0860e-40, -1.5966e-40,
-6.7527e-41, 7.6661e-41, -5.9491e-40,
3.0843e-40, 8.1079e-41, -2.5140e-40,
-3.7315e-40, 9.4787e-41, 4.6794e-40,
1.9383e-40, 5.0336e-41, 3.0561e-40,
-5.4286e-40, 5.5999e-40, -4.6977e-40
}
,
{
-1.7778e-01, 5.2351e-03, 1.6035e-02,
-9.7482e-02, -1.1056e-02, -5.0999e-02,
1.7460e-01, -4.0005e-02, -5.0911e-02,
-9.3843e-02, 1.2640e-01, -1.5016e-02,
-5.2880e-01, 1.9469e-01, -9.0037e-02,
-8.9136e-02, 9.8632e-02, -1.5009e-01,
-1.8080e-01, 1.1396e-01, -2.6178e-02,
-1.6689e-02, 1.4132e-01, -6.7769e-03,
-2.1120e-02, 6.8616e-02, -7.8209e-02,
4.8237e-02, -2.5303e-02, 1.7882e-02,
-4.2852e-02, -1.5071e-02, -3.3818e-02,
1.3635e-01, 4.5330e-01, 2.1489e-01,
2.7362e-02, -7.4152e-02, 2.3185e-03,
1.8771e-01, -2.0827e-02, -7.5581e-02,
1.4675e-01, -6.5552e-02, 4.2292e-02,
1.3990e-01, -4.1598e-01, 2.1609e-03,
1.5997e-01, 1.1375e-01, -1.8272e-02,
1.9045e-02, -4.2702e-02, -2.5602e-02,
1.6432e-01, -1.2783e-01, -1.8285e-03,
2.9414e-01, 1.7401e-01, -2.6321e-01,
-1.0125e-01, 1.3565e-01, 1.5894e-02,
-3.7351e-40, 6.3010e-40, -1.2071e-40,
-4.6380e-40, 1.8442e-40, -3.5994e-40,
-2.1459e-40, -4.3455e-40, -6.1978e-41,
-2.3638e-40, -4.6965e-40, -3.4232e-40,
-1.6517e-40, 4.7178e-40, -1.6757e-40,
6.7890e-41, -4.3000e-40, 1.8323e-40,
4.5416e-40, -2.9010e-40, -1.5200e-40,
-3.5533e-40, -8.7351e-41, 6.5595e-42,
5.1625e-40, -6.0418e-40, -2.7846e-40,
-2.1861e-10, -2.2422e-10, -2.1298e-10,
-2.2653e-10, -2.3500e-10, -2.2512e-10,
-2.1802e-10, -2.2681e-10, -2.1608e-10,
-3.2862e-40, 3.4241e-40, -1.3264e-40,
2.8762e-40, 1.3843e-40, 3.0949e-40,
-3.7702e-40, 2.6194e-40, 2.1451e-40,
-3.2283e-40, -5.5487e-40, 5.8744e-40,
1.6124e-40, 3.3512e-40, 3.1454e-40,
-3.5417e-40, -5.7692e-40, 5.5184e-40,
3.5641e-40, -4.3187e-40, -3.5314e-40,
4.9246e-40, 5.9593e-40, 8.3132e-41,
-2.3841e-40, -5.6196e-40, -3.2230e-41,
4.3824e-40, -3.8344e-40, -9.9086e-42,
-2.9323e-40, 2.1916e-40, 4.4739e-40,
5.6837e-41, 5.1796e-41, -2.4338e-40,
-2.2853e-40, -3.8920e-40, 6.1587e-40,
-2.9474e-41, 4.6214e-40, -3.6292e-40,
-1.4928e-40, -3.6708e-41, 5.2020e-40,
-1.2983e-12, -2.6539e-12, -1.9817e-12,
-6.5613e-12, -1.0255e-11, -6.6919e-12,
-8.3217e-12, -1.7832e-11, -1.1086e-11,
-4.9138e-40, -9.0061e-42, 4.6251e-40,
-2.9970e-41, -2.5468e-40, -3.5660e-40,
2.5450e-40, -9.5634e-38, -3.2369e-32,
-1.0233e-06, -8.2108e-07, -1.1668e-06,
-5.9592e-07, -3.9529e-07, -5.7435e-07,
-6.0253e-07, -3.8785e-07, -4.9365e-07,
-8.9372e-37, -2.1590e-36, -2.1060e-40,
-1.5666e-35, -1.1466e-38, -2.3366e-40,
-5.4077e-38, 5.0487e-40, -3.3736e-40,
-1.5357e-13, -8.4607e-14, -1.9206e-16,
-5.5373e-13, -3.0787e-13, -1.0513e-15,
-1.0468e-13, -8.6069e-14, -2.2453e-16,
-4.7501e-14, -1.3426e-13, -1.1133e-13,
-1.3801e-14, -2.4024e-14, -3.5120e-14,
-1.9817e-17, -1.3229e-17, -3.2854e-17,
-1.4365e-18, -4.1143e-15, -9.2614e-14,
-1.1174e-19, -1.6235e-15, -1.5600e-13,
-1.2643e-21, -3.9578e-17, -1.2038e-14,
-2.9789e-40, -4.6452e-40, 1.5649e-40,
-1.8445e-40, -5.2942e-40, 2.5130e-40,
6.2269e-40, 3.9166e-41, -2.4197e-40,
9.0835e-02, -5.2035e-03, -2.5980e-02,
-1.0090e-01, -7.4167e-02, 1.3364e-01,
1.0302e-01, -1.5250e-01, 1.2417e-01,
4.7205e-02, -2.3839e-01, -1.4983e-02,
5.6824e-02, -1.8259e-02, 9.6426e-02,
5.9740e-03, -1.4198e-01, -2.1076e-01,
-1.5837e-01, 6.4749e-02, -2.1417e-01,
-3.4048e-02, 4.9638e-01, 2.0984e-03,
-1.4335e-01, 4.8295e-02, -9.2209e-02,
1.9450e-01, -1.3603e-01, 1.2008e-01,
1.6803e-01, 5.6805e-02, 1.1518e-01,
5.9320e-02, -3.8200e-02, -1.1340e-01,
-8.6877e-02, 1.1533e-01, -4.9870e-02,
-7.2811e-03, 2.5730e-01, -1.8536e-01,
-6.4965e-02, 1.0364e-01, 1.3706e-02,
4.6974e-02, -1.0049e-01, -1.7460e-01,
-1.7910e-01, 3.0771e-01, -2.5757e-01,
-2.2846e-02, -3.7491e-03, -5.2171e-03,
-4.7762e-02, -4.7776e-02, 5.1125e-01,
-2.0210e-01, 6.4815e-02, -6.1606e-02,
7.3686e-04, -1.6226e-01, -3.0327e-02,
5.6501e-40, 5.2828e-40, -5.9773e-40,
-4.3530e-40, -1.1658e-40, 4.9705e-41,
4.8101e-40, 5.0236e-40, 2.0476e-40,
-1.1412e-01, 1.3391e-01, -1.2279e-01,
1.4370e-01, 3.7617e-01, 7.1407e-02,
6.9661e-02, 3.1963e-01, -1.7089e-02,
-4.7530e-02, 6.5411e-02, -2.4915e-02,
3.3429e-02, -1.3899e-01, -3.3875e-02,
-1.9261e-02, -1.3162e-01, 1.1415e-01,
2.0599e-02, -3.8667e-02, -7.2190e-02,
-2.1112e-01, -1.6525e-01, -2.3430e-02,
-1.2287e-02, -2.6637e-01, 1.0859e-03,
-2.8564e-02, 4.8846e-02, 4.2412e-02,
1.4632e-01, 1.5974e-02, -1.0699e-01,
5.5661e-02, -2.0952e-01, 2.4151e-02,
-2.3510e-02, -5.0570e-02, 1.0799e-01,
1.7495e-01, -1.5788e-03, -1.6447e-02,
7.7642e-02, -9.3888e-02, 1.3891e-03,
2.2658e-02, 1.4058e-01, 1.0639e-01,
-5.5626e-02, -3.0794e-01, -5.7160e-02,
1.0874e-01, -8.3907e-02, 4.2106e-02,
1.7688e-02, 1.8090e-01, -2.1718e-03,
-1.0659e-02, -2.1302e-01, 1.0056e-01,
-6.0693e-02, -2.3624e-02, 6.3688e-03,
-2.7320e-40, -1.3336e-40, 2.4202e-41,
-7.1225e-41, 1.2848e-40, 1.5426e-40,
-4.2798e-40, 6.5075e-41, 6.2629e-40,
1.6905e-01, -1.7379e-01, -2.1360e-02,
-2.9396e-01, 1.1782e-01, 7.9111e-02,
-6.4767e-03, -1.9949e-01, 5.4243e-02,
-3.2753e-02, -1.5810e-01, 5.2257e-02,
-1.8133e-02, 2.0548e-01, -2.8071e-01,
-5.3725e-02, 8.4067e-02, -7.4639e-02,
8.9137e-02, -2.3078e-01, -1.9626e-01,
3.1276e-01, 1.5332e-01, -1.9590e-01,
-1.8318e-02, 6.8460e-02, 9.1476e-03,
8.2398e-02, 8.5883e-03, 7.6830e-02,
-1.4580e-01, 4.6253e-01, -3.1900e-01,
-1.1051e-01, 6.3807e-02, -2.5130e-02,
-1.2029e-01, -3.8982e-03, 2.1654e-02,
-3.2017e-01, 2.0265e-01, -1.7311e-01,
-1.3229e-02, 1.3805e-01, -6.2689e-02,
-3.6619e-02, -1.9366e-01, 2.7177e-01,
5.5937e-02, 7.9713e-02, -2.3872e-01,
-3.9690e-02, 2.2914e-02, -1.7779e-02,
1.1110e-01, 1.6618e-01, 3.6139e-01,
7.9777e-02, 4.3655e-01, 3.0597e-01,
-5.5125e-02, 6.1229e-02, 1.2414e-01,
2.1644e-40, 7.2343e-41, 5.5580e-40,
-4.3927e-40, 5.0561e-40, -1.5560e-41,
-3.2783e-40, -8.8219e-41, 5.4415e-40,
-6.7176e-02, -3.4930e-02, -2.7087e-02,
1.0489e-01, 2.1178e-01, -1.6752e-01,
-1.2627e-01, -2.4207e-01, -7.4667e-02,
-3.1470e-02, -1.3365e-02, 8.7742e-02,
-2.2809e-02, -4.7991e-01, 2.4740e-02,
6.4418e-02, 3.4818e-02, -2.9275e-01,
-2.8830e-01, -7.0458e-02, 7.8922e-02,
-1.4436e-01, 4.1068e-02, 6.2896e-02,
4.1061e-03, 2.1844e-01, 9.0488e-02,
-1.1085e-01, 8.3761e-02, 3.2634e-02,
3.2470e-01, -2.7760e-01, 4.1235e-02,
8.6625e-02, 2.6816e-01, -1.3560e-01,
3.8789e-01, 3.2406e-01, 1.0631e-01,
7.5131e-02, -2.0206e-01, 1.3027e-01,
4.0382e-02, 2.4350e-01, -3.6042e-03,
-1.0063e-01, 1.9418e-01, -7.7039e-02,
9.4531e-03, 7.1605e-02, 1.4004e-01,
-2.0591e-02, 4.5944e-02, -2.6721e-03,
-3.4665e-03, 2.2560e-01, -8.2930e-02,
-1.5507e-01, 2.7206e-01, -2.8665e-02,
-3.4909e-03, 1.7696e-02, -8.5492e-02,
2.1541e-40, -3.3029e-40, 1.7678e-40,
-3.9857e-40, -1.1965e-40, -8.6754e-41,
-4.0721e-40, 2.2073e-41, 4.2728e-40,
-1.0496e-02, 5.4120e-02, -1.6498e-02,
-5.9387e-02, 2.3757e-01, -8.0381e-02,
2.3739e-02, -1.3715e-01, -3.0906e-02,
-8.5760e-03, 2.4518e-02, -6.9090e-02,
2.1623e-02, 8.9641e-02, 9.9031e-02,
-1.0052e-02, 4.6506e-02, -1.5756e-01,
8.5003e-02, -3.6434e-03, 1.3816e-02,
9.0532e-02, 2.3661e-01, 1.8077e-01,
2.8120e-02, 4.3753e-02, 2.2981e-02,
3.5830e-02, 5.7995e-02, -5.6879e-03,
3.7708e-02, -2.6373e-01, 2.0886e-01,
-4.0632e-02, 1.6891e-01, -6.8996e-02,
-1.1972e-01, -4.3628e-02, 2.0278e-02,
-1.4818e-01, 4.0844e-02, 1.5917e-01,
-4.5684e-02, 1.4075e-01, -2.0784e-02,
-1.1533e-03, -2.7897e-01, -8.8707e-02,
-1.7907e-02, 1.8400e-01, 1.1026e-01,
-2.3183e-03, 6.3875e-02, -4.2394e-03,
3.2021e-02, -8.8955e-02, -2.2298e-02,
8.1353e-02, 3.3079e-01, -2.0616e-01,
-3.5802e-02, 4.9804e-02, -9.2712e-02,
-1.5940e-07, -1.6158e-07, -1.5812e-07,
-1.6273e-07, -1.6555e-07, -1.6260e-07,
-1.5867e-07, -1.6192e-07, -1.5975e-07
}
,
{
-1.5080e-02, 1.1294e-01, 7.1187e-02,
1.1628e-02, -8.4938e-01, 8.5457e-02,
-3.9642e-02, -2.3879e-02, 1.0029e-02,
2.6648e-40, 9.1590e-41, 3.3285e-40,
-3.3445e-40, -2.5194e-40, -2.0946e-40,
3.6800e-40, -1.1584e-40, 6.2195e-40,
-1.3560e-41, -8.0151e-41, 4.4048e-40,
-4.1209e-40, 2.7411e-40, 3.2419e-40,
5.8333e-40, 1.1503e-40, -5.0783e-40,
-5.5301e-02, -2.4971e-02, 4.9251e-02,
-2.5589e-01, 1.6560e-01, -8.0956e-02,
4.0518e-01, 3.1320e-02, -1.4262e-01,
1.2250e-02, 5.1989e-02, 3.0706e-03,
-7.9534e-02, -1.9801e-01, -2.7791e-02,
2.1768e-01, 6.9978e-02, -4.2325e-02,
-1.9165e-02, -2.1179e-02, -2.1558e-02,
3.6816e-01, -5.2929e-02, 9.5790e-02,
2.8095e-01, -1.4731e-01, 3.4182e-02,
2.3702e-02, 4.0764e-02, 3.5767e-02,
-8.4586e-02, 1.9025e-01, -1.6794e-01,
-1.0273e-02, 3.2259e-01, -1.5841e-01,
2.6794e-01, 5.2084e-02, 1.2761e-02,
-1.1169e-01, -1.7808e-01, 1.1363e-01,
-1.3808e-01, -1.7764e-02, -1.7420e-02,
1.5840e-02, -2.3405e-01, 7.6361e-03,
-6.6082e-02, 7.9778e-02, -2.0423e-01,
-1.9594e-02, -6.3370e-02, 3.3351e-02,
-2.0396e-40, -3.0207e-40, -3.2364e-40,
2.3575e-40, 5.8301e-41, -3.7432e-40,
-3.6291e-40, 3.3441e-40, 1.4574e-40,
-4.3792e-40, -2.5814e-40, -3.4986e-41,
-3.4920e-40, -4.4757e-40, 3.2192e-40,
4.7222e-40, -7.3197e-41, -3.4635e-40,
5.1495e-02, 7.8843e-02, 4.2243e-02,
-2.1245e-01, 1.9568e-01, 7.9369e-03,
2.2795e-02, 2.2801e-02, 7.6895e-02,
3.0044e-01, -1.4041e-01, -2.3677e-02,
-1.1656e-01, -7.5113e-02, 1.0625e-02,
-1.2133e-02, 5.0658e-02, -7.2944e-02,
-3.3652e-02, -2.0452e-01, -4.1048e-02,
2.8531e-01, 1.2116e-01, -2.1526e-02,
-2.4564e-01, -4.1870e-02, -5.5819e-02,
-2.3157e-01, -2.5594e-02, 1.1154e-01,
2.1234e-01, 3.2762e-01, -2.9000e-01,
1.8591e-02, -5.9820e-02, -9.0807e-02,
-3.0027e-01, -1.8370e-01, 1.2086e-02,
2.1178e-02, 2.9559e-01, 1.2966e-01,
6.8542e-02, 7.7710e-03, -6.0304e-02,
3.3019e-03, -1.9135e-02, 9.3227e-03,
-9.9003e-03, -1.0101e-01, -3.3513e-01,
-8.4091e-03, -1.5918e-02, -3.4323e-02,
3.8770e-40, -2.8639e-40, 4.6953e-40,
4.2631e-40, 6.2568e-41, -5.3500e-40,
-2.1987e-40, 1.3435e-40, 4.4101e-40,
-3.9973e-40, 6.3046e-40, 1.6046e-40,
4.4338e-40, 1.6940e-41, 4.1598e-40,
2.6132e-40, -2.9888e-40, -7.5708e-41,
-1.5991e-02, 8.2749e-02, -6.3776e-02,
-3.2220e-03, 4.1443e-02, -8.1219e-02,
-1.1231e-01, 6.7586e-01, -1.7600e-01,
-4.0371e-02, -7.9044e-02, 1.2451e-01,
4.1907e-02, -8.8159e-02, -1.1229e-01,
-4.0654e-03, -4.4087e-03, 1.2942e-01,
9.3318e-03, -6.5085e-02, 1.0165e-02,
-2.8758e-02, -4.9997e-02, 4.6069e-02,
4.2107e-04, 2.1718e-01, 3.1080e-03,
-9.1277e-03, -2.8568e-02, 1.6202e-02,
-8.2490e-03, 1.2888e-01, -1.3159e-01,
1.6065e-02, 4.0143e-02, 2.7043e-01,
-3.4809e-02, -8.1302e-03, 6.0786e-02,
5.1845e-02, 4.6995e-01, -1.0392e-02,
2.3359e-02, -1.8364e-01, -3.7343e-01,
-8.2996e-02, 9.7724e-02, -6.1012e-02,
2.8225e-02, 8.8706e-02, 1.3443e-02,
3.7515e-03, 1.7772e-02, 6.5945e-03,
-7.3847e-12, -7.5629e-12, -6.9337e-12,
-7.6292e-12, -7.8624e-12, -7.2877e-12,
-7.0582e-12, -7.3197e-12, -6.8467e-12,
1.5445e-11, 2.0754e-11, 2.0524e-11,
2.1239e-11, 2.5909e-11, 2.5983e-11,
2.0986e-11, 2.5190e-11, 2.2478e-11,
-4.7164e-02, -2.4754e-02, -1.8256e-02,
1.0526e-01, -4.6010e-03, -2.2784e-02,
-5.2028e-02, -1.6408e-01, 7.9112e-03,
-8.1863e-02, 4.2772e-02, -9.9446e-04,
-5.5521e-02, -1.1264e-01, -4.5782e-02,
-1.1026e-01, 2.1443e-02, -4.5120e-02,
-1.4141e-02, -2.8116e-03, 2.6990e-02,
-2.0201e-01, 4.3214e-01, 2.9373e-02,
-2.1768e-01, -2.7230e-02, 5.5396e-03,
5.0196e-02, 1.5506e-01, -5.7328e-02,
4.8323e-02, 3.8243e-02, -1.3533e-01,
-9.8862e-03, -5.6971e-02, -7.1500e-02,
1.0272e-01, 7.4686e-02, 7.4732e-02,
8.3744e-02, 1.5834e-01, 2.9221e-02,
6.5641e-02, 7.7697e-02, 3.5746e-02,
-1.6614e-01, -2.3128e-01, 4.4691e-02,
6.3546e-02, -3.8105e-01, 3.4110e-02,
-3.5022e-02, -2.3782e-02, 2.8664e-02,
-3.8813e-41, -2.8626e-40, -9.0218e-41,
4.1216e-40, -4.4215e-40, 3.1198e-40,
5.6281e-40, 2.0477e-40, 2.7797e-40,
-4.4903e-40, -6.2574e-41, 4.9971e-40,
5.0135e-40, -3.1945e-40, -2.4694e-40,
2.6587e-40, -4.9583e-40, -4.9771e-40,
3.7139e-02, 5.2936e-04, -2.3658e-02,
-3.6199e-01, -5.1912e-02, -5.1969e-02,
2.5415e-01, 2.4109e-01, 9.8721e-03,
5.5061e-02, -4.7469e-02, 3.0045e-02,
2.1565e-03, -2.3866e-02, -2.3496e-02,
6.0892e-02, -4.6442e-04, -5.0200e-02,
5.4971e-02, -1.7234e-02, -3.2759e-03,
4.8225e-01, -1.1234e-01, 3.8257e-02,
5.2105e-02, -2.8473e-03, -1.0355e-02,
-9.5654e-03, -1.8751e-01, 1.7079e-02,
7.0133e-02, 7.6363e-01, -8.7388e-02,
-5.6536e-02, -1.9152e-01, -1.6043e-01,
2.0359e-01, 7.4214e-02, 3.1970e-02,
-1.8199e-01, -1.9386e-01, -2.5967e-03,
-3.4609e-02, 3.3870e-02, 5.8835e-02,
8.8220e-02, 9.9265e-02, 7.1240e-03,
-9.1395e-02, -3.1699e-01, -2.9120e-02,
-1.8436e-02, -2.1432e-02, -4.5465e-02,
-3.2013e-40, 3.2019e-40, 4.8747e-41,
2.6585e-40, 6.1463e-40, 1.4176e-40,
-1.5286e-40, 3.0543e-40, 7.2032e-41,
-6.0758e-40, -3.6200e-40, 1.2123e-40,
1.3627e-40, 3.2983e-40, 3.6171e-40,
-4.2148e-40, 1.1102e-40, 3.2714e-40,
-3.4763e-02, -3.1632e-02, 3.0044e-02,
-2.0935e-01, 1.3533e-01, -9.1607e-03,
-1.5931e-01, 1.0771e-01, -6.6518e-02,
2.4399e-02, 2.2923e-03, 5.1575e-02,
-1.4154e-01, -1.0013e-02, -7.5696e-02,
1.0849e-01, 1.2575e-01, -7.3161e-02,
-1.5217e-02, -2.7659e-02, -3.1401e-02,
3.4960e-01, 7.2390e-02, 2.0722e-02,
3.9440e-01, 9.1821e-04, 1.7842e-02,
-1.5670e-02, 5.3020e-02, 6.0536e-02,
-1.8853e-01, 2.7532e-01, -1.9681e-01,
8.3258e-02, 9.4285e-02, -1.2695e-01,
2.7593e-01, 1.1456e-01, 1.6048e-02,
-5.1675e-01, 1.4727e-01, 7.5170e-02,
-6.9143e-02, -9.2948e-02, 3.4687e-02,
1.4128e-02, -7.9962e-02, 8.0446e-02,
3.7011e-02, -1.3400e-01, -2.0725e-02,
-6.4981e-03, 7.0724e-02, 6.6167e-02,
-4.5940e-41, 2.5437e-40, -3.3111e-40,
5.9661e-40, 6.2521e-40, 5.6418e-40,
1.9187e-40, -5.8872e-40, 5.5747e-40,
-1.6402e-11, -2.2097e-11, -1.7224e-11,
-2.2755e-11, -2.9977e-11, -2.1231e-11,
-1.3688e-11, -1.7479e-11, -1.3081e-11,
6.4790e-03, -3.8464e-03, -1.0008e-02,
-2.6001e-02, -7.9483e-02, 3.3711e-02,
2.6659e-03, -3.2634e-02, 1.0767e-02,
4.9939e-03, 1.4064e-02, -3.4294e-02,
4.8529e-02, 6.3386e-01, -3.6805e-02,
-1.3703e-01, 2.5878e-02, -4.8617e-02,
3.2186e-02, 6.6382e-02, 1.9305e-02,
7.0196e-02, -1.6892e-01, -2.8980e-02,
9.7762e-02, 9.7998e-03, -5.1620e-03,
5.0753e-02, -4.5071e-03, -3.9836e-02,
-6.0381e-02, -9.2016e-02, 9.5433e-02,
-1.0045e-02, 8.7955e-03, 4.9429e-02,
-1.8363e-02, -1.1912e-01, 9.7347e-03,
-1.5657e-01, -2.1035e-01, -4.9737e-02,
-3.0025e-02, -6.4959e-02, -5.6107e-02,
3.2927e-40, 5.7263e-40, 6.2889e-40,
-6.0716e-39, 5.3050e-41, -1.7152e-40,
-3.2493e-38, -1.5841e-40, -1.9343e-40,
4.9763e-40, 5.5142e-40, -4.3462e-40,
-2.2649e-40, 1.4321e-40, -2.6779e-40,
2.3072e-41, 5.4080e-40, -6.4200e-41,
2.2827e-40, -5.4515e-41, -4.1768e-40,
3.9033e-40, 6.1988e-41, 5.9877e-40,
-4.3355e-41, -5.1088e-40, 5.9845e-40,
-4.8238e-40, -1.8586e-40, 4.8699e-40,
-9.7225e-41, 4.3387e-40, -4.3683e-40,
-7.9278e-41, -5.3614e-40, 2.1911e-40,
-3.3982e-40, -5.3335e-40, 3.8540e-40,
1.9051e-40, -2.0840e-40, 2.2868e-40,
-3.5020e-40, -3.4276e-40, 2.7395e-42,
3.9197e-40, 6.1843e-40, -1.5888e-40,
4.3516e-40, -6.1852e-40, -5.3692e-40,
-4.3268e-40, 3.5154e-40, 3.4477e-40,
-4.8414e-40, 2.2647e-40, -2.5591e-40,
4.6326e-40, -3.0462e-40, 4.7817e-40,
-4.9853e-40, -5.3425e-40, -2.9848e-40,
-1.3329e-07, -1.3784e-07, -1.3049e-07,
-1.3376e-07, -1.3905e-07, -1.3204e-07,
-1.2479e-07, -1.2994e-07, -1.2410e-07
}
,
{
-2.5964e-02, 2.9670e-02, 1.2100e-01,
-3.0371e-02, -1.5277e-02, -1.8589e-01,
-1.8650e-02, -1.2852e-01, -6.6297e-02,
9.7934e-04, -5.1835e-02, -1.0278e-03,
-1.2336e-02, 2.2130e-01, -1.2373e-01,
-2.3451e-02, 3.4217e-02, -1.0118e-02,
-3.0558e-01, -8.5390e-02, -1.4360e-02,
1.2473e-01, -1.7005e-02, -3.6816e-02,
-8.9125e-02, -6.1400e-02, -2.0623e-02,
1.3736e-02, 1.2441e-02, -4.3491e-02,
6.4806e-02, 3.7012e-01, 3.8064e-02,
-1.3731e-02, -2.4859e-01, -2.5450e-01,
-6.5111e-03, -1.4271e-01, -5.0481e-02,
5.3240e-02, -3.4843e-02, -2.2703e-02,
3.7414e-02, 1.0334e-01, -7.2237e-02,
1.4216e-02, 3.4231e-02, -2.0890e-02,
2.7879e-02, 1.3717e-01, 4.5864e-03,
3.0460e-03, -1.1734e-01, 4.4439e-02,
6.4825e-03, 1.6324e-02, 1.4928e-02,
-8.8420e-02, -1.0779e-01, -9.0653e-02,
3.1086e-02, -2.9067e-02, -8.8488e-02,
-1.6779e-40, -6.3646e-41, -6.2486e-40,
2.3154e-40, 2.8049e-40, 3.7718e-40,
-3.3950e-40, -3.1501e-40, 5.8709e-40,
2.1435e-02, -4.3732e-01, 1.5520e-02,
3.4080e-02, 1.9912e-01, -8.1413e-02,
-3.2816e-02, 5.7844e-02, 8.9258e-03,
-1.1662e-02, -1.1721e-02, 4.3033e-02,
5.2135e-02, -2.2503e-01, 2.3941e-01,
3.8400e-02, 1.8075e-01, -1.4776e-01,
2.6784e-01, 2.2817e-01, -3.0553e-03,
-6.7998e-02, -1.2050e-01, 1.4714e-02,
2.4045e-02, -1.4329e-02, -1.6705e-02,
-1.1421e-02, 4.2139e-02, 4.2944e-02,
1.8809e-02, -2.5221e-01, 9.7562e-02,
-4.1600e-02, 4.0069e-03, 7.5290e-02,
-2.0092e-02, 2.3537e-01, 2.4356e-02,
3.1957e-02, -4.8573e-02, 2.9379e-02,
6.4562e-03, -1.1527e-01, -9.1223e-02,
-2.3432e-02, 5.2881e-02, -7.3239e-02,
-3.7048e-02, -2.1481e-01, 5.9801e-05,
-4.2646e-02, -1.8366e-02, -1.0681e-01,
-1.3366e-01, -1.7123e-01, -3.5629e-02,
1.1216e-01, 1.1479e-01, 9.5297e-02,
2.4728e-02, -7.3135e-03, -3.4373e-02,
-2.3917e-40, -4.1869e-41, 3.7775e-41,
2.8931e-40, -9.4850e-41, 2.5694e-40,
3.3549e-40, -2.4334e-40, -5.5933e-41,
-2.0900e-02, 2.1203e-02, -4.7169e-02,
2.3632e-02, -7.1148e-01, 4.9722e-02,
-7.8963e-03, 5.0689e-02, 2.2619e-02,
-4.7364e-03, 3.2037e-02, 1.1004e-02,
-4.3001e-03, 2.5245e-01, 5.9112e-02,
2.8932e-02, -1.1267e-01, -2.3739e-01,
-6.5379e-02, 5.2462e-03, -1.6807e-02,
1.0960e-01, 1.7943e-01, -6.3043e-03,
9.3102e-02, 7.3103e-02, 2.5259e-02,
5.6835e-02, 4.0467e-02, 2.5447e-03,
9.4599e-02, 2.5222e-01, 6.9855e-02,
4.4758e-02, 1.8073e-01, 1.5075e-01,
2.0329e-02, -4.9412e-02, 2.0663e-02,
-7.1648e-03, 1.4986e-01, 2.1212e-01,
2.7657e-02, -6.8660e-02, 1.7321e-02,
1.0629e-02, -1.0722e-02, 2.8247e-02,
-1.1303e-02, 1.0076e-01, -4.0592e-01,
2.6744e-02, 7.3650e-02, 5.7966e-02,
2.8122e-02, -7.5961e-02, -9.4797e-03,
-1.3010e-01, -5.4184e-01, -1.3619e-01,
-1.8661e-03, -1.4357e-01, 7.9520e-03,
-1.3538e-09, -1.6580e-09, -1.7289e-09,
-1.2386e-09, -1.5132e-09, -1.5987e-09,
-1.1157e-09, -1.3420e-09, -1.4090e-09,
1.5441e-02, -1.8142e-01, -8.6802e-02,
-4.0983e-02, 2.4351e-01, -5.8181e-02,
-2.9568e-02, 3.9561e-03, 3.4181e-02,
-2.9210e-02, 2.5403e-02, 9.1331e-02,
2.3621e-02, 2.3954e-01, 5.2487e-02,
1.6509e-02, -6.2728e-02, 1.3448e-02,
1.2855e-01, 1.1892e-02, -1.3356e-02,
1.0810e-01, 1.6760e-01, -3.2040e-02,
6.2209e-02, 4.0682e-02, 3.9772e-02,
-6.1711e-03, 5.0588e-02, -1.0811e-01,
1.5744e-02, 1.6091e-01, -6.1739e-02,
-5.6717e-02, -1.0657e-02, -3.7943e-02,
-4.0595e-02, 8.0149e-02, 2.0216e-02,
3.8838e-02, -6.3586e-01, 2.3785e-01,
-1.0472e-02, 6.3899e-02, -8.2184e-02,
-1.9137e-02, 8.1163e-02, 6.7065e-02,
-2.2377e-03, 1.1860e-01, 3.4122e-02,
1.0501e-02, 2.9851e-02, 7.5841e-02,
5.8970e-02, -1.2188e-01, 7.7982e-02,
-2.6516e-02, -4.1289e-01, 2.1471e-02,
3.3957e-02, 3.5762e-02, -5.7857e-02,
-2.7357e-30, -3.4780e-30, -3.0306e-30,
-1.5188e-30, -1.9888e-30, -1.8755e-30,
-7.7431e-31, -9.7571e-31, -9.7402e-31,
-1.8497e-02, -2.4554e-02, 1.4428e-01,
1.4217e-02, -2.3647e-01, 8.4097e-02,
-1.0251e-02, -4.2137e-03, 6.0831e-03,
1.7742e-03, 2.1487e-02, 3.3147e-02,
-1.0971e-02, 3.0162e-01, 5.2391e-02,
1.8341e-02, -1.3390e-01, 9.4303e-02,
-1.5685e-01, 9.8434e-02, -1.2502e-03,
3.1370e-01, -2.8879e-02, 2.6313e-03,
1.7548e-02, 6.6741e-03, -1.7681e-03,
5.2062e-02, 6.6914e-02, 7.5256e-03,
2.4966e-02, 2.8081e-01, 2.9815e-02,
2.2375e-02, 1.4257e-03, -7.4702e-02,
1.5372e-02, 3.9587e-02, 4.6909e-02,
-2.2911e-02, -1.4568e-01, -3.8964e-01,
2.2850e-02, -4.2297e-02, 6.5736e-02,
-6.9905e-03, -6.3972e-02, -1.8430e-01,
4.4453e-03, 2.0687e-01, 3.0032e-01,
1.7243e-02, 9.8548e-03, -9.7476e-02,
-7.9682e-04, -2.1199e-01, -4.3461e-02,
-4.2929e-02, -2.8227e-01, 2.8997e-02,
-1.8741e-03, 1.1166e-02, 1.8381e-03,
-5.6725e-16, -1.0368e-15, -1.1480e-15,
-5.5537e-16, -9.9929e-16, -1.1499e-15,
-3.8787e-16, -6.4019e-16, -7.7595e-16,
4.4505e-02, 8.8803e-02, 1.1384e-02,
-3.9434e-02, 1.9319e-01, -1.2016e-02,
-4.6072e-02, 1.1769e-01, 7.4816e-03,
-3.7856e-02, -1.7147e-02, 1.5984e-01,
-2.6459e-02, 1.7469e-01, 1.2584e-01,
1.6387e-02, 1.7370e-01, -1.7350e-01,
-3.0008e-01, 2.1485e-01, -5.4302e-02,
5.7724e-02, 3.2168e-01, -2.5261e-02,
6.9277e-02, 7.5035e-02, 6.3485e-02,
-1.1688e-01, 2.6068e-02, -1.3490e-01,
-1.6085e-01, 1.9409e-01, 1.1434e-01,
-7.3819e-02, -7.7880e-02, 7.3699e-03,
-9.9972e-02, 1.3554e-01, 2.1656e-02,
-8.8303e-02, 5.4435e-01, -4.0582e-02,
-3.4805e-02, -1.5291e-01, -3.6917e-02,
-3.4377e-02, -3.3086e-02, -9.5097e-02,
-7.4538e-03, 2.2545e-01, -2.6380e-02,
1.4440e-02, 1.3205e-01, 1.6164e-01,
9.2164e-02, -8.4307e-02, 7.8922e-02,
1.2519e-01, -6.1809e-01, -1.0895e-01,
6.2744e-02, -4.4951e-02, -3.2548e-02,
-2.5422e-21, -6.3849e-21, -9.5560e-21,
-1.9248e-21, -4.7107e-21, -6.4244e-21,
-1.4638e-21, -3.1947e-21, -3.7663e-21,
-8.6113e-03, -7.0987e-02, 5.8265e-02,
-1.3148e-02, 5.6371e-01, 5.0580e-02,
1.1741e-02, -3.5614e-02, -6.1265e-02,
1.4758e-03, 3.3349e-02, -1.0867e-02,
-4.0234e-02, 1.9894e-01, 1.3972e-01,
-1.9167e-02, -4.1723e-02, -1.9982e-01,
-3.0756e-01, 2.6284e-02, -1.9058e-02,
-7.9349e-04, 1.2644e-01, 2.9567e-02,
-3.9274e-02, 1.1030e-02, -9.4885e-03,
1.3541e-02, 1.7044e-01, 8.9626e-02,
6.6814e-02, 2.6430e-01, 1.7409e-01,
-6.1034e-04, 1.7569e-02, 1.3090e-01,
-4.1941e-03, 8.9599e-02, -3.3684e-02,
-1.1310e-02, -4.3731e-01, 5.7177e-02,
-4.5718e-04, 1.0175e-01, 4.1211e-02,
2.9756e-02, -1.1601e-01, -7.3171e-02,
2.7939e-02, 2.1334e-01, -4.0210e-01,
-8.6847e-03, 8.1829e-02, 4.4225e-02,
-1.1411e-01, -1.7697e-01, -5.8087e-02,
7.9613e-02, -4.2814e-01, -1.0814e-01,
-3.0610e-02, 1.1342e-03, -2.2322e-03,
-1.1254e-10, -1.4207e-10, -1.5402e-10,
-9.9123e-11, -1.2394e-10, -1.3338e-10,
-8.8840e-11, -1.0857e-10, -1.1463e-10,
3.0283e-02, -5.6191e-02, -1.0447e-01,
-1.4578e-02, -2.8745e-01, 1.9089e-01,
-2.7251e-02, 9.8069e-02, -1.4580e-02,
-3.0276e-02, 1.4366e-02, 2.6363e-02,
-8.4962e-02, 7.8998e-02, -4.7717e-02,
-3.2004e-02, -2.1579e-02, 1.1247e-02,
1.3895e-01, -3.3900e-01, 7.7998e-03,
2.4769e-01, -1.8506e-01, -2.3116e-03,
3.1361e-02, -1.1718e-02, -1.8286e-02,
-1.3020e-01, 1.4334e-01, -5.5700e-02,
-3.5386e-02, 1.0992e-01, -8.0235e-02,
-5.8978e-03, 7.7039e-02, -7.4619e-02,
-8.1603e-02, 1.2982e-01, -7.3193e-02,
-6.1469e-02, 1.7131e-01, 4.0255e-01,
-6.4582e-03, -8.2741e-02, -2.2220e-02,
1.6876e-02, -3.2590e-02, 5.5645e-02,
2.5231e-02, 2.9984e-01, -3.6995e-02,
9.3322e-03, 2.0758e-01, -2.1986e-02,
-4.9568e-02, 2.1857e-03, 8.6127e-02,
8.6593e-02, -5.8134e-01, 3.4507e-01,
4.8855e-02, -1.0506e-01, 4.1584e-02,
2.5428e-40, -4.4558e-40, -2.2090e-40,
-2.9727e-40, -4.8454e-40, 3.0397e-40,
1.1696e-40, -3.3028e-40, -2.2959e-40
}
};
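// Presumably the per-layer bias terms matching the 3x3 kernel blocks above:
// one 8-element vector per convolution layer, one bias per feature map.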
static __device__ __constant__ const float HDNL1biasL[8][8] =
{
{
-3.1869e-08, -3.8279e-01, -6.3693e-05, -5.9054e-02, 9.3774e-04, -2.9944e-02, -1.1156e-03, -7.5635e-02
}
,
{
-1.7701e-01, -1.3417e-06, -3.0706e-40, -1.9022e-06, -1.2965e-02, -6.6444e-40, 1.4699e-02, 2.6082e-02
}
,
{
-3.7577e-07, 4.4550e-03, -8.1266e-04, 3.2408e-01, -1.1321e-07, -1.8907e-23, -1.9770e-25, -3.2394e-02
}
,
{
-2.1525e-14, -1.4130e-02, -1.9410e-02, -1.8703e-02, -2.9177e-02, -4.0635e-02, 7.8097e-02, -1.1643e-01
}
,
{
-2.6309e-02, -2.2238e-02, 6.8700e-03, -1.7973e-02, -1.0893e-02, -1.1888e-02, -4.9598e-03, -6.3663e-06
}
,
{
-1.2406e-03, -2.4901e-12, -9.7265e-07, 6.3490e-03, 1.3495e-01, -3.8411e-03, -6.6630e-03, -7.3614e-03
}
,
{
-2.7729e-03, -4.8174e-03, -6.3012e-03, 2.0491e-01, -2.0110e-03, -3.0974e-03, 5.1407e-01, -3.5016e-08
}
,
{
0.0324, 0.0140, 0.6750, 0.2661, 0.3646, 0.3591, 0.5597, 0.0816
}
};
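// Assumed layout: weights of the final 1x1 convolution, presumably 4 output
// values x 8 input feature maps; the exact output interpretation (e.g. the
// sub-pixel arrangement) is not visible in this file.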
static __device__ __constant__ const float HDNL1kernelsL10[4 * 8] =
{
0.0882, 0.0422,
0.3775, 0.4754,
-0.3209, -0.4870,
-0.0384, 0.0530,
0.1034, 0.0173,
0.5011, 0.3900,
0.3621, -0.1645,
-0.1304, 0.0013,
0.2230, 0.3026,
0.1618, -0.4514,
-0.2097, 0.1894,
-0.0326, 0.1434,
0.2421, 0.3363,
-0.0938, 0.3156,
0.1137, -0.2165,
0.2273, -0.1284
};
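/*
 * Illustrative sketch only (not part of the shipped kernels): assuming the
 * flat weight blocks above use the usual [outChannel][inChannel][3x3]
 * row-major layout, one output value of a hidden 3x3 convolution could be
 * accumulated as below. The helper name, signature, and layout assumption
 * are hypothetical; the real kernels unroll this indexing inline.
 */
static __device__ float convDot3x3(
    const float* __restrict__ w, // one 9*8*8 layer block of kernel weights
    const float fm[8][3][3],     // 3x3 neighborhood of the 8 input maps
    int oc)                      // output feature map index, 0..7
{
    float acc = 0.0f;
    for (int ic = 0; ic < 8; ++ic)      // input feature map
        for (int k = 0; k < 9; ++k)     // 3x3 window, row-major
            acc += w[(oc * 8 + ic) * 9 + k] * fm[ic][k / 3][k % 3];
    return acc;
}

// Assumed layout for the HDN level 2 first layer below: eight 3x3 kernels
// over the single-channel input (9 weights per output feature map).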
static __device__ __constant__ const float HDNL2kernelsL1[9 * 8] =
{
-2.0676e-02, 6.7641e-03, 2.8287e-01,
2.5576e-01, 1.9765e-01, -2.4700e-01,
3.5056e-01, 2.9306e-01, -2.2245e-01,
8.4706e-02, -2.9455e-01, -5.5831e-02,
-8.4635e-02, -9.6835e-02, 3.1208e-01,
1.7690e-01, 2.7624e-02, 5.1954e-02,
-5.3869e-01, 7.2934e-02, -1.7662e-03,
-3.1402e-02, 3.1700e-01, 1.4965e-01,
3.8569e-02, 5.5025e-03, -6.6555e-03,
-4.2049e-38, -4.1971e-38, -4.1488e-38,
-4.2855e-38, -4.2871e-38, -4.2363e-38,
-4.1861e-38, -4.1974e-38, -4.1677e-38,
1.8451e-01, -5.4584e-02, 1.4494e-01,
1.3433e-01, 1.0073e-01, 2.6371e-01,
6.1261e-02, 2.2116e-01, 2.0074e-01,
5.9669e-02, -3.9168e-02, 2.1674e-01,
-2.9132e-01, 3.0285e-03, 1.2625e-01,
-4.3415e-02, 1.8663e-01, -1.6554e-01,
1.0102e-01, 6.3466e-02, 1.5225e-01,
2.1692e-01, 1.9860e-01, -7.0456e-02,
-1.6406e-03, -2.7834e-01, -3.5449e-01,
-3.0140e-01, -4.2348e-01, -5.8263e-01,
2.3140e-01, -2.6843e-01, -1.1069e-01,
-9.1484e-02, 1.1486e-02, 5.6396e-02
};
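// Presumably one bias per first-layer feature map (8 values).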
static __device__ __constant__ const float HDNL2biasL1[8] =
{
-9.0964e-02, 2.1136e-01, -1.2011e-02, -4.5657e-38, -1.4443e-01, 1.8968e-01, -2.9027e-02, 1.6199e-01
};
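// Assumed layout as for HDN level 1: eight hidden layers, each a flat block
// of 9*8*8 weights (3x3 kernels, 8 input and 8 output feature maps).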
static __device__ __constant__ const float HDNL2kernelsL[8][9 * 8 * 8] =
{
{
4.4561e-02, 4.3527e-01, -8.9737e-02,
-4.9011e-03, 1.4879e-01, -8.2210e-02,
-1.7593e-02, 4.9294e-02, 1.8058e-01,
-3.3827e-02, -7.9055e-02, 2.6982e-01,
-5.2485e-02, -4.2046e-01, -5.6838e-02,
1.0919e-01, -7.3141e-02, 9.4797e-02,
6.2764e-02, 2.5475e-01, 1.3705e-01,
2.0997e-01, 7.3360e-01, 2.0801e-01,
-1.1500e-01, 3.1245e-01, 6.7457e-01,
-5.1481e-39, -5.1520e-39, -4.9367e-39,
-5.1383e-39, -5.1642e-39, -4.9479e-39,
-5.1323e-39, -5.1859e-39, -4.9547e-39,
1.3849e-01, 1.1564e-01, -1.8175e-01,
-5.5355e-03, -1.5117e-01, -2.4654e-01,
8.1590e-03, -1.1681e-01, 3.4700e-05,
-2.5950e-01, -1.4182e-01, 3.1814e-01,
1.7662e-01, 1.8420e-01, -1.5181e-01,
7.6233e-02, -7.8372e-02, -3.1968e-01,
-4.5770e-01, 4.1562e-02, 1.3721e-01,
-5.8444e-02, 3.3148e-02, -2.3370e-01,
1.5374e-01, -1.1162e-01, -7.4099e-03,
-1.5716e-01, -1.8356e-01, 2.1114e-02,
-3.2233e-01, 2.1064e-02, 2.7019e-01,
-1.3702e-01, 2.6969e-01, 2.1033e-01,
8.9027e-02, -7.9969e-02, 1.0096e-01,
6.6773e-02, 3.9558e-02, -7.4944e-02,
-5.9789e-02, 1.2265e-01, 3.3873e-02,
-9.7157e-03, 9.2906e-02, 6.0300e-02,
-2.2104e-03, 6.8198e-02, -1.2931e-01,
8.9288e-02, -1.2554e-01, -4.3270e-02,
1.0660e-01, 1.1609e-02, -1.2415e-01,
2.6372e-02, -3.6311e-02, 1.5625e-01,
-7.9595e-02, -3.3662e-01, -4.0760e-01,
-2.9566e-39, -2.8760e-39, -2.8816e-39,
-2.9566e-39, -2.8964e-39, -2.9115e-39,
-2.9566e-39, -2.9179e-39, -2.9130e-39,
7.9255e-02, 9.4548e-02, 8.8155e-02,
-2.8163e-02, 1.2428e-01, -6.4973e-03,
7.7875e-02, 7.4765e-02, -5.2405e-02,
-1.4886e-02, -7.1499e-02, -7.0719e-02,
9.7562e-02, 9.0948e-02, -5.6588e-02,
-1.2872e-02, -6.6390e-02, -6.4147e-02,
9.8262e-02, -2.4215e-01, -1.7051e-01,
1.8096e-01, 1.8106e-01, 1.3108e-01,
2.0649e-01, 1.2242e-01, 3.7225e-02,
-2.5125e-01, -1.0073e-01, 4.5330e-01,
1.8588e-01, -2.6809e-01, -1.5709e-01,
4.7668e-01, -2.4208e-01, -6.6012e-01,
1.3561e-01, 5.4109e-02, 6.1899e-02,
-1.9605e-02, 1.1349e-01, 3.5781e-02,
3.5513e-03, 3.1212e-02, -6.0399e-02,
5.9258e-02, -1.8175e-02, 7.3714e-02,
2.0052e-02, 4.3245e-02, -5.0879e-03,
-1.1082e-02, -1.0753e-01, -1.7896e-03,
2.9139e-02, 2.2747e-01, -6.4075e-02,
7.3097e-02, 1.5703e-01, -5.3815e-01,
1.0620e-01, -1.1386e-01, 1.7103e-01,
-3.8728e-39, -3.8299e-39, -3.8320e-39,
-3.9065e-39, -3.8445e-39, -3.8135e-39,
-3.8838e-39, -3.8114e-39, -3.8255e-39,
2.3253e-02, 6.9893e-02, 1.4774e-01,
9.6087e-02, 2.3102e-03, -3.4449e-02,
2.6819e-02, 1.0254e-01, -2.8200e-02,
3.9553e-02, 4.7191e-05, -5.5558e-02,
4.1641e-02, 5.8706e-02, -1.0337e-01,
1.1291e-01, 5.9622e-02, 7.0677e-02,
-2.5162e-01, 7.6659e-02, 1.7245e-01,
-5.8522e-02, 1.4365e-01, 2.1189e-01,
-2.8897e-02, -5.7365e-02, 1.4232e-01,
1.7854e-02, 1.7404e-03, -8.7356e-03,
-6.0777e-02, -6.2687e-02, -1.1500e-02,
-1.6468e-01, -2.5058e-01, -1.2798e-01,
2.3193e-02, 1.7209e-01, 1.6687e-01,
-3.4483e-02, -1.6846e-02, 2.5930e-02,
1.4410e-01, 4.2932e-02, -5.0149e-03,
4.7269e-02, 1.1276e-01, -9.2701e-03,
1.5323e-02, 1.3552e-02, 9.0256e-02,
-8.9393e-03, 7.0903e-02, -6.9379e-02,
1.8645e-01, 1.0543e-01, -1.5590e-01,
2.1056e-01, 1.1051e-01, -1.5514e-01,
-7.0484e-02, -1.5153e-01, -5.0873e-01,
3.2730e-39, 3.2358e-39, 3.1222e-39,
3.2642e-39, 3.2358e-39, 3.0921e-39,
3.2730e-39, 3.2358e-39, 3.0899e-39,
1.2225e-02, 1.2386e-01, 6.7712e-02,
3.1263e-02, 1.3617e-01, 1.5352e-01,
2.3405e-02, 8.5466e-02, 8.7303e-02,
-2.0372e-02, 8.3465e-02, -7.4233e-02,
1.2269e-01, 8.4046e-02, -3.6869e-02,
1.0242e-01, 7.3218e-02, -1.1496e-01,
-1.4539e-01, -2.3923e-01, -2.2818e-01,
-3.2368e-02, -7.4360e-02, 2.3493e-02,
1.7004e-01, 6.2924e-02, 8.9327e-02,
-1.1449e-01, -1.4973e-03, -7.0451e-03,
-9.3205e-02, -1.0312e-01, 4.6503e-02,
-2.2148e-01, -1.8111e-01, -1.1992e-01,
9.8140e-02, 9.9823e-02, -2.0282e-02,
-8.1973e-02, 1.4255e-01, -5.2392e-02,
8.0350e-03, -4.8299e-02, -7.7908e-02,
4.2383e-02, 3.0707e-02, 2.8560e-02,
1.0437e-01, 6.1290e-02, -9.7796e-02,
-1.7125e-02, -1.3572e-01, -1.5345e-01,
-1.3292e-01, 2.9477e-02, 6.8032e-02,
1.5741e-01, 4.0258e-01, 2.5838e-01,
1.3948e-01, 3.5713e-01, -3.9825e-01,
-1.9224e-39, -2.4076e-39, -2.4529e-39,
-1.9181e-39, -1.9894e-39, -4.0240e-39,
-1.9335e-39, -2.3920e-39, -4.0147e-39,
-2.1714e-02, -3.5299e-02, -7.5803e-03,
-2.4087e-02, 7.5265e-02, 7.6697e-02,
4.5309e-02, 8.9529e-02, 7.6510e-03,
1.0813e-02, 3.1294e-02, -2.5907e-02,
1.1962e-02, -6.8664e-03, -1.4084e-01,
7.7013e-02, -1.2305e-01, -6.7800e-02,
-9.7392e-02, 4.4082e-02, 1.4473e-01,
4.9436e-02, 2.8859e-01, 2.8252e-01,
-3.5828e-02, -7.5616e-02, 2.4875e-01,
-6.7684e-02, 1.1290e-01, 4.2827e-02,
-1.0860e-01, 1.2952e-01, 5.9784e-01,
-3.5402e-01, -3.9558e-02, -6.0775e-01,
-1.2854e-02, 1.5240e-01, 1.4115e-01,
-2.8134e-02, -1.2939e-02, -2.6203e-02,
1.1300e-01, 1.4481e-01, -5.1454e-02,
1.2688e-01, 2.8536e-02, 9.4877e-02,
9.6033e-02, -1.3901e-02, 6.0035e-02,
-1.1249e-01, 4.3971e-02, -1.0918e-01,
8.2500e-02, 2.1413e-01, 3.9015e-02,
1.8361e-01, 2.5271e-01, -2.2794e-01,
-8.1195e-02, -1.2269e-01, -2.6097e-01,
7.6827e-39, 7.7882e-39, 7.6893e-39,
7.7006e-39, 7.7857e-39, 7.7384e-39,
7.6985e-39, 7.7712e-39, 7.7399e-39,
1.4458e-02, 1.0801e-01, 1.5906e-01,
-1.4676e-02, 1.3699e-01, 9.2460e-02,
-3.6479e-02, 1.4529e-01, -2.8681e-02,
-3.3251e-02, -7.3096e-02, -1.4330e-01,
5.7009e-02, -3.1905e-02, -1.2035e-01,
1.1838e-01, 5.7011e-02, 2.0800e-02,
-1.1567e-02, -2.2125e-01, -9.3953e-02,
-7.5378e-02, -1.2069e-01, 1.3217e-01,
-7.7357e-02, -1.3171e-01, 1.2776e-01,
-1.1397e-01, -3.5183e-02, 2.2994e-02,
-6.5101e-02, -1.5019e-01, -2.7451e-02,
-2.4260e-01, -1.3543e-01, -1.9889e-02,
-1.9798e-39, -3.5282e-40, -1.9216e-39,
-1.9140e-39, -1.9370e-39, -1.9943e-39,
-1.8623e-39, -1.8665e-39, -1.9320e-39,
-4.8850e-39, -5.0283e-39, -4.9987e-39,
-5.0868e-39, -5.0814e-39, -5.0779e-39,
-5.2489e-39, -5.1086e-39, -5.1234e-39,
-2.9120e-39, -3.0278e-39, -2.9633e-39,
1.3186e-39, 6.0555e-39, 6.0419e-39,
-5.5922e-39, -8.5992e-40, -2.8529e-39,
-3.4668e-39, -3.5127e-39, -3.4668e-39,
-3.2831e-39, -3.4668e-39, -3.6734e-39,
-3.2142e-39, -3.2831e-39, -3.5816e-39,
1.3445e-39, 1.3621e-39, 1.3375e-39,
1.4539e-39, -2.2695e-40, 1.4522e-39,
1.3563e-39, 1.3339e-39, 1.3001e-39,
-4.4670e-39, -4.4026e-39, -4.3159e-39,
-4.5047e-39, -4.3505e-39, -2.7259e-39,
-4.5265e-39, -4.4721e-39, -4.4990e-39,
-1.9864e-39, -4.1379e-39, -3.7189e-39,
5.2465e-39, 2.5220e-39, 1.5639e-39,
-3.9760e-39, -5.7033e-39, -4.0978e-39,
-6.3745e-40, -4.7511e-39, 2.3456e-39,
-1.5164e-39, 5.0431e-39, 5.1197e-39,
8.7052e-40, 1.4947e-39, -1.1546e-39,
5.3140e-02, 1.0281e-01, 1.4767e-01,
-6.1530e-02, -9.4166e-02, 4.8671e-02,
5.6787e-03, -1.4551e-01, 1.5614e-02,
-3.4826e-02, -5.1148e-02, 9.7079e-02,
-1.3603e-02, -1.2249e-01, -1.9330e-02,
-6.8184e-02, -1.4344e-01, -9.4023e-03,
-7.4629e-02, 3.9634e-02, 1.3445e-01,
4.2153e-02, 7.1129e-01, 2.8703e-02,
7.8247e-02, 7.2210e-01, -6.6198e-01,
-6.1010e-39, -6.2892e-39, -6.4008e-39,
-6.0825e-39, -6.3221e-39, -6.3883e-39,
-1.4962e-39, -1.1702e-39, -1.2143e-39,
5.5512e-02, -2.1522e-02, 1.0866e-01,
-9.2812e-02, -3.5119e-02, 1.1396e-01,
-1.3922e-01, 6.7287e-02, -5.5626e-02,
-2.0492e-01, 8.1441e-02, -1.3513e-01,
4.7447e-02, 2.0081e-01, -3.1249e-01,
-1.8546e-02, 2.0680e-01, 7.3979e-02,
8.8928e-02, -4.3606e-01, -8.4823e-02,
-5.6133e-02, 3.5132e-01, 1.8633e-01,
-4.3855e-03, 5.4869e-02, 1.1658e-01,
1.7423e-01, -5.3107e-02, 2.2925e-02,
-1.7622e-01, 4.4453e-02, 2.8131e-02,
2.6863e-01, -2.9085e-01, -1.5098e-01
}
,
{
-2.4230e-40, 5.4425e-39, 3.4517e-39,
-1.9803e-39, -1.5207e-39, -3.5630e-39,
-4.9409e-39, -2.9280e-39, 7.7966e-40,
2.4867e-39, -2.1848e-39, 3.2524e-39,
-6.2860e-39, 4.0411e-39, -3.6956e-39,
-3.3384e-39, -1.0908e-39, 5.4261e-39,
-3.6691e-40, 9.4949e-40, -1.7279e-39,
-1.0644e-39, -2.1371e-39, -2.5125e-39,
2.9368e-39, -5.3820e-39, -3.9771e-40,
-1.4703e-39, -3.6960e-39, -4.4161e-39,
8.2800e-40, -4.9175e-39, 3.1868e-39,
5.5703e-39, -3.0263e-39, -1.6991e-39,
5.2691e-39, 4.8127e-39, 4.1346e-39,
-1.3013e-39, -1.7101e-39, -3.5467e-39,
1.1496e-39, 2.0938e-39, -4.2970e-39,
-5.5314e-39, 6.4852e-40, -5.0870e-39,
3.9377e-39, -4.1683e-39, -3.5404e-40,
-3.6188e-39, 5.4657e-39, 2.1279e-39,
3.4090e-40, 2.4425e-40, 9.3423e-41,
-2.3450e-39, 3.1518e-40, 4.3061e-40,
-2.6175e-39, -2.4696e-39, -2.3755e-39,
2.2764e-39, -4.4934e-39, 8.5722e-40,
5.1798e-39, 2.7072e-39, 5.3750e-39,
5.4335e-40, 3.8556e-39, -3.4799e-39,
-4.8963e-39, -1.1413e-39, -5.3918e-40,
6.1843e-39, -1.8521e-39, -1.3450e-39,
-2.0906e-39, -3.2544e-39, -2.8205e-39,
5.3550e-39, -3.0202e-39, -3.4181e-39,
-3.0043e-39, -3.2900e-39, -3.2915e-39,
6.1849e-39, -3.3421e-39, -3.3995e-39,
-4.8657e-39, -4.7034e-39, -4.7467e-39,
-4.6555e-39, -4.6045e-39, -4.6954e-39,
-4.8886e-39, -4.7333e-39, -4.7805e-39,
-2.0900e-39, -1.9429e-39, -2.0572e-39,
-2.0270e-39, -1.9074e-39, -1.9275e-39,
-2.1243e-39, -2.1134e-39, -2.1539e-39,
-4.4175e-39, -4.6412e-39, -4.6582e-39,
-4.6364e-39, -4.8757e-39, -4.6795e-39,
-4.4571e-39, -4.5038e-39, -4.4570e-39,
-3.2662e-39, -3.1163e-39, -3.2050e-39,
-3.2098e-39, -3.0887e-39, -3.1635e-39,
-3.3183e-39, -3.1411e-39, -3.2824e-39,
8.6839e-40, 5.7318e-39, 1.8373e-40,
4.6732e-39, -4.5549e-41, 1.2817e-39,
3.7642e-41, -6.2591e-39, -5.0492e-39,
5.0057e-39, 6.0612e-39, 2.0220e-39,
3.7436e-39, 4.8326e-39, 3.1353e-39,
3.5289e-39, 4.7177e-39, 6.2666e-39,
-1.4963e-01, -8.0360e-02, -7.9054e-02,
-1.3731e-01, 5.0766e-02, 6.9673e-02,
3.2213e-02, 3.3250e-02, 1.3170e-01,
-2.9718e-02, -2.6931e-02, 1.5768e-02,
5.9232e-02, 7.8471e-02, 9.9465e-02,
2.4872e-02, -4.4226e-02, 3.2357e-02,
-6.0139e-02, -2.2756e-02, -5.5412e-02,
4.5363e-02, 1.6393e-01, 3.7428e-02,
5.2497e-02, 9.5435e-02, 9.7155e-02,
8.2849e-02, 5.9711e-02, 1.4352e-01,
1.1756e-02, 1.5440e-02, 1.3039e-01,
4.3324e-03, 5.9119e-02, 1.1129e-01,
-3.9591e-03, 5.8617e-02, -1.3843e-02,
-2.9949e-02, 3.4877e-02, 5.0679e-03,
3.7278e-02, -2.5221e-02, 1.2191e-01,
1.5626e-01, 8.9797e-02, -1.5458e-02,
1.5607e-01, 1.4561e-02, 1.1720e-01,
-1.6112e-02, 7.7908e-02, -6.1322e-02,
3.8589e-39, 3.9262e-39, 3.8641e-39,
3.9450e-39, 3.8805e-39, 3.9383e-39,
3.8384e-39, 3.8027e-39, 3.7700e-39,
6.2294e-02, -5.6804e-03, -4.7293e-01,
1.3161e-01, 3.1187e-01, -1.8013e-01,
4.9908e-02, 9.8583e-02, 3.8863e-02,
-1.7400e-39, 3.5779e-39, 5.2800e-39,
-1.6845e-39, 4.7140e-39, 2.4244e-39,
-1.3654e-39, 2.4123e-40, -1.5360e-39,
-1.0409e-39, 1.8590e-39, -5.2161e-41,
-8.5110e-40, -1.7210e-39, -4.6624e-39,
5.0754e-40, -2.6248e-39, -5.4801e-39,
-4.9486e-39, 2.8984e-39, 4.9357e-39,
-1.4077e-39, 3.8778e-39, 5.8202e-39,
-4.1095e-39, 6.8891e-40, 5.6565e-39,
3.8021e-39, -5.4740e-41, 2.1795e-39,
-2.4185e-39, -5.8101e-39, 1.5651e-39,
-4.9775e-39, 6.0152e-39, -5.2337e-39,
-4.4350e-39, -3.8239e-39, 3.1624e-40,
-4.3665e-39, -3.0919e-39, -4.7675e-39,
-2.3335e-39, 1.8270e-39, -5.5077e-39,
5.5906e-39, 6.7732e-41, 3.7359e-39,
-5.1412e-40, -2.3239e-39, 5.1937e-39,
-4.4951e-39, -3.4928e-40, -5.0589e-39,
4.9149e-39, 1.1372e-39, 6.6368e-40,
-1.8870e-40, -5.9117e-40, -1.3973e-39,
-2.3555e-39, -1.0637e-39, 3.1692e-39,
-4.8054e-39, 4.8090e-40, 2.0873e-39,
3.8301e-39, -3.8642e-39, 4.8187e-39,
-1.6563e-39, 8.9890e-40, -3.5162e-39,
-2.3010e-01, -7.4445e-02, -1.0006e-01,
-2.4543e-01, -8.5750e-02, 1.4859e-01,
-1.3783e-01, 1.2709e-01, 2.5012e-01,
1.0310e-01, -2.3520e-02, -8.1277e-02,
-2.9267e-02, 1.0686e-01, 4.6287e-02,
-1.2342e-02, -1.7104e-02, 8.4357e-02,
-1.8492e-02, -2.0711e-02, -3.5242e-02,
7.6163e-02, 6.0853e-02, 9.4248e-02,
6.2008e-02, 1.1373e-02, 2.6609e-02,
-7.8135e-02, 1.0672e-01, -5.8380e-02,
7.1618e-02, 2.7966e-04, 1.1835e-01,
1.1306e-01, -7.8578e-03, 5.1743e-03,
-1.2123e-01, 4.9640e-02, 7.3827e-02,
-1.0377e-01, -3.7377e-02, -3.6536e-02,
5.7489e-02, -4.6279e-04, 9.0068e-02,
4.0784e-05, -3.3328e-02, 5.1191e-02,
9.6538e-02, 7.1779e-02, 1.2121e-01,
1.1598e-01, -5.9055e-02, 8.2671e-02,
-1.7292e-39, -1.7848e-39, -1.7308e-39,
-3.2817e-39, -1.7274e-39, -3.3601e-39,
-1.7252e-39, -3.4067e-39, -1.7783e-39,
-7.4053e-02, -4.2785e-01, -4.7597e-01,
4.6309e-01, 7.6018e-02, -3.5885e-01,
3.0428e-01, 8.7449e-02, 9.7880e-02,
-3.4191e-02, 1.1834e-01, -4.3273e-02,
-6.0782e-01, 9.2387e-01, -1.3972e-01,
3.0665e-01, 4.7445e-01, 4.8683e-02,
-1.8865e-02, 9.9509e-02, -4.9881e-02,
2.1640e-02, -2.0941e-01, -1.4779e-01,
1.7808e-01, -1.2572e-01, -9.6756e-02,
-1.0143e-01, 8.3153e-02, -1.0478e-01,
1.6201e-01, 2.0740e-01, -1.2653e-01,
8.1654e-02, -7.6224e-02, -8.9864e-02,
4.5383e-02, -3.6893e-02, -1.0096e-01,
2.0389e-01, 2.2557e-01, -1.9685e-01,
-9.5198e-02, 2.2877e-01, 2.1135e-02,
-1.0919e-01, -1.7563e-01, -3.5255e-01,
-1.3447e-01, 3.3709e-01, -1.9043e-01,
-2.1422e-01, -2.8848e-01, -5.3921e-02,
5.5351e-02, -5.0579e-02, -1.6168e-01,
2.5282e-01, 1.9715e-01, -2.4035e-01,
-3.0800e-02, 1.9329e-01, -1.0893e-01,
-3.4416e-39, -1.8080e-39, -1.6625e-39,
-1.6612e-39, -1.7397e-39, -1.5953e-39,
5.3047e-39, 5.4221e-39, -1.1665e-39,
2.1838e-02, -7.0635e-02, 3.6095e-01,
5.1096e-01, 6.3838e-01, 5.0716e-01,
1.1642e-01, 1.8546e-01, 1.5989e-01,
1.0799e-01, 2.8380e-01, 1.4910e-01,
-2.4305e-01, 2.3084e-01, -9.9982e-02,
-4.6839e-01, 6.0376e-01, -1.2748e-02,
8.7608e-02, 9.8828e-02, 2.1469e-02,
-3.5384e-03, -1.5689e-01, -1.1411e-01,
2.0728e-02, 5.6814e-02, -1.1090e-02,
-3.9301e-02, -9.4325e-02, -6.2119e-02,
1.2842e-01, 9.7466e-02, -2.7502e-02,
1.6560e-01, 1.5058e-01, 2.2821e-02,
-8.1287e-02, -6.3940e-03, 3.2162e-02,
9.4116e-02, -6.2567e-02, -1.2704e-01,
5.4654e-02, 1.4885e-02, 3.8166e-03,
1.9830e-01, -2.5419e-01, -6.7067e-02,
3.2303e-01, 1.6037e-01, -3.0200e-02,
1.3011e-01, 7.5455e-02, -1.2726e-02,
-1.9198e-01, -1.5419e-01, -7.5420e-02,
1.6070e-01, -6.1031e-02, -2.0179e-01,
-1.5829e-02, 1.9918e-01, 1.0960e-01,
-5.5215e-39, -5.8659e-39, -5.5573e-39,
-6.2394e-39, -6.0172e-39, -6.0159e-39,
-4.0308e-39, -4.1217e-39, -4.1372e-39,
1.6143e-01, 1.7271e-01, 4.3534e-01,
-2.4312e-01, 4.0146e-01, 4.4693e-01,
1.5442e-01, 3.9885e-01, -1.4357e-01,
-6.0236e-02, -1.2324e-01, 6.1197e-02,
-2.5842e-02, -1.0266e-02, 1.5670e-03,
2.9103e-02, 2.9966e-02, 1.1286e-01,
3.4528e-02, 1.3039e-01, 9.2736e-02,
3.5193e-02, 5.6583e-02, 5.9465e-02,
1.2846e-01, 9.3387e-02, 9.2131e-02,
1.4974e-03, 1.0196e-01, 6.7632e-02,
8.9809e-02, 5.7568e-02, -6.0621e-02,
-2.7582e-03, 3.1935e-02, 3.1299e-02,
1.3595e-01, 4.9498e-02, 1.2535e-01,
-3.9396e-02, 4.8859e-02, 4.1389e-02,
3.7026e-02, 1.3667e-01, 7.5657e-03,
-5.3476e-02, 1.9677e-02, 9.5214e-02,
1.3136e-02, 7.5560e-02, 6.2428e-03,
-5.2378e-02, -1.8704e-02, 1.0657e-01,
-4.2938e-02, -5.0199e-02, 1.4357e-01,
-5.7002e-02, 1.4158e-01, 4.9442e-02,
-6.8383e-02, 1.1316e-01, 5.2071e-02,
1.5031e-40, 2.1250e-40, 1.8673e-40,
1.5681e-40, 1.3104e-40, 1.6173e-40,
2.1560e-40, 1.8582e-40, 1.7747e-40,
8.4848e-02, -1.9845e-01, -5.1844e-01,
3.0959e-01, 3.6682e-01, 3.1208e-02,
1.9871e-01, 2.8318e-01, 1.6066e-01
}
,
{
-2.7283e-39, -4.9031e-39, -2.1039e-39,
-1.0327e-39, -5.1679e-39, -4.3300e-39,
-5.2613e-39, -3.1707e-39, -6.0916e-39,
1.5840e-39, 1.6709e-39, 1.6120e-39,
1.6716e-39, 1.7418e-39, 1.6624e-39,
1.5922e-39, 1.7383e-39, 1.5668e-39,
1.1389e-01, -4.5774e-02, 6.1423e-02,
1.3858e-01, 2.3102e-02, -6.5079e-02,
1.3269e-01, 3.2387e-02, 7.6966e-02,
-2.1531e-39, -1.6063e-39, -3.2070e-39,
-2.8531e-39, 4.6956e-39, 1.4038e-39,
2.0509e-39, -4.4924e-39, -5.3658e-39,
1.1524e-01, -5.0115e-02, 9.4187e-02,
4.2477e-02, 1.4197e-01, 2.4986e-02,
-2.8688e-02, 9.2289e-02, 4.1965e-02,
-2.1691e-01, -6.6916e-04, -1.3026e-01,
-1.9143e-01, 1.2211e-01, 1.2562e-01,
-1.2273e-01, 7.1045e-02, 1.2396e-01,
-8.0861e-02, -4.4301e-03, 6.3144e-03,
3.0338e-02, -8.6463e-03, 5.5084e-02,
-1.8370e-01, -5.0287e-02, -7.2194e-02,
7.4570e-02, 5.4483e-02, -1.2639e-02,
1.2481e-01, 1.4683e-01, -4.7581e-02,
1.6748e-01, -3.1374e-02, -1.7271e-02,
1.9801e-39, -3.3469e-39, -4.7012e-39,
-2.9869e-39, -3.2752e-39, -2.2142e-39,
-4.2927e-39, -1.9635e-39, -8.7517e-40,
2.7286e-39, 2.7755e-39, 2.7501e-39,
2.7114e-39, 2.7711e-39, 2.6858e-39,
2.5562e-39, 2.6523e-39, 2.5846e-39,
1.4015e-01, 1.0486e-01, 1.2320e-01,
4.6545e-02, 1.2068e-01, 9.2531e-02,
1.0717e-01, 3.8738e-02, 1.0181e-01,
-7.4503e-40, -1.1490e-39, 6.1230e-41,
2.4896e-39, 5.3740e-39, -1.4060e-39,
1.9095e-39, -7.1020e-40, 3.5820e-39,
-1.4348e-02, 6.4128e-02, 6.1082e-02,
-1.1112e-02, 8.5993e-02, 2.4835e-02,
1.2794e-01, -9.1072e-02, -1.3487e-02,
-5.8057e-02, 1.3080e-01, 1.0895e-01,
-1.6436e-01, 9.8593e-03, 1.5586e-02,
-1.5336e-01, 3.6391e-02, 1.4539e-01,
-4.6112e-02, 3.0102e-02, 6.2460e-02,
-2.5510e-02, 2.0437e-02, -5.6816e-02,
-1.0308e-01, -1.5284e-01, -7.1036e-02,
5.5290e-02, -6.6632e-02, 4.2268e-02,
-2.7665e-02, 9.3415e-02, 5.1026e-02,
1.5652e-01, 1.0835e-01, 9.6131e-02,
-4.2583e-39, -3.4889e-39, -5.7522e-39,
4.2701e-40, 2.8095e-39, -3.5579e-39,
2.2286e-39, 4.9865e-39, 4.0469e-39,
-6.4320e-40, -3.3384e-39, -5.9025e-39,
-7.9075e-40, -3.0577e-39, -6.0007e-39,
-8.9627e-40, -2.8374e-39, -5.8866e-39,
6.3645e-03, -5.3080e-03, -5.1759e-02,
1.0665e-01, -6.3126e-02, 5.0918e-02,
7.2193e-02, -6.8836e-02, -6.5657e-02,
2.8519e-39, -5.0955e-39, -9.6085e-40,
-3.3563e-39, -5.6038e-39, -1.6256e-39,
2.6872e-39, 1.4728e-39, -1.9908e-39,
-1.5254e-02, 9.8323e-02, 4.5504e-02,
1.3855e-01, 6.9300e-02, 1.9135e-01,
-5.2321e-02, -6.0227e-03, -1.1734e-04,
-1.4457e-01, 9.2761e-02, 4.5219e-02,
-3.0361e-01, 3.4673e-01, -2.3110e-01,
2.1017e-01, 2.4983e-01, 3.1659e-01,
-6.0569e-02, -5.4348e-02, -7.6719e-02,
-6.5060e-02, 2.8902e-01, 8.0732e-02,
-3.3425e-01, -3.1361e-01, -2.7183e-01,
2.8035e-02, -5.8134e-02, -4.3880e-02,
-1.6375e-02, 9.8195e-02, -7.4011e-02,
-5.9523e-02, 1.0234e-01, -5.3357e-02,
2.3364e-39, -2.5324e-39, -4.8333e-40,
2.2903e-41, -3.3061e-39, -2.5779e-39,
-1.8164e-39, -4.9236e-39, -4.9272e-39,
-1.2809e-39, -1.1698e-39, -1.2564e-39,
-1.3111e-39, -1.1778e-39, -1.2543e-39,
-1.4772e-39, -1.4021e-39, -1.4721e-39,
8.8919e-02, -3.4541e-03, -4.9619e-02,
1.0997e-01, 1.0257e-01, 6.9950e-02,
9.2624e-02, 3.2712e-02, 8.7916e-02,
-5.0242e-39, -6.1320e-39, 8.7891e-40,
-4.9951e-39, 2.3873e-39, -2.7823e-39,
-3.6739e-39, -1.8903e-39, 5.2150e-39,
9.6288e-02, 9.7568e-03, -5.8178e-02,
2.3313e-02, 1.1725e-01, 1.0291e-01,
-1.0111e-01, 8.3706e-02, 9.6575e-03,
-8.2531e-02, 7.0089e-02, 1.0821e-01,
-1.1016e-01, 1.8977e-01, 2.5576e-01,
-1.0221e-01, 5.9236e-02, 6.1678e-02,
2.6234e-02, 9.6868e-02, 9.2432e-02,
4.9881e-02, 5.9121e-02, -1.0477e-02,
-1.4693e-01, -1.0030e-01, -1.0608e-01,
1.1936e-01, -2.2301e-02, 1.1363e-01,
1.3981e-01, 6.7734e-02, -8.2775e-02,
1.0404e-01, -7.7360e-03, 4.2523e-02,
-2.6052e-39, 5.7201e-39, -5.6049e-39,
-3.6314e-39, -5.9232e-39, -3.6970e-39,
3.4360e-39, -5.6848e-39, -3.8308e-39,
4.6279e-39, 5.8135e-39, 2.0652e-39,
3.9864e-39, 4.4000e-39, 5.5163e-39,
2.9644e-39, 2.7537e-39, 3.6593e-39,
4.7872e-02, -2.5857e-02, 4.8810e-02,
1.0389e-01, -1.0782e-01, 4.1365e-02,
9.5778e-02, -5.2341e-02, 4.5947e-02,
-8.2652e-40, -5.7602e-39, 4.6187e-39,
-2.8365e-39, 1.4981e-39, 6.2504e-39,
-4.8330e-39, 4.0283e-39, 4.9792e-39,
-1.0893e-03, -8.2708e-02, -1.7925e-01,
8.3461e-02, 3.1339e-02, 8.8096e-02,
7.3139e-02, -1.2212e-01, 1.0489e-02,
-2.4187e-01, -3.8397e-01, 1.3730e-01,
1.9217e-01, 1.4101e-01, 4.9795e-01,
-1.1441e-01, 3.3343e-01, 7.9194e-02,
1.4556e-01, -5.1060e-01, 2.1556e-01,
3.5719e-01, 2.7282e-01, -1.9015e-01,
-1.0941e-01, 2.7634e-02, 1.1833e-01,
-9.3316e-02, -4.1307e-03, 7.8613e-02,
-2.1526e-02, -6.7141e-02, 2.5513e-02,
-3.3942e-02, -8.6282e-02, 3.0446e-02,
-4.5124e-39, -2.7154e-39, 4.9467e-39,
-4.2299e-39, -5.9485e-39, -2.9606e-39,
-4.7642e-39, -4.7981e-39, -4.0169e-39,
-3.8238e-39, 5.7381e-39, 4.0097e-39,
1.9550e-39, 4.5523e-39, 3.1206e-39,
6.0200e-39, 3.0406e-39, 2.0498e-39,
-3.2474e-01, 1.1052e-02, 4.7197e-02,
-1.4658e-01, 1.6728e-01, 5.2190e-02,
4.3174e-02, 4.5864e-02, 5.4472e-02,
2.6403e-39, 2.7421e-39, -4.3011e-39,
-3.6258e-39, -1.3708e-39, 3.6147e-39,
-1.9471e-39, 4.5896e-39, 4.5992e-39,
-9.9986e-02, 7.0727e-02, 8.5023e-02,
2.2501e-02, 1.4343e-01, 1.1878e-01,
2.8126e-02, 7.3239e-02, 1.0468e-02,
4.5032e-01, 4.4730e-01, 1.3446e-01,
-1.3374e-01, 8.8554e-02, 3.5610e-01,
3.0584e-01, 2.3536e-01, 1.6161e-01,
-5.1485e-01, 1.2372e-01, 5.4379e-02,
-2.9665e-01, -3.3157e-02, -1.8688e-01,
5.1777e-02, -1.4315e-01, -1.1366e-01,
-2.4471e-01, 5.5554e-02, 8.9284e-02,
-1.6870e-01, 7.6156e-02, 1.2472e-01,
-1.5633e-01, 4.3184e-03, 1.1078e-01,
4.0579e-39, -3.8271e-39, 1.1535e-39,
6.6968e-40, -1.1545e-39, -5.4217e-40,
3.5566e-39, -4.4956e-40, -1.7097e-39,
-4.1778e-39, -3.7655e-39, -3.7148e-39,
-3.8013e-39, -3.5225e-39, -3.4678e-39,
-3.8369e-39, -3.5583e-39, -3.6518e-39,
-1.4894e-02, 2.4801e-03, -4.6996e-02,
6.7453e-04, 1.8799e-02, 2.9889e-02,
7.2700e-03, 1.2385e-01, 9.2522e-02,
3.9300e-39, 3.1853e-39, 2.8376e-39,
2.8888e-39, -4.8734e-39, 2.3402e-39,
-3.9710e-39, -4.3243e-39, 4.1151e-39,
1.6399e-02, -8.2828e-02, -5.8361e-02,
2.1315e-02, 1.1968e-02, 6.8727e-02,
3.8558e-02, 1.5451e-02, 5.4465e-04,
1.0549e-02, -8.6468e-02, -1.8535e-01,
-1.3616e-01, 2.7371e-01, 1.1157e-01,
-1.7097e-01, 1.3659e-01, 2.2831e-02,
-3.3897e-02, 1.3307e-01, 7.4482e-03,
4.8120e-01, 7.7053e-01, 5.3354e-01,
-2.4277e-01, -5.9136e-02, -1.3419e-01,
-7.4653e-02, -6.4169e-02, -2.9526e-02,
-3.6336e-02, 7.2362e-02, -3.5332e-02,
6.2628e-02, 6.2278e-02, 3.5639e-02,
3.6614e-39, -2.6150e-39, -3.5229e-39,
5.3538e-39, -1.2368e-39, 2.1530e-39,
4.8585e-39, -2.4150e-39, 5.2220e-40,
3.8610e-40, 1.4772e-39, 2.1962e-39,
-1.8493e-40, 1.1409e-39, 1.7309e-39,
-2.5751e-40, 9.1351e-40, 1.3106e-39,
6.2867e-02, -1.2727e-01, -6.5307e-02,
1.1415e-01, -4.5529e-02, -1.1358e-01,
4.3427e-02, -6.0994e-02, -7.7808e-02,
-4.1831e-39, 1.3230e-39, 5.5853e-39,
-3.4646e-39, -7.2824e-40, -3.4263e-39,
1.5344e-39, -5.8245e-39, 1.9910e-39,
1.1000e-02, -3.7088e-03, -8.0042e-02,
9.7603e-02, 8.6581e-02, -1.8921e-03,
2.2820e-01, 6.8073e-02, -8.1081e-02,
-3.3901e-01, -1.1231e-01, -8.6476e-02,
1.1147e-01, 4.9587e-01, -1.7039e-01,
-2.0702e-01, 5.8730e-02, -1.3475e-01,
2.3548e-01, -6.8044e-02, 9.4296e-02,
4.4803e-01, 6.1517e-03, -5.5192e-02,
-2.7304e-01, -2.6003e-02, 4.0713e-01,
2.8621e-02, 6.2698e-03, -1.4746e-01,
9.4819e-02, -1.3109e-02, 3.5540e-02,
4.4047e-02, 3.5066e-02, -9.5886e-03
}
,
{
-6.7011e-03, 1.7398e-01, 1.4767e-01,
-1.9882e-02, 1.9286e-01, 4.8626e-02,
1.1465e-01, -4.4017e-02, -1.9288e-01,
-7.5817e-02, 1.5598e-01, 1.2329e-01,
3.4126e-03, -9.4884e-02, -4.2276e-02,
3.9110e-02, -1.3477e-01, -4.4951e-02,
6.0450e-02, 4.4656e-01, 3.8954e-01,
-2.1207e-01, -1.0600e-02, -5.6351e-01,
1.8074e-01, 3.0797e-02, -4.0380e-01,
-1.0733e-01, 3.7228e-02, 9.7157e-02,
-7.5810e-03, 5.5605e-02, -9.1898e-02,
-1.4992e-01, -5.3206e-02, -1.9667e-01,
-1.6667e-01, 7.6091e-02, 1.7064e-01,
2.5322e-01, -9.4636e-03, -2.7899e-01,
4.2013e-02, 1.5693e-01, 3.1124e-01,
-2.1534e-02, 1.3915e-01, -2.8199e-01,
-2.9683e-03, 1.4445e-02, -1.5552e-01,
3.4759e-02, -2.0321e-01, -1.1155e-01,
3.6164e-02, 2.8664e-01, 2.3426e-01,
-1.2525e-01, -1.7195e-01, -5.2270e-02,
3.8782e-02, 5.7734e-02, 2.1945e-01,
1.0243e-01, -1.3159e-01, -1.7844e-01,
-6.0359e-02, 1.9125e-01, 3.3553e-01,
-1.0876e-01, -1.2149e-01, -5.7185e-01,
-2.0583e-02, -4.8168e-03, -7.1908e-02,
-2.3428e-02, 2.9902e-02, 1.0888e-02,
3.6383e-02, 1.0052e-01, 2.8972e-02,
1.1415e-03, -3.4518e-02, -9.0058e-02,
7.3207e-03, 6.0961e-02, 7.5629e-02,
-4.5969e-02, 2.4314e-02, 6.7658e-02,
-1.3043e-01, -3.0343e-01, -2.0799e-01,
-4.6261e-02, -1.7650e-02, -7.2160e-02,
-2.6291e-02, 1.5707e-01, 9.5021e-02,
-4.1030e-02, -8.1977e-02, -3.0776e-02,
-3.0685e-02, 8.2163e-03, 4.0357e-02,
-6.9633e-02, 6.0690e-02, 1.5418e-02,
-1.2814e-01, 7.3968e-02, -3.3742e-03,
-1.5239e-01, 8.9941e-03, 1.7877e-01,
2.1219e-01, -5.2057e-01, -2.2284e-01,
-3.4681e-02, -1.3594e-02, 1.6700e-01,
-7.7366e-02, 8.5138e-03, -4.3159e-02,
4.0597e-02, 9.7247e-04, -3.4326e-01,
-2.1424e-01, -1.6489e-01, -4.3248e-02,
1.5987e-01, 4.6235e-01, 2.6287e-01,
-1.2270e-02, 1.3165e-01, 5.3217e-02,
7.2716e-02, -7.0677e-02, -1.7740e-01,
-6.2357e-02, 1.1932e-01, 1.5733e-01,
-1.0275e-01, 1.4966e-01, 4.8125e-02,
-4.7150e-02, 1.5516e-01, 6.9615e-02,
6.1252e-02, 5.3859e-02, 1.7052e-01,
3.1940e-02, 1.1842e-01, 4.2265e-02,
-4.9531e-02, 1.1519e-01, 9.8914e-02,
1.3455e-01, 1.3177e-01, -2.7938e-03,
1.1895e-01, 1.1377e-01, 6.1035e-02,
8.0390e-02, -4.1028e-02, 3.7415e-03,
-1.0317e-01, 1.0279e-01, -6.5789e-03,
-2.3339e-02, 7.2741e-02, 4.1662e-02,
-7.4087e-02, 8.8531e-02, -4.9697e-02,
4.6134e-02, 1.4300e-01, 1.1720e-01,
3.8271e-03, 1.7108e-01, -2.4779e-02,
6.9844e-02, -4.6467e-02, -9.1699e-02,
5.5704e-02, -3.0312e-02, -7.8252e-03,
-4.3799e-02, -1.6623e-01, -2.3006e-02,
4.9214e-02, 3.1528e-02, 3.3302e-02,
3.1213e-02, 9.8880e-02, -1.1098e-01,
4.5092e-02, -1.6922e-03, -5.1380e-02,
7.6063e-02, 1.4159e-01, 4.1409e-02,
8.0812e-02, 9.7569e-02, 4.1532e-02,
-1.1136e-01, -4.3686e-02, -1.4144e-01,
-9.7717e-02, 4.8239e-02, 5.3374e-02,
-1.1827e-01, 1.0008e-01, 8.6368e-02,
-6.2572e-02, 3.6484e-02, -6.3361e-02,
4.1008e-03, 1.6709e-02, 4.0553e-02,
2.2766e-02, 2.7241e-02, 5.1786e-02,
1.3607e-02, 5.4638e-02, 6.9439e-02,
-2.4211e-02, 4.0065e-03, -1.9540e-03,
-9.5697e-03, 3.0503e-02, 3.5809e-02,
-4.3456e-02, 2.8959e-02, 4.2898e-02,
-1.5629e-02, -9.4347e-02, 7.2799e-02,
2.3115e-01, 7.3449e-02, 6.9354e-02,
1.6014e-01, 1.8878e-01, -2.2148e-02,
-4.9274e-02, -6.9233e-03, 1.0578e-02,
-4.3291e-02, -7.8361e-03, 1.6647e-02,
-5.6168e-02, 1.0317e-02, 3.1170e-02,
1.2530e-01, -3.2398e-02, -6.5690e-02,
-2.5805e-01, 3.6079e-02, 3.5390e-02,
-1.7236e-01, 6.6798e-03, 4.8924e-02,
1.3314e-01, 5.0646e-02, -3.4844e-02,
-1.2559e-01, -1.1774e-01, 1.2898e-01,
-7.7402e-02, -1.0703e-02, -2.6359e-01,
-3.8706e-02, -2.2082e-02, 2.7591e-03,
-8.2353e-02, -3.1941e-02, -1.1937e-01,
2.9747e-02, 2.0041e-01, -5.1984e-02,
1.7919e-01, 6.3603e-02, -5.5516e-02,
1.0116e-01, 8.7370e-02, -8.6624e-02,
-8.4314e-02, 3.5997e-02, 2.1161e-01,
1.0902e-39, 9.3514e-40, 9.3074e-40,
9.8377e-40, 1.1299e-39, 8.2024e-40,
1.2062e-39, 1.0405e-39, 1.0284e-39,
-5.7829e-40, -6.7489e-40, -6.3814e-40,
-6.8460e-40, -7.9377e-40, -7.6449e-40,
-4.7632e-40, -5.6022e-40, -5.2053e-40,
1.8459e-39, 2.1036e-39, 2.1848e-39,
2.0535e-39, 2.3728e-39, 2.4416e-39,
1.7027e-39, 2.0249e-39, 2.0833e-39,
9.1594e-40, 8.0493e-40, 7.7836e-40,
7.5889e-40, 6.3026e-40, 9.3384e-40,
9.6987e-40, 1.1273e-39, 8.1906e-40,
-7.9046e-39, -7.2328e-39, -7.1040e-39,
-7.9046e-39, -7.1862e-39, -7.4931e-39,
-6.5243e-39, -7.1117e-39, -6.9941e-39,
1.3577e-39, 3.5945e-40, -3.6833e-40,
1.3768e-39, 6.9779e-40, -7.5180e-40,
5.7295e-40, -6.0767e-41, -1.3085e-39,
7.7960e-39, 7.8579e-39, 7.4482e-39,
7.4224e-39, 7.5791e-39, 7.4378e-39,
6.5819e-39, 6.7271e-39, 6.6281e-39,
-1.6535e-39, -7.7817e-40, -8.5918e-40,
-2.0861e-39, -1.3658e-39, -1.0560e-39,
-3.4360e-39, -2.6878e-39, -2.6477e-39,
4.6460e-02, 1.1676e-01, -5.9846e-02,
8.6467e-03, -1.1287e-02, 7.0129e-02,
-1.1277e-01, 1.0321e-02, -1.9567e-02,
1.2145e-01, -7.1995e-02, -1.3615e-02,
9.7877e-02, 6.6061e-02, 1.0272e-02,
1.1391e-01, 5.6974e-02, 9.7472e-02,
-3.3605e-02, 6.1751e-02, -4.3004e-02,
-5.1040e-02, -3.8798e-02, -7.1736e-02,
-1.0179e-02, 8.5964e-02, -8.1435e-04,
2.5149e-02, 7.1990e-02, 8.1534e-02,
6.3133e-02, 5.8643e-02, 4.6756e-02,
-5.3580e-03, 3.4411e-02, 5.2957e-03,
1.0652e-01, -6.6035e-02, 8.5754e-02,
3.2919e-01, -1.5958e-02, 2.1694e-03,
-9.0943e-02, -2.1920e-02, 2.9706e-02,
4.7986e-02, 1.7105e-02, -5.7711e-02,
-4.2066e-03, 6.5668e-02, -1.6617e-01,
1.0057e-02, -2.0108e-03, -1.5499e-01,
6.7941e-02, 1.7352e-01, 4.9498e-02,
6.2013e-02, 9.6180e-02, -2.9861e-03,
-1.2482e-02, 9.5709e-03, -8.7913e-02,
-8.6954e-02, 9.9646e-03, 8.0050e-02,
-4.4157e-02, -6.3008e-03, 4.0645e-02,
-7.9624e-02, 1.0856e-01, -4.5341e-04,
7.1085e-02, 5.7002e-02, 1.1673e-02,
-5.1378e-02, -2.3945e-03, -5.9532e-02,
3.4998e-02, -3.6019e-02, 1.0428e-02,
5.9774e-03, 5.4993e-03, 2.4306e-02,
-5.9813e-03, 4.4999e-02, 7.4744e-02,
-3.0773e-02, -3.6835e-02, 5.8396e-04,
-3.8644e-01, 2.4563e-01, 1.2436e-01,
-3.2986e-01, -1.1044e-01, 2.0753e-01,
-1.3621e-01, -1.3544e-01, 5.8882e-02,
8.8837e-02, 5.7460e-02, -3.0960e-02,
-1.2598e-03, 3.9124e-02, -5.3322e-02,
-4.4227e-02, -3.8000e-02, -3.2677e-02,
1.5675e-01, 1.0808e-01, 1.1024e-01,
5.4468e-01, -5.9268e-01, 1.0088e-01,
8.2360e-02, 1.9646e-01, 6.4799e-03,
1.6357e-01, 6.8273e-02, -1.2051e-01,
4.9511e-02, 4.7334e-01, -4.8876e-02,
-1.3130e-01, -5.1568e-03, 1.0088e-01,
-5.8971e-02, 2.5775e-01, 9.0169e-02,
-3.0461e-01, -3.2353e-02, -2.0293e-01,
1.3897e-02, 1.4249e-01, -5.8661e-02,
-1.3624e-01, -5.3026e-02, 3.1038e-03,
-5.6211e-01, -2.8375e-01, -1.2524e-01,
-2.3813e-01, -2.2439e-02, -4.4082e-02,
9.9066e-02, -7.1735e-02, 2.2345e-02,
-1.4791e-02, 1.3225e-01, 8.9460e-02,
-4.8986e-02, -3.2296e-02, -4.7474e-02,
6.5865e-02, -8.0697e-02, -6.8475e-02,
-7.6845e-02, 1.1568e-01, 3.7443e-03,
1.0448e-01, -3.3206e-03, 5.4523e-02,
5.5741e-02, 5.0917e-02, 1.0209e-01,
-9.6729e-02, 7.8876e-02, -4.9550e-02,
-3.8926e-02, 7.1163e-02, 8.9436e-02,
-1.4001e-03, -9.4980e-02, -7.7747e-02,
9.4335e-02, 1.1605e-01, 9.5715e-02,
1.7951e-02, 4.3177e-03, -5.6937e-02,
4.4558e-02, -5.2562e-02, 4.0652e-02,
1.8058e-01, -1.0763e-01, 4.8927e-02,
-5.2569e-03, -1.3437e-01, 2.8578e-02,
1.3592e-02, -3.9346e-02, 1.0003e-01,
1.8091e-01, 7.2687e-03, -3.7241e-02,
6.0438e-02, 5.7872e-02, 7.3778e-02,
1.2411e-02, 4.1856e-02, -2.8892e-02,
3.2884e-02, 6.9072e-02, -5.9363e-02,
-1.7112e-01, -9.9734e-02, -7.3417e-02,
-8.9623e-02, 4.5292e-02, -1.6635e-01,
-3.1895e-02, 1.4284e-01, 2.0752e-01,
2.3383e-02, -1.3490e-02, 5.1593e-03
}
,
{
5.8708e-01, 2.6026e-01, 8.8379e-02,
3.1818e-01, 7.0055e-03, 1.1652e-01,
1.1719e-01, 8.7711e-02, -1.1687e-02,
7.5741e-02, -3.7970e-01, 1.6001e-01,
1.0739e-01, 3.1735e-01, 2.0061e-01,
8.6719e-02, 8.5111e-02, -3.9354e-02,
-9.9512e-02, -9.1524e-02, -9.7984e-02,
5.6333e-02, -1.5928e-01, 1.1998e-03,
2.7488e-02, 2.8168e-02, 1.3768e-01,
5.9686e-02, 2.8931e-01, -1.7131e-02,
1.6391e-01, 3.3748e-01, 1.2296e-01,
8.9242e-02, 1.4761e-01, 1.7187e-01,
-2.6352e-39, -4.0703e-39, -5.1751e-39,
-2.5214e-39, -3.9666e-39, -4.6282e-39,
-2.4635e-39, -3.6734e-39, -4.3359e-39,
-7.1654e-02, 7.9691e-03, -1.0219e-01,
-5.5684e-02, -1.3065e-01, -1.9106e-02,
1.0561e-01, 5.9054e-02, -2.1279e-02,
-1.8840e-02, 1.6690e-01, 3.8050e-01,
6.2779e-02, -1.2124e-01, 5.0304e-01,
2.1870e-02, 1.7631e-01, 1.4858e-01,
1.4614e-01, -1.1767e-01, -3.9155e-02,
1.2963e-01, -4.6753e-02, 1.3848e-01,
-8.2292e-02, 2.1908e-01, 6.2794e-02,
-3.2625e-01, -8.8528e-03, -6.5603e-03,
5.4245e-02, 2.7983e-01, 2.1608e-01,
8.5890e-02, 1.0955e-01, -1.1606e-01,
9.7435e-02, 1.5911e-01, 6.7285e-02,
3.9570e-02, 1.9333e-01, -1.5531e-02,
-2.3475e-01, -2.5006e-02, 2.8106e-02,
6.8740e-03, 1.3261e-01, -3.8563e-02,
8.8758e-02, -4.2225e-02, 4.7042e-02,
5.6284e-02, -2.8303e-02, 3.4532e-03,
-4.0265e-02, -3.0645e-02, -5.2059e-02,
-4.6196e-02, -2.4868e-02, -3.3257e-02,
-3.7208e-02, -2.4100e-03, -7.1959e-04,
6.4237e-39, 6.1438e-39, 6.5434e-39,
6.1596e-39, 6.1608e-39, 6.3157e-39,
6.4263e-39, 6.4625e-39, 6.5877e-39,
1.1092e-01, -4.4784e-02, 9.1292e-02,
9.2900e-02, 1.2459e-01, -7.1447e-02,
2.6158e-02, -5.0219e-02, -5.6136e-02,
-5.8603e-02, 2.9323e-02, -2.4230e-01,
-9.4921e-02, 1.9103e-01, 1.1670e-01,
1.2022e-02, 6.2830e-02, 3.0393e-01,
3.3819e-02, 1.0040e-01, 8.2600e-02,
-8.7604e-02, 7.0641e-02, -1.0132e-01,
-9.9371e-02, 8.9363e-02, -1.0703e-01,
4.4603e-01, 7.9636e-03, 1.8834e-01,
1.1859e-01, 4.0760e-01, 9.6841e-02,
-1.1735e-01, 2.3993e-01, -7.7916e-02,
6.3481e-02, -1.4958e-01, 1.1554e-02,
5.2668e-02, 3.4379e-01, 8.3536e-03,
-5.5403e-02, 1.1655e-01, -7.5022e-02,
-8.2992e-02, -7.0322e-02, -1.0078e-01,
-1.4516e-02, -1.6558e-02, 6.6806e-02,
-6.7454e-04, -5.7525e-02, 1.5772e-01,
1.6446e-01, -1.1897e-02, -8.3387e-02,
7.1339e-02, 1.6254e-01, 1.6963e-01,
1.2630e-02, 5.7933e-02, 8.4686e-02,
-5.6318e-39, -6.1837e-39, -6.1661e-39,
-5.9923e-39, -6.2371e-39, -6.4922e-39,
-6.4206e-39, -6.6092e-39, -7.1603e-39,
4.6507e-02, -4.5924e-02, -7.3838e-02,
-3.3012e-02, 5.1295e-02, -7.4884e-02,
7.5389e-02, 1.2002e-01, 3.9442e-03,
9.9461e-02, 1.9607e-01, 1.4896e-01,
-1.1191e-02, 1.8352e-01, 2.6778e-01,
8.0977e-02, 1.0885e-01, 2.5331e-01,
3.1503e-02, -3.0004e-01, -6.9114e-02,
2.0705e-01, -2.0978e-02, 1.5154e-01,
6.3033e-02, -1.5721e-01, 5.1067e-02,
-1.1220e-02, 1.5315e-01, 4.5277e-03,
3.3250e-01, 1.4207e-01, 1.3469e-01,
5.2996e-01, -2.5803e-01, -4.5525e-02,
3.9807e-02, -1.7088e-01, -1.2414e-01,
2.1564e-01, -2.9160e-01, -1.8796e-01,
1.5482e-02, 2.7005e-01, 8.2446e-02,
5.4906e-02, -1.0507e-01, -8.0069e-02,
-4.5729e-03, -2.0621e-02, 5.0088e-02,
2.5479e-02, 9.5924e-02, 8.3813e-02,
4.7833e-02, -2.6191e-01, 3.3483e-02,
6.1653e-02, 7.1940e-03, -1.3578e-01,
1.7662e-01, -2.8194e-02, -2.7509e-02,
-1.9419e-39, -2.4904e-39, -2.7567e-39,
-2.9896e-39, -3.2700e-39, -3.6336e-39,
-3.8942e-39, -4.2028e-39, -4.5229e-39,
-1.6839e-02, -9.4421e-02, -3.0147e-02,
-6.5974e-02, -1.6716e-02, 5.0672e-02,
-7.9841e-02, -4.7086e-03, 5.0016e-02,
1.8223e-04, 3.3984e-03, 5.1965e-02,
-7.3512e-02, -5.6604e-03, -1.1630e-01,
-1.0767e-01, 3.2261e-02, -2.0044e-01,
1.0995e-01, 4.3581e-02, -3.9397e-02,
-1.4476e-02, -2.3087e-02, 2.6423e-03,
1.2047e-02, 1.2084e-01, 1.8563e-01,
-2.8497e-01, -2.5353e-01, 1.0933e-01,
8.8974e-03, 1.3315e-01, 1.9153e-01,
2.0427e-02, -8.9900e-02, 2.2363e-02,
2.8575e-02, 1.6351e-01, 1.1876e-01,
-2.7438e-02, -1.0816e-03, -5.5680e-02,
5.1369e-02, -2.0575e-02, 4.5232e-02,
9.4988e-02, 2.5418e-02, 8.9888e-02,
9.6631e-02, 1.5828e-01, 1.1577e-01,
-2.9665e-02, 3.2035e-02, 1.4428e-01,
7.4352e-03, 2.4917e-03, 4.2713e-03,
1.2534e-02, 2.1314e-02, 1.5963e-02,
2.2920e-03, 2.1864e-02, 2.2921e-02,
7.1089e-40, 5.3581e-40, 4.5922e-40,
6.2492e-40, 4.6365e-40, 4.5466e-40,
9.2740e-40, 7.7219e-40, 7.4187e-40,
-7.0909e-02, 1.1127e-01, -8.8953e-02,
-5.0537e-04, 4.5664e-05, 1.3829e-02,
7.4380e-02, 1.3900e-03, 4.0345e-02,
5.7173e-02, 8.7514e-02, -3.9945e-01,
4.4116e-02, 1.4148e-01, -2.7578e-02,
-1.2133e-02, 1.9647e-01, -2.6767e-02,
8.5870e-02, -1.3723e-02, 1.3408e-02,
7.9471e-03, 7.8321e-02, 5.1118e-02,
-8.3660e-02, -7.1584e-02, 2.7423e-02,
-5.5651e-39, -3.2350e-39, 4.7534e-39,
-4.8581e-39, -5.8010e-39, 6.3268e-39,
-3.4016e-39, 6.2313e-39, 5.7413e-39,
-3.0708e-39, 6.0155e-39, -6.3317e-39,
-3.1054e-39, -5.5914e-39, -6.4181e-39,
-1.3636e-40, -6.0343e-39, -6.2034e-39,
1.0108e-39, -2.5283e-39, -8.6098e-40,
1.0088e-39, -2.3042e-39, -8.2029e-40,
1.2802e-39, -3.7761e-39, -4.6451e-40,
1.4160e-39, 7.3869e-40, 1.3275e-39,
1.2560e-39, 1.0078e-39, 1.2296e-39,
-2.4490e-39, 8.6071e-40, -2.4510e-39,
2.1753e-39, -2.0576e-39, -2.1365e-39,
2.0157e-39, 2.0755e-39, 1.9439e-39,
2.0998e-39, 2.0732e-39, 2.1072e-39,
-1.1289e-39, -1.6132e-39, 4.8117e-40,
1.2029e-39, -1.3112e-39, 6.4761e-40,
1.4958e-39, -9.2719e-40, 8.9526e-40,
3.6032e-39, -4.9803e-39, -2.4410e-39,
-1.6429e-39, -4.9602e-39, -5.9626e-39,
-1.6627e-39, -4.9809e-39, -5.6258e-39,
1.6619e-39, 1.7856e-39, 5.1822e-39,
1.5443e-39, 1.4215e-39, 6.1830e-39,
1.4242e-39, -1.7895e-39, 5.2206e-39,
-2.4764e-01, -2.8696e-01, -5.7562e-03,
1.9255e-01, 5.1335e-02, -1.4512e-01,
-1.1017e-02, -3.6505e-02, -1.1773e-01,
5.8651e-02, -1.9354e-02, 2.1595e-02,
-3.5114e-03, 1.8335e-01, 4.0043e-02,
1.0579e-01, -6.3055e-02, 2.6981e-02,
-1.4351e-02, -1.5029e-02, -9.7792e-02,
4.6718e-02, 3.8673e-02, -2.3410e-02,
-2.8942e-03, -8.4898e-03, -3.3613e-02,
2.0298e-01, 9.7218e-02, 1.5052e-01,
3.2108e-01, 2.6568e-01, 1.3809e-03,
1.0008e-01, 6.9262e-02, -4.7810e-02,
4.1291e-39, 4.3762e-39, 4.2724e-39,
4.5864e-39, 4.7827e-39, 4.8821e-39,
4.5529e-39, 4.6921e-39, 4.7519e-39,
9.1246e-03, -1.8136e-02, -5.8517e-03,
9.1080e-03, 4.2591e-02, -1.5604e-02,
-3.6270e-02, 5.9184e-02, 2.3189e-02,
4.2636e-02, 3.6600e-01, 4.7134e-01,
3.6666e-02, 4.3565e-01, 2.1105e-01,
-5.2747e-02, 4.0503e-01, 2.0926e-01,
8.8427e-02, 4.9138e-02, -2.3381e-01,
-5.6521e-02, 7.5013e-02, -1.4783e-01,
-4.7299e-02, -8.1200e-02, -6.5665e-02,
-1.6281e-01, -2.3070e-01, 5.4033e-02,
1.1527e-01, 3.4730e-01, 1.9293e-02,
-1.8352e-02, 2.0626e-01, -1.1955e-01,
8.1665e-02, 3.8584e-02, 2.7958e-03,
6.4294e-02, 1.3912e-01, -5.6370e-02,
-1.7618e-02, 9.0357e-02, -5.5021e-03,
9.3211e-05, 1.5219e-01, 1.0844e-01,
7.6218e-02, 1.7016e-01, 9.2438e-02,
4.3387e-02, 8.0141e-02, -3.2034e-02,
9.2121e-03, -2.8742e-03, -1.5988e-03,
9.1980e-03, 1.6983e-02, 3.3154e-03,
-2.5642e-02, 4.1607e-03, 6.9246e-03,
3.7665e-40, -4.0391e-41, -4.0502e-41,
2.2436e-40, -1.7190e-40, 1.6583e-40,
1.4090e-40, 2.2914e-41, 6.7388e-41,
-8.1776e-02, 9.0814e-02, 1.0222e-01,
-3.4949e-02, 1.0266e-01, 3.6826e-02,
-8.3856e-02, 1.1102e-01, 1.1026e-01,
1.5993e-02, -1.1626e-01, -3.0870e-01,
-3.4119e-03, 1.7638e-01, -1.9092e-01,
-1.2549e-01, 3.2538e-01, -7.9381e-02,
3.8433e-03, -8.2530e-02, 3.2103e-02,
-1.1637e-02, -1.0371e-01, 2.3851e-02,
2.5390e-02, 7.7085e-02, 8.9536e-02
}
,
{
-2.8918e-02, -8.3719e-02, -3.3026e-02,
-2.2620e-01, 2.4280e-02, -2.1254e-01,
2.8231e-02, 3.5323e-02, -2.8425e-02,
1.6891e-01, 3.8192e-03, 7.2794e-02,
-1.6364e-01, -4.1031e-02, -1.3141e-02,
-3.9478e-02, 1.4910e-01, -7.0978e-02,
-6.3880e-02, 9.8206e-02, 1.3163e-01,
1.5778e-01, 1.1914e-01, 3.3277e-01,
-3.6808e-01, -5.5627e-01, 1.4401e-01,
-4.0314e-01, 3.6298e-01, -3.8212e-02,
-2.3782e-01, 2.5410e-01, -2.2334e-01,
7.6542e-02, 9.4998e-02, 3.3399e-02,
-1.8601e-01, -1.8863e-02, -4.1835e-02,
-5.8671e-02, -8.9987e-02, -6.1069e-02,
-7.1062e-02, -9.5987e-02, 1.2318e-02,
5.4541e-39, -1.8871e-39, 4.5048e-39,
-2.2237e-39, -5.4753e-39, 1.4395e-39,
-3.5753e-39, 6.1466e-40, -2.1567e-39,
4.5273e-02, 1.1619e-02, 1.1379e-01,
1.4093e-01, 1.0444e-01, 1.1283e-01,
-3.0230e-02, 3.1937e-01, 5.0541e-02,
8.2862e-02, -3.1540e-02, -6.4833e-02,
1.5168e-01, 1.7613e-03, 4.2690e-02,
1.8820e-01, 4.3783e-02, 6.3473e-02,
8.0477e-02, 1.0397e-01, -3.6337e-02,
-7.2828e-02, 6.4048e-02, 4.2476e-02,
-1.3974e-04, -2.2468e-01, -4.9189e-02,
-2.7478e-03, 8.7663e-03, 4.3870e-02,
-3.3168e-02, 1.1915e-01, -1.8083e-02,
4.8155e-02, -4.1742e-02, 1.1251e-01,
-6.1535e-02, 5.1782e-02, -2.3494e-02,
5.1677e-02, 1.4067e-01, -1.0377e-01,
3.2951e-03, 1.1942e-02, -1.1775e-01,
-2.2104e-02, -8.1073e-02, -3.7509e-02,
6.8970e-03, 1.6406e-02, 4.6923e-02,
-8.8448e-03, 2.9130e-02, 3.1024e-02,
7.6795e-02, 4.6816e-02, -1.3204e-02,
1.3988e-01, 1.1175e-01, 8.7121e-02,
1.2097e-01, -3.8463e-02, 6.7387e-02,
1.4708e-39, 1.7125e-39, 2.7764e-39,
1.5203e-39, 1.5811e-39, 4.4921e-39,
1.8828e-39, 1.7593e-39, 2.3774e-39,
4.3474e-02, -4.7065e-02, -7.1999e-02,
6.0338e-02, 3.7240e-02, 2.8802e-02,
-4.0701e-02, 1.8627e-02, -1.8181e-02,
5.5169e-02, 1.1874e-01, -7.0475e-02,
-1.3438e-02, 1.4335e-01, 1.5180e-01,
5.6331e-02, 7.9719e-02, 6.2691e-03,
-6.6460e-02, 2.7455e-01, 5.5916e-02,
1.3515e-01, -3.7263e-01, 1.3463e-01,
-4.0820e-05, 3.1896e-01, -8.3871e-02,
-7.6172e-02, 6.1963e-02, -1.3804e-02,
-5.2852e-02, 1.0006e-01, -3.4106e-02,
6.7218e-02, -3.8616e-03, -7.1788e-02,
1.6386e-02, -1.8612e-02, -1.7354e-01,
-1.2166e-01, 1.2667e-02, -3.3852e-02,
-3.2897e-02, 1.0343e-01, 2.4924e-01,
-1.3272e-02, 1.5705e-01, 6.7731e-02,
1.0637e-01, 1.9482e-02, -2.0655e-01,
-5.9087e-03, -7.1073e-02, 1.8723e-02,
-2.6087e-02, 1.5997e-01, 9.6264e-02,
1.2431e-01, 1.1462e-01, -9.7197e-02,
-6.2347e-02, -4.5239e-02, -2.6443e-02,
3.7406e-39, -4.6345e-40, 3.7971e-39,
-3.8112e-39, -3.5585e-39, 4.6938e-39,
6.0588e-39, -4.2403e-39, 1.5311e-39,
1.6381e-01, -6.8390e-02, 2.6527e-02,
-9.8612e-02, 2.1953e-01, -2.1886e-01,
7.4841e-02, -1.2118e-01, -8.1700e-02,
4.4974e-02, 7.7514e-02, -8.4620e-02,
-2.9808e-02, 2.1591e-02, -3.9502e-02,
-5.5797e-02, -6.5105e-02, -5.9860e-02,
-3.7811e-01, -2.3056e-01, -7.4491e-02,
4.0833e-02, -2.2613e-01, -1.4986e-01,
-1.0974e-01, -6.5161e-01, 1.7546e-01,
7.7903e-02, -1.5969e-02, -6.3040e-02,
-1.7819e-01, -7.1414e-02, 1.8451e-02,
-1.0618e-01, 3.5614e-03, 3.6719e-02,
1.5666e-01, 3.9222e-01, 9.1678e-02,
1.4519e-01, 5.7331e-01, -7.3466e-02,
1.0271e-01, 1.0803e-01, -1.3150e-01,
3.7496e-01, 1.5001e-01, 1.4727e-01,
3.2151e-01, 1.2875e-01, -8.1645e-02,
2.8629e-01, 1.9329e-01, -8.0009e-02,
-9.9557e-02, -2.6954e-02, 2.6042e-02,
-5.3374e-02, 1.1369e-01, 4.6503e-02,
-3.4068e-02, 9.1849e-03, -9.1420e-02,
4.6343e-39, 4.8289e-40, 3.1694e-40,
-3.5093e-39, -4.7356e-39, 7.1265e-40,
-4.9626e-39, -2.1280e-39, 1.8542e-39,
-1.3634e-01, -5.4825e-02, -6.6125e-02,
-2.0694e-01, 1.4924e-01, 1.4028e-01,
3.2735e-02, 7.6360e-02, -9.2541e-02,
-1.2149e-01, -7.9789e-02, -2.9591e-02,
1.2852e-02, 1.2457e-01, 1.3081e-02,
-3.2966e-03, 1.1089e-01, 8.6461e-02,
1.4352e-01, 5.9238e-02, -2.1140e-02,
7.3999e-02, 2.0893e-01, 3.5512e-02,
-5.3110e-02, 3.9222e-01, 1.3103e-01,
1.0168e-01, 1.6685e-02, 5.1616e-02,
9.8241e-02, -1.6502e-01, -1.2586e-01,
8.3915e-02, 7.4837e-03, 5.7355e-02,
-3.4982e-02, -1.2773e-01, 6.8213e-02,
-1.4674e-01, -3.6844e-01, 8.1546e-02,
-1.5385e-01, -7.0368e-02, 4.3894e-02,
7.8201e-02, -1.3952e-01, 1.5154e-01,
2.3880e-02, 1.4078e-01, -1.2906e-01,
-1.8268e-01, -1.5687e-02, -1.2588e-01,
-9.4643e-03, 1.4718e-02, 7.4932e-02,
3.0996e-02, -1.2339e-01, 1.7452e-01,
4.4221e-02, -1.3808e-01, -1.0205e-02,
-8.6959e-40, -3.7907e-39, -1.6020e-41,
4.3567e-40, 1.4647e-39, 6.5692e-40,
5.4286e-39, 8.8667e-40, -3.5047e-39,
2.4116e-02, -9.5358e-02, 1.6468e-01,
3.1916e-01, -2.3472e-01, -2.1644e-01,
1.2945e-01, -1.8403e-02, -3.2247e-02,
1.3666e-02, -3.0548e-02, -4.7635e-02,
-9.2714e-02, -2.1605e-01, -5.9464e-02,
-8.9110e-03, -3.9299e-03, -2.3289e-02,
-1.7855e-01, 9.0661e-03, -1.9142e-02,
-5.6754e-02, -5.4451e-01, -5.7664e-01,
1.6835e-01, 2.0531e-02, 2.0812e-01,
5.2794e-02, -9.0414e-02, 3.5560e-02,
3.7395e-02, 5.9355e-02, -3.6676e-02,
3.8035e-02, 6.7844e-02, 1.1042e-01,
5.0372e-02, 6.8188e-02, -8.5353e-02,
2.2769e-01, 5.9758e-01, -7.4568e-02,
7.8316e-02, 8.4925e-02, -4.0400e-02,
-7.7984e-02, -2.0739e-01, 1.1736e-01,
2.4528e-02, 2.1850e-01, 2.5639e-01,
-2.4561e-02, 8.4661e-02, -9.2191e-02,
-2.7006e-02, -7.8921e-02, -2.7124e-02,
-5.9232e-03, -2.7693e-02, 5.9524e-02,
9.7704e-02, 9.6223e-02, 2.0432e-02,
-2.5588e-39, 5.5478e-39, -5.6209e-39,
-4.7285e-39, 4.5875e-39, -5.7483e-39,
6.7240e-40, -3.5113e-39, -3.6246e-39,
1.6870e-03, -2.1707e-01, -3.8895e-02,
-5.8465e-02, -5.9146e-02, 1.1936e-01,
-2.7727e-02, -9.5047e-02, -2.2627e-01,
-9.5155e-02, -7.1422e-02, 9.4611e-03,
3.7587e-03, 1.6966e-02, 2.8839e-02,
-3.0794e-02, 1.9888e-02, -5.2541e-02,
-1.0708e-02, 3.0171e-02, -3.0473e-01,
-1.0214e-01, 4.2017e-02, 2.5568e-01,
-9.8664e-02, -5.5928e-01, -7.6876e-02,
-8.6821e-03, 4.6484e-02, -3.0836e-01,
-1.0205e-01, 6.8113e-02, -2.8059e-01,
-5.7828e-02, 2.0990e-02, -1.2843e-01,
7.5680e-02, 1.7504e-02, 1.6278e-01,
1.4075e-01, 2.4361e-01, 2.2737e-01,
-1.3044e-01, 8.2145e-03, 1.6344e-01,
-2.4780e-03, 1.5108e-01, 1.3313e-02,
-9.5257e-02, 6.1810e-02, -1.9386e-01,
7.1365e-02, 1.5328e-01, 9.5848e-04,
1.2278e-01, 7.8318e-02, 3.3400e-02,
4.8597e-02, 6.0632e-02, -5.7238e-02,
3.2522e-02, 4.5926e-02, -9.5566e-02,
1.0844e-39, -3.2490e-39, -2.6904e-39,
-3.0517e-39, 4.7535e-39, 4.3440e-39,
-1.3996e-39, 4.5201e-39, -3.6165e-39,
-5.6164e-02, 1.0353e-01, 6.6228e-02,
8.2147e-02, 4.7827e-01, 1.2004e-01,
-6.8150e-02, 1.8340e-01, 2.2113e-01,
1.0580e-05, -2.0949e-01, -1.0358e-01,
1.6206e-01, 1.2538e-01, -1.3104e-01,
1.3700e-01, 2.9282e-02, -8.7020e-02,
4.5467e-39, 5.9787e-39, 2.6105e-39,
-1.2670e-39, 2.9513e-39, -1.0811e-39,
-3.9129e-39, -1.8499e-39, 2.9297e-39,
5.7414e-39, 5.5907e-39, 5.5702e-39,
5.9004e-39, 5.7585e-39, 6.3188e-39,
5.7395e-39, 5.6146e-39, 5.6451e-39,
-7.3964e-39, -6.3330e-39, -5.5236e-39,
-7.5172e-39, -5.8828e-39, -3.7555e-39,
-6.9528e-39, -7.7656e-39, -5.5115e-39,
-7.9031e-39, -7.8200e-39, -7.7914e-39,
-7.4570e-39, -7.6413e-39, -7.9054e-39,
-7.3437e-39, -6.7956e-39, -7.0789e-39,
-3.6774e-40, 1.3572e-40, 3.0250e-40,
-4.1792e-40, -4.6240e-40, 2.2528e-40,
-5.2143e-40, -5.6847e-40, -4.2768e-40,
-4.0128e-39, 1.3485e-39, 1.3436e-39,
1.5337e-39, -3.9186e-39, 1.2120e-39,
1.2992e-39, 1.5671e-39, 1.5659e-39,
-4.6533e-39, -4.7029e-39, -6.0334e-39,
-5.1157e-39, -5.3257e-39, -5.8595e-39,
-4.3046e-39, -4.4391e-39, -5.0039e-39,
-1.0025e-39, -1.0145e-39, -8.6762e-40,
-1.0282e-39, -1.0939e-39, -9.4134e-40,
-1.1868e-39, -1.2133e-39, -5.4261e-40
}
,
{
-1.2633e-01, 2.7332e-01, -4.6674e-01,
-9.4537e-03, 9.6797e-02, -6.4975e-01,
1.8103e-02, 2.7190e-03, 2.3888e-01,
4.8553e-02, -8.7297e-02, 1.8415e-01,
3.1194e-02, -7.2899e-02, -8.1835e-02,
7.1639e-02, -3.1455e-02, -6.2866e-02,
-2.1413e-02, 4.6066e-02, 9.2372e-02,
1.5761e-01, -1.0352e-01, -3.4808e-01,
2.3715e-02, 1.6453e-01, -1.3699e-01,
1.1705e-01, -1.6882e-02, 1.2575e-01,
-2.9834e-02, -1.1558e-01, 4.7318e-01,
3.5301e-02, 1.1246e-01, 3.5038e-03,
1.5837e-01, -2.9968e-01, 1.6094e-01,
4.0562e-02, -1.6329e-01, -3.7023e-02,
-3.9991e-02, 1.7001e-01, -2.7735e-03,
8.8139e-02, -2.4828e-01, 5.5751e-04,
-1.3871e-01, -2.4839e-01, 1.7996e-03,
-1.1670e-01, 3.3651e-02, -2.9559e-02,
3.8572e-03, 3.7329e-02, 4.7511e-02,
-7.8848e-02, 1.2844e-01, 9.2677e-02,
-8.5041e-02, 5.7212e-02, -1.0415e-02,
-3.2462e-39, 2.3003e-39, 4.9676e-39,
-3.9261e-39, -6.8290e-40, 5.9119e-39,
-4.1242e-39, -1.1996e-39, 3.8436e-39,
-2.3243e-02, -2.2525e-02, 3.9668e-02,
-1.1210e-01, -2.3892e-01, 1.6431e-01,
-1.3998e-01, -1.5857e-01, -1.5625e-01,
-1.7634e-02, -3.9174e-02, -9.0936e-03,
-3.9428e-03, -1.6411e-02, 2.6484e-03,
1.1376e-02, -2.9057e-03, 6.3382e-02,
4.8930e-02, 9.1298e-02, 1.8195e-02,
-6.3365e-02, -1.5407e-01, 8.1543e-02,
4.9919e-02, 1.6852e-01, 4.4053e-02,
-4.8682e-02, -7.3614e-02, -6.9206e-03,
-4.8193e-02, -2.3704e-01, -8.3394e-03,
5.6024e-02, 3.7845e-01, -2.4550e-02,
5.2050e-02, 2.2027e-01, -4.1328e-02,
-6.6327e-02, 1.0450e-01, 1.7058e-02,
-1.2047e-01, 5.2494e-02, -1.8018e-02,
5.4807e-02, 1.1177e-01, 2.3511e-02,
6.0413e-03, -3.2457e-02, 7.6611e-02,
-2.1276e-02, 3.0054e-02, 5.0752e-02,
7.5556e-02, 2.5734e-02, -6.0634e-02,
1.2201e-01, -4.1533e-01, 2.7634e-02,
4.5560e-01, 3.2832e-01, 2.6277e-02,
1.9889e-39, 3.8337e-39, 4.0170e-39,
1.5149e-39, 3.6456e-39, 4.0474e-39,
1.1508e-39, 2.7381e-39, 3.8673e-39,
-7.9206e-02, -2.0763e-02, -2.4842e-01,
-6.5777e-02, -1.8446e-01, 2.6178e-01,
-1.7908e-02, -2.3039e-01, -3.5767e-01,
1.0324e-02, 1.3610e-01, 8.6519e-02,
1.3499e-01, 3.1933e-02, 9.1822e-03,
-3.6017e-02, -2.2056e-01, -2.3258e-01,
-7.6185e-02, -2.8981e-01, -1.1816e-01,
-9.9048e-02, 5.3879e-02, -1.7351e-01,
-2.1874e-01, -1.2109e-01, -3.1457e-01,
5.1576e-02, -2.5656e-02, 4.6789e-02,
7.6286e-02, 6.0126e-01, -2.5925e-01,
-5.3443e-02, -3.3656e-01, 4.7585e-01,
-4.7442e-02, -5.1580e-02, -8.5216e-02,
-1.0600e-01, -1.3859e-01, -3.1484e-01,
2.1454e-01, -1.1851e-01, -7.6614e-02,
-7.8873e-03, -7.0275e-02, -1.0958e-01,
-8.0654e-02, 1.3946e-01, 2.5292e-01,
1.3254e-03, -6.7372e-02, -2.6429e-01,
-8.2344e-02, 1.2388e-01, 5.2930e-02,
8.3665e-02, 3.9729e-01, 4.7687e-02,
-4.4502e-02, -8.3105e-02, -1.6430e-01,
1.2825e-39, 1.7532e-39, 2.1774e-39,
-2.1331e-39, -2.1826e-39, -1.0009e-39,
3.7081e-39, 2.0015e-39, -5.8349e-40,
-3.5278e-02, 6.5211e-02, -5.4199e-03,
8.3961e-02, 3.1410e-02, 4.4510e-02,
-5.4905e-02, 4.0727e-02, -1.5710e-02,
1.0813e-01, 8.2043e-03, 4.1303e-02,
1.3405e-01, 1.4150e-01, 7.2155e-02,
3.3942e-02, -4.7781e-02, 1.6095e-01,
-1.4266e-01, -2.5283e-02, 6.4043e-03,
-1.8699e-02, 1.0895e-01, -2.1497e-02,
5.5074e-02, 1.7031e-02, 1.0572e-01,
7.3199e-04, 1.0813e-01, -9.0280e-05,
1.4808e-01, 2.5436e-01, -1.3749e-01,
2.2936e-02, -7.9733e-02, -2.2360e-01,
6.0406e-02, -1.2874e-01, -7.4692e-02,
-1.3216e-01, -9.9889e-03, 2.7608e-03,
-1.1412e-01, -5.1312e-02, -1.7196e-02,
-2.2800e-02, -1.2112e-01, -9.3855e-03,
3.6905e-02, 1.0049e-01, 9.0602e-03,
-7.3200e-02, 1.0628e-01, -4.8218e-02,
-4.6525e-02, 6.0314e-02, -3.6467e-03,
-8.0943e-02, 2.5461e-01, 1.5461e-01,
-5.7708e-02, -5.7823e-02, 5.4042e-02,
3.8847e-39, 3.5806e-39, 4.1610e-39,
3.9082e-39, 4.1898e-39, 4.1926e-39,
4.1200e-39, 4.3759e-39, 4.3977e-39,
-3.3576e-01, 9.5443e-02, 2.7804e-02,
-2.3834e-01, -7.2650e-01, -1.2229e-01,
1.0380e-01, 1.9520e-01, 3.4571e-02,
-3.7291e-02, 7.6216e-02, 8.6171e-02,
-1.6324e-01, -8.6759e-03, 4.3038e-02,
-3.4364e-02, -7.2777e-03, 3.7451e-02,
1.8826e-01, 1.6387e-01, -3.4750e-02,
-2.0203e-01, 2.4170e-01, 9.0358e-05,
-1.3049e-01, 9.6855e-02, -1.6737e-03,
-6.3782e-02, 7.1413e-02, -6.5077e-02,
-1.5262e-01, 4.3261e-01, -8.4224e-02,
6.4632e-02, 1.0553e-01, -1.5274e-01,
4.4294e-05, 8.6239e-02, 5.7537e-03,
-5.7633e-01, -5.0076e-03, -5.2298e-02,
1.8556e-01, -1.1332e-02, -2.7010e-02,
1.6155e-01, -3.0337e-02, -9.6808e-03,
-2.8404e-01, -2.7625e-02, 1.6058e-02,
5.7937e-02, -6.6464e-02, 1.1096e-02,
7.8268e-02, 8.6122e-02, 2.9298e-02,
6.4696e-02, 2.0285e-01, 4.3660e-02,
1.5339e-01, -3.7650e-02, 7.1438e-03,
-8.9058e-40, -3.6429e-39, -4.7562e-39,
8.3914e-40, -2.8054e-39, -3.6702e-39,
4.3666e-39, -1.0602e-39, -3.0369e-39,
7.2731e-02, -1.0227e-01, -1.9583e-02,
-1.7466e-02, -2.0097e-01, 9.3108e-02,
6.5196e-02, -1.1880e-01, -3.5152e-03,
-5.6533e-02, 6.2109e-02, 5.2029e-02,
5.7971e-02, 5.1577e-02, 6.6318e-02,
-2.1669e-03, 7.7274e-02, -4.0609e-02,
2.8531e-02, -8.3960e-02, 1.3615e-02,
-1.1151e-02, -1.4162e-03, 5.6661e-02,
-8.0954e-02, -1.0600e-01, 4.3276e-02,
7.6762e-04, 3.1437e-02, -6.1084e-02,
-8.1119e-02, 2.1406e-01, 6.0836e-02,
4.8105e-02, -1.6263e-01, 9.2555e-03,
1.1060e-01, -2.1090e-01, 1.6435e-01,
-1.0248e-01, -1.1884e-01, -7.9929e-02,
5.9980e-02, 1.0271e-01, -1.1891e-02,
-7.5044e-02, -2.3655e-02, -5.2865e-02,
2.1542e-02, 2.7305e-04, 1.3508e-01,
-1.2317e-02, 9.0742e-02, -3.0079e-03,
-9.9020e-02, 1.5578e-01, -2.1482e-03,
-8.9029e-02, 1.8470e-01, 3.7571e-02,
-2.0394e-01, -1.3735e-01, 2.9648e-02,
-4.3016e-40, -7.3591e-40, -7.3773e-40,
-4.1239e-40, -8.6029e-41, -6.9504e-42,
-7.5082e-40, 1.2975e-40, 2.1462e-40,
-1.8967e-02, -1.4903e-01, 8.1452e-02,
1.2099e-01, -2.5524e-02, 1.3285e-02,
-1.3780e-01, -5.3359e-02, -3.1310e-02,
-1.8984e-02, 4.1962e-02, 1.0186e-01,
-1.0823e-01, 1.1079e-01, 7.8613e-02,
-1.4521e-01, -7.7509e-02, 1.8768e-02,
5.0613e-03, -3.0459e-02, -6.3055e-02,
4.4540e-02, 2.0135e-01, 9.6351e-02,
-1.9495e-02, -1.2314e-01, 1.1720e-02,
2.1739e-02, 5.2098e-02, -4.0453e-02,
-9.9983e-02, 4.7578e-02, -2.7862e-02,
-8.6565e-02, 1.5241e-01, -4.0462e-02,
4.0458e-02, -1.2871e-01, -4.3491e-02,
9.8981e-02, -1.3637e-01, 2.0092e-02,
1.5626e-01, -8.4550e-04, -2.5701e-02,
1.8511e-02, -1.0257e-01, -7.3238e-02,
-3.9802e-02, -1.6120e-02, -7.4068e-04,
-1.1377e-02, 9.7975e-03, -9.0342e-02,
-6.7152e-02, 1.0208e-01, 2.5234e-02,
-4.3687e-02, 2.5334e-01, 9.2712e-02,
3.7702e-01, 4.1450e-02, 1.9934e-02,
-5.4201e-39, -6.7158e-39, -7.5025e-39,
-5.2548e-39, -6.4829e-39, -7.2782e-39,
-4.9999e-39, -5.9599e-39, -6.0469e-39,
3.5890e-02, -7.3738e-02, 9.8899e-02,
3.3312e-02, 5.8231e-02, -2.1348e-01,
8.6289e-02, 5.0837e-02, -6.5613e-02,
7.0208e-02, 4.1424e-02, -6.0761e-02,
4.4654e-02, -3.3590e-02, -5.3044e-02,
1.2319e-01, -4.4666e-02, -8.8193e-02,
-9.0463e-02, -3.0083e-02, 6.8075e-02,
4.2531e-02, 4.3248e-01, 1.3480e-01,
9.2389e-02, 1.3683e-01, -2.6092e-01,
2.8925e-02, 2.3317e-01, 7.8128e-02,
6.3444e-02, 1.6291e-01, -3.8727e-03,
6.9107e-02, 6.8477e-03, 3.9528e-01,
3.8471e-02, 3.0745e-02, 2.8446e-02,
1.0625e-02, -2.4006e-01, -1.2490e-01,
-1.3002e-01, 2.0025e-01, 4.7618e-02,
-3.9705e-02, -1.2017e-02, -9.8790e-02,
-1.2798e-02, -2.7540e-01, -1.5138e-01,
-1.0290e-01, 5.0112e-02, -1.7391e-01,
-9.7079e-02, -2.2350e-03, -5.9211e-02,
-2.4728e-01, 4.3353e-01, -1.9306e-01,
-1.8039e-01, 1.2689e-01, 5.2103e-02,
-4.5547e-39, -7.8040e-39, 4.1196e-39,
1.5214e-39, 9.3494e-40, -3.9058e-39,
7.8718e-39, 7.1728e-39, 5.3609e-39
}
,
{
-9.4505e-02, -7.0477e-02, -1.5792e-04,
-2.3475e-01, 5.8849e-02, -6.8161e-02,
7.0658e-03, -1.0276e-01, 7.2471e-02,
-7.3820e-03, -3.0740e-02, -1.1131e-01,
2.8429e-02, -3.5750e-01, -8.4683e-02,
-5.0210e-02, -3.1096e-03, -2.3730e-02,
4.5756e-02, -3.6724e-01, -7.6317e-02,
3.8467e-01, 5.5354e-02, 1.6943e-01,
-4.9403e-02, 7.4709e-02, -3.0550e-02,
-7.5324e-03, -1.6910e-01, -1.6103e-01,
4.6314e-02, 1.2912e-01, -3.0488e-02,
2.6388e-02, 5.6925e-02, 6.4396e-02,
3.7748e-03, -2.1310e-02, 1.1410e-01,
-7.0164e-03, 1.8228e-02, -2.5920e-01,
6.8416e-02, 1.3998e-01, 1.3290e-01,
-3.8861e-02, 8.9898e-02, -3.6631e-03,
3.5528e-02, 1.1249e-01, 3.7018e-02,
-6.2334e-02, -4.8470e-02, -4.4094e-02,
3.1574e-02, -1.2162e-01, 1.9669e-01,
-4.6605e-03, 1.1887e-02, -1.1958e-01,
-1.0736e-01, 6.0131e-02, -1.2829e-02,
2.1305e-01, -8.4750e-02, -2.7028e-02,
-3.0351e-01, -6.4246e-03, -7.9128e-02,
1.3081e-01, 9.5878e-02, 1.6193e-02,
-5.8335e-02, -5.5968e-02, -2.6284e-03,
-7.2218e-02, -1.1661e-02, 1.9413e-03,
-1.6043e-01, 1.1388e-01, -3.6473e-02,
-2.4077e-02, 1.2210e-01, 1.5531e-02,
1.5074e-01, -4.5545e-01, 6.1004e-02,
-6.3948e-02, 3.9804e-02, -4.8822e-04,
1.3135e-01, 9.2392e-02, 8.8914e-02,
1.2941e-01, -3.6052e-01, 3.9571e-02,
-2.4838e-02, 7.0425e-02, -1.9016e-02,
2.7629e-02, -7.0648e-02, -2.6838e-02,
-2.1844e-02, -9.6184e-02, -3.3611e-02,
8.5938e-02, 5.2663e-02, 2.2938e-02,
-6.9909e-03, -3.9627e-03, -6.5162e-02,
-4.9296e-03, -4.0383e-02, 6.7670e-01,
1.5251e-02, 2.1000e-01, -1.9137e-01,
2.2825e-02, 1.6640e-02, 3.8147e-02,
7.1902e-02, -4.9821e-02, -6.5592e-03,
1.5826e-02, 2.1626e-02, 1.1646e-02,
1.5180e-02, 1.5664e-01, 9.8696e-03,
-7.2901e-02, -2.1818e-01, 9.2465e-02,
6.4349e-02, 6.0290e-02, -2.1094e-02,
2.0633e-02, 4.8808e-02, 1.4080e-02,
4.8083e-02, -1.5979e-01, -5.3634e-02,
6.5004e-02, 7.0317e-02, 1.9117e-02,
-4.3048e-02, 5.9627e-02, -1.5068e-02,
1.8861e-01, -2.6868e-01, 1.2789e-03,
1.1273e-01, -2.7796e-01, 4.9841e-02,
4.9008e-03, 1.8241e-02, 4.3449e-02,
2.1420e-02, -1.0299e-01, -1.6235e-01,
-1.9300e-02, -1.5121e-02, 2.0616e-03,
-2.7591e-01, 3.9622e-02, -5.0492e-02,
1.1866e-01, 5.5502e-01, -2.3622e-02,
-6.1204e-03, -7.4778e-03, 6.7961e-03,
2.4215e-02, 2.1643e-03, 1.1442e-01,
7.5326e-02, 1.4455e-01, 8.0497e-02,
6.6115e-02, 2.9762e-02, 2.8680e-02,
3.7784e-03, -2.2769e-02, 2.4529e-02,
-1.1441e-02, 9.8463e-02, -1.2761e-02,
1.0642e-02, 5.2871e-02, 1.9650e-01,
-2.2225e-02, 3.1504e-02, 8.5645e-03,
4.9125e-02, 1.4439e-01, 8.4573e-02,
1.0103e-02, 1.9097e-02, 4.5579e-03,
-2.5773e-02, -4.0984e-02, -1.5402e-01,
5.3050e-02, 1.5509e-01, -1.9040e-01,
3.7700e-02, 1.0632e-01, -2.2520e-02,
-5.6582e-02, -4.6040e-02, -5.7562e-03,
-3.4924e-01, 3.2933e-01, 5.5211e-02,
2.3230e-02, 8.5108e-02, 3.7448e-02,
1.4266e-02, -7.2016e-02, 4.5252e-03,
-7.0246e-02, 3.9142e-01, -1.9216e-02,
2.0536e-01, -3.5615e-01, 3.8009e-02,
1.2252e-02, -5.7966e-02, 9.2672e-02,
2.4225e-02, -1.0186e-01, -1.4219e-01,
-2.8815e-02, 1.3088e-02, -2.6031e-03,
-6.2341e-02, -1.1216e-01, -7.2122e-02,
1.1812e-01, 4.3493e-01, 4.3593e-02,
-1.3524e-02, 4.8679e-03, -1.0598e-02,
3.4904e-02, 5.5813e-02, 4.6811e-02,
8.0928e-02, 7.6607e-02, 6.3968e-02,
5.4647e-02, 2.8693e-02, 2.1957e-02,
-8.2725e-03, 5.4668e-02, -3.0533e-02,
-9.3953e-03, 1.5874e-01, -3.6093e-01,
5.6412e-03, 1.8977e-02, 2.0088e-01,
-1.9414e-02, 1.9088e-02, 1.4504e-02,
5.8462e-02, 6.2645e-02, 4.9884e-02,
6.6913e-03, 4.3639e-02, 1.5139e-02,
-2.1897e-02, -1.1436e-01, -5.0838e-02,
7.1176e-02, 8.4667e-02, -1.4480e-01,
3.7676e-02, 1.0840e-01, -2.6417e-02,
-4.7584e-02, -4.0524e-02, 6.3032e-03,
-2.4822e-01, 2.4635e-01, 5.5942e-03,
-1.3347e-02, 1.0515e-01, 4.2549e-02,
-1.2380e-01, 4.1074e-02, 1.2608e-02,
-1.2042e-01, 2.9516e-01, 2.8380e-03,
5.1930e-01, -1.6498e-01, 5.7152e-02,
-6.5519e-02, 1.1001e-01, 2.8943e-02,
1.0854e-01, -6.0107e-02, -1.6730e-01,
-4.4417e-02, 3.4347e-02, -3.3756e-02,
2.0694e-01, 3.3047e-01, -9.4497e-02,
-2.1977e-01, 4.6614e-02, 1.2201e-01,
-2.9541e-02, 1.8900e-01, -1.8391e-01,
2.0064e-02, -3.2480e-02, -8.9041e-03,
-5.6385e-02, -6.4531e-02, 1.2879e-02,
-3.2499e-02, 1.0883e-02, 7.3564e-03,
1.9828e-02, -2.3278e-01, -4.3789e-03,
9.7669e-02, 1.3008e-01, -1.0405e-01,
2.2618e-02, -2.5495e-01, -1.0718e-01,
4.3524e-02, -7.3127e-02, 8.2424e-02,
-5.0193e-02, 4.0634e-03, 4.0696e-02,
2.7419e-02, 1.8353e-01, 9.2117e-02,
-7.4918e-02, 1.0602e-01, -3.4752e-02,
-1.3331e-01, -2.9583e-02, -5.2197e-03,
-3.7852e-02, 1.5998e-01, 1.5078e-03,
-5.6512e-02, 1.3378e-01, 1.4512e-02,
4.5255e-02, 2.4702e-01, -2.4848e-02,
-1.7526e-01, 1.5532e-01, 8.6686e-02,
3.1486e-02, -2.3247e-02, 9.7320e-03,
-5.2106e-01, 4.7937e-02, 4.1614e-02,
5.5436e-02, -2.0432e-01, 1.2444e-02,
-5.6792e-02, -5.5632e-02, 5.7612e-02,
-6.0248e-04, 4.9770e-02, -6.7956e-02,
1.3389e-02, -9.4141e-03, -7.3497e-03,
-4.6361e-01, 2.7450e-01, -8.2210e-02,
-2.6737e-01, -6.6114e-02, 6.3568e-02,
1.6910e-02, 1.4456e-01, -9.0081e-02,
8.8278e-03, 2.1776e-02, 8.7710e-03,
-2.3378e-02, -4.3907e-02, -3.6751e-02,
-2.4694e-03, -6.0419e-03, 3.0840e-02,
-1.6968e-02, -8.2266e-02, -1.0049e-01,
3.4429e-02, 1.0960e-01, 3.8355e-01,
-4.0301e-04, -3.1089e-02, -2.1373e-02,
-2.4172e-02, 4.6432e-02, 8.0742e-03,
-2.3134e-02, 1.7789e-02, 2.7136e-02,
3.0729e-02, 6.9008e-03, 1.2822e-02,
3.5043e-02, -6.1749e-02, -1.2565e-02,
-1.0354e-02, -2.6515e-03, 4.5632e-03,
-5.9818e-02, -9.7686e-04, -6.6467e-03,
-5.0833e-01, 1.8474e-02, 1.3598e-02,
3.6287e-01, 1.3698e-01, -1.2806e-02,
-2.8618e-02, -2.9128e-02, 2.9855e-02,
8.1243e-02, 4.7414e-02, -4.7434e-02,
-3.3738e-02, -3.4926e-01, 1.7786e-02,
1.0056e-01, -5.7937e-02, -1.8308e-02,
1.8214e-02, -1.9519e-01, 2.2152e-02,
-7.3543e-02, 2.0786e-01, -5.8196e-02,
3.9396e-02, -4.5349e-02, 1.5748e-02,
-5.4604e-03, 4.5777e-01, 1.7295e-01,
-2.0570e-01, -3.0970e-01, -1.9075e-01,
7.6751e-02, -1.3099e-01, 6.1278e-02,
6.0222e-02, 5.4418e-02, 1.2259e-01,
3.2160e-02, 8.5146e-03, 3.4578e-02,
-5.4391e-02, -2.5285e-02, 1.0251e-02,
-3.2763e-02, 7.9163e-02, -7.5136e-02,
1.8545e-02, -2.1972e-02, 1.3887e+00,
-1.2402e-03, -2.5679e-01, 7.2392e-02,
4.9692e-03, 1.7034e-02, 4.7043e-02,
1.2093e-02, -3.1230e-02, -8.2613e-03,
-7.8701e-03, -2.3516e-03, -7.2487e-04,
6.8495e-02, -5.2837e-02, -2.2482e-01,
1.3259e-02, 4.8009e-01, -4.0940e-02,
-4.1547e-02, -2.8753e-02, -5.2579e-03,
-1.7152e-01, -3.3676e-02, 1.5080e-02,
8.6014e-02, 7.9239e-02, 4.2196e-02,
-9.2870e-02, -1.5913e-02, -6.5804e-03,
4.0364e-02, 2.4914e-02, -1.4638e-02,
8.8705e-03, 2.8037e-01, 3.9890e-02,
1.1638e-01, 2.9467e-01, -4.3518e-03,
7.1091e-02, -2.2378e-01, 4.7315e-02,
3.8006e-02, -2.0246e-01, -3.8679e-02,
-5.8004e-02, 5.8991e-02, -6.2149e-03,
-1.3034e-01, 1.5540e-01, -5.2558e-02,
8.1594e-02, 3.5570e-01, 2.1220e-02,
1.4977e-02, 2.4493e-03, -4.0627e-02,
1.1402e-01, 6.6962e-02, 1.1150e-01,
1.1824e-01, 1.1492e-01, 1.1219e-01,
6.6067e-02, 6.9639e-02, -8.1836e-02,
-2.7144e-02, 1.4677e-01, -5.9261e-02,
4.4573e-03, 2.6235e-01, -7.4379e-01,
-8.3569e-03, 9.4465e-02, -6.5653e-03,
2.1095e-02, -1.8853e-02, 6.7972e-02,
1.2957e-01, 3.0122e-02, -1.0061e-02,
-3.4832e-02, 8.5404e-02, 5.7663e-02,
-5.0400e-02, -1.2050e-01, -2.3344e-01,
1.4977e-01, 7.8806e-02, 6.0771e-03,
5.6483e-02, 6.3927e-02, -5.8376e-03,
-2.8124e-01, 5.2581e-02, -1.3918e-04,
-1.4341e-01, 3.6558e-01, 4.7332e-02,
-3.9089e-02, 8.4188e-02, 2.7058e-02
}
};
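/* Per-layer biases for the HDNL2 model's eight hidden convolution layers:
   HDNL2biasL[layer][channel]. (Annotation added for navigation; the pairing
   with the HDNL2kernelsL weights above is inferred from the matching
   dimensions, not stated in this file.) */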
static __device__ __constant__ const float HDNL2biasL[8][8] =
{
{
7.2678e-02, 8.5350e-03, 5.0400e-02, 2.6268e-02, 6.2434e-02, 1.0483e-01, -7.1650e-39, 1.0062e-01
}
,
{
-4.9844e-39, -1.8567e-39, 6.0627e-04, -1.9234e-38, 1.8331e-02, -1.1364e-01, -8.3962e-03, -1.7372e-04
}
,
{
-0.0091, -0.0055, 0.0237, 0.0093, -0.0479, 0.0188, -0.0034, 0.0399
}
,
{
6.5694e-03, -2.2259e-01, -1.1226e-02, -8.0327e-02, -1.0615e-36, 1.0402e-02, 7.6246e-03, -6.5940e-02
}
,
{
5.0711e-02, 7.1911e-02, 2.5293e-02, -1.5608e-02, 5.3835e-02, -1.6967e-38, 2.2243e-02, 3.2742e-02
}
,
{
1.5629e-02, 2.9703e-02, 2.6412e-02, 1.2301e-02, 1.8654e-01, -7.2260e-03, 2.4613e-02, -3.1853e-38
}
,
{
-0.0030, -0.0123, 0.0348, 0.0277, -0.0152, 0.0005, -0.0124, -0.0209
}
,
{
7.4856e-03, 7.2931e-04, 8.3015e-03, 6.4820e-03, 2.4008e-04, 7.0377e-06, 1.7948e-03, 8.9869e-03
}
};
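/* Final layer of the HDNL2 model: 4 * 8 = 32 weights mapping the 8 feature
   channels to 4 outputs, presumably the 2x2 sub-pixel block of the 2x
   upscale. The exact weight ordering is defined by whichever kernel code
   consumes this table. */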
static __device__ __constant__ const float HDNL2kernelsL10[4 * 8] =
{
0.4240, 0.4165,
0.1648, 0.1909,
-0.0985, -0.4455,
0.4639, -0.0533,
-0.1368, 0.4413,
0.2539, 0.3294,
0.2458, -0.3256,
-0.0479, 0.3200,
-0.3977, -0.0422,
-0.2736, 0.1053,
0.3902, 0.0594,
-0.0721, -0.2988,
0.0495, 0.1309,
-0.1703, 0.0033,
0.3061, 0.1827,
0.2443, -0.1259
};
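/* HDNL3 model, first layer: eight 3x3 filters over the single input channel
   (9 * 8 = 72 weights, one 9-tap block per output feature map). */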
static __device__ __constant__ const float HDNL3kernelsL1[9 * 8] =
{
-0.0461, 0.1274, 0.2976,
-0.0393, -0.1251, 0.2527,
0.0791, 0.0600, -0.0303,
-0.0520, -0.5039, -0.3305,
-0.0115, 0.0456, 0.4370,
0.0601, 0.0780, 0.3106,
-0.0017, -0.0018, -0.0017,
-0.0017, -0.0018, -0.0018,
-0.0017, -0.0017, -0.0017,
0.2666, 0.1687, 0.2303,
-0.1901, 0.3825, 0.3024,
0.1811, 0.0581, 0.2080,
-0.1246, 0.0155, -0.4075,
0.1156, 0.5929, 0.1449,
-0.1080, -0.0171, -0.0516,
-0.0817, 0.2247, 0.0472,
0.0394, 0.1085, 0.1435,
-0.0480, -0.0135, -0.0606,
-0.0083, 0.2045, 0.1056,
-0.2239, 0.2823, -0.1926,
0.2581, 0.1362, -0.1914,
-0.0833, 0.0702, 0.0234,
0.3616, 0.3789, -0.1840,
0.0128, 0.1347, -0.0187
};
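/* Biases for the eight first-layer feature maps of the HDNL3 model. */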
static __device__ __constant__ const float HDNL3biasL1[8] =
{
-0.1329, -0.0431, -0.0031, -0.0129, 0.2294, -0.2595, -0.2370, -0.0499
};
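/* Hidden-layer weights of the HDNL3 model: eight slabs of 9 * 8 * 8 = 576
   floats, i.e. one 8-in/8-out bank of 3x3 filters per layer.

   A minimal sketch (not the file's actual kernel) of how one such slab
   would be consumed. The function name, the ReLU activation, and the
   [outChannel][inChannel][3x3] weight ordering are illustrative
   assumptions, not taken from this file. */
static __device__ __forceinline__ float hdnlConv3x3Sketch(
    const float nbr[8][9],       // 3x3 neighborhood from each of 8 input maps
    const float* __restrict__ w, // one 576-float slab, e.g. HDNL3kernelsL[l]
    float bias,                  // matching entry of the layer's bias array
    int oc)                      // output channel, 0..7
{
    float acc = bias;
    for (int ic = 0; ic < 8; ++ic)      // input feature map
        for (int k = 0; k < 9; ++k)     // 3x3 tap, row-major
            acc += nbr[ic][k] * w[(oc * 8 + ic) * 9 + k]; // assumed layout
    return fmaxf(acc, 0.0f);            // ReLU (assumed activation)
}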
static __device__ const float HDNL3kernelsL[8][9 * 8 * 8] =
{
{
1.4090e-01, -1.8985e-02, -6.8589e-02,
6.6491e-02, 1.4360e-02, 8.5223e-02,
1.8782e-01, 9.8042e-02, -3.4558e-02,
2.5606e-01, 2.2027e-01, 2.7603e-01,
1.9424e-01, 3.4537e-02, 9.5975e-02,
1.1223e-02, -4.3377e-01, -1.4760e-01,
-3.4293e-40, -5.5421e-40, -4.4763e-41,
-6.3322e-40, -3.1495e-40, -7.8264e-41,
-1.5375e-40, -3.3656e-40, 5.2441e-40,
1.2413e-01, 1.5682e-01, 1.1465e-01,
1.6683e-02, 7.8382e-02, 1.0110e-01,
1.4902e-01, 1.3608e-01, 1.1674e-01,
-6.5160e-02, 7.7748e-02, 2.1773e-02,
2.0652e-02, 2.7245e-01, 1.0297e-01,
-2.0953e-02, 6.1685e-02, 4.4128e-02,
6.1538e-02, -1.9746e-02, -1.2785e-02,
2.5931e-02, 1.2740e-01, 9.0033e-02,
8.6448e-02, 2.0684e-01, 9.8063e-02,
-7.8384e-03, 6.3277e-02, 7.6751e-03,
3.5956e-02, 1.0555e-01, 4.2728e-02,
7.1578e-02, 1.3253e-01, 1.1171e-01,
-2.7538e-02, 1.5836e-01, 1.0014e-01,
-4.9113e-02, 1.6911e-01, 2.7329e-01,
7.9170e-03, 9.5440e-02, 1.3922e-01,
8.0151e-02, 4.3438e-02, 5.5314e-02,
3.4896e-02, 1.6816e-01, -4.5783e-03,
-1.4579e-03, 2.0493e-01, 2.6238e-02,
2.6499e-02, 3.9490e-01, -1.1582e-02,
3.5790e-01, 1.4317e-01, -2.1775e-01,
4.1794e-03, -3.2513e-01, -1.6729e-01,
3.4040e-41, -6.2960e-42, -1.0067e-40,
5.5978e-41, -1.2353e-40, -1.1347e-40,
5.4572e-40, -6.4384e-40, -4.1234e-40,
-9.3690e-02, 1.7765e-01, 1.1275e-01,
9.1159e-03, 1.7375e-01, 1.1427e-01,
-7.8385e-02, 1.5658e-01, -3.8399e-02,
-1.0756e-01, 5.9943e-02, -6.7273e-02,
-1.1117e-01, 1.5267e-01, 1.1563e-01,
-1.2964e-01, -3.8604e-02, -2.4532e-02,
1.6324e-02, 1.3112e-01, 6.1679e-03,
-7.7703e-03, 2.6311e-01, 8.9427e-02,
-2.8948e-02, 1.9341e-01, 4.4339e-02,
6.4559e-03, -6.8885e-02, 1.1481e-01,
-1.0665e-01, 3.8613e-02, 7.0410e-02,
-6.1680e-02, -1.7374e-02, 9.5475e-03,
-4.0081e-02, -3.1549e-02, 2.8311e-01,
-1.2178e-01, -1.3848e-01, 1.7416e-01,
-8.1756e-02, -1.7718e-01, 7.9533e-02,
-3.1299e-03, -3.2305e-03, -3.2094e-03,
-3.1548e-03, -3.2553e-03, -3.2453e-03,
-3.1459e-03, -3.2278e-03, -3.2076e-03,
-3.6554e-05, -3.6715e-05, -3.1284e-05,
-1.4927e-05, -1.4357e-05, -1.2185e-05,
-1.5771e-09, -1.1439e-09, -6.4952e-10,
3.7723e-40, 4.9166e-40, -2.1946e-40,
-4.7599e-40, -4.3356e-40, -8.3928e-41,
2.6127e-40, 4.8634e-40, 2.7720e-40,
-5.4972e-03, -5.6409e-03, -5.6919e-03,
-5.5818e-03, -5.7079e-03, -5.7542e-03,
-5.6338e-03, -5.7437e-03, -5.7600e-03,
-3.7940e-03, -3.8853e-03, -3.8693e-03,
-3.8995e-03, -3.9616e-03, -3.8945e-03,
-3.8438e-03, -3.9156e-03, -3.8269e-03,
-7.2342e-05, -7.8682e-05, -4.7701e-05,
-1.1126e-04, -1.1918e-04, -7.8931e-05,
-1.1644e-04, -1.2418e-04, -8.2350e-05,
-2.3881e-04, -3.7971e-04, -3.9448e-04,
-2.4112e-04, -3.8395e-04, -4.0189e-04,
-2.3451e-04, -3.7525e-04, -3.9222e-04,
-3.9853e-03, -4.0748e-03, -4.1134e-03,
-4.0685e-03, -4.1456e-03, -4.1548e-03,
-4.0547e-03, -4.1388e-03, -4.1357e-03,
5.3008e-02, 2.2252e-02, -7.1158e-02,
-6.6411e-02, -3.0015e-02, -2.2526e-02,
1.2259e-01, -6.2488e-02, 5.6190e-02,
1.5981e-02, -7.6832e-02, 1.7908e-02,
2.7618e-01, 5.4054e-02, 8.7282e-02,
1.5212e-02, -1.1097e-01, -2.2265e-02,
-6.8532e-41, -6.0539e-40, 4.6269e-40,
-2.9221e-40, -3.8468e-40, -4.6656e-40,
6.4572e-40, -6.1625e-40, 6.4545e-40,
3.5920e-02, 9.0955e-02, -1.7626e-02,
4.7826e-02, 1.8832e-01, -4.4043e-02,
-3.8405e-02, 5.9176e-02, 6.8182e-02,
3.7657e-03, 2.6441e-02, -2.5585e-01,
1.0969e-01, 2.3914e-01, 3.5120e-02,
-1.6252e-01, 3.4371e-02, -2.7501e-01,
4.9289e-02, 2.2088e-02, -1.4588e-02,
1.6384e-01, -8.1421e-03, -6.9613e-02,
1.0820e-01, 1.1137e-01, 7.2648e-03,
1.5243e-01, 1.3659e-01, 2.7553e-02,
1.3966e-01, 1.1019e-01, 1.9817e-02,
1.1420e-01, -5.1386e-03, 6.8617e-03,
-1.3264e-02, 2.1508e-01, 4.8430e-02,
5.1149e-02, 2.9165e-01, 2.8077e-01,
2.9288e-03, 9.0611e-02, 8.1538e-02,
-1.1812e-01, 1.5603e-02, 1.1571e-01,
-3.4958e-02, -1.6688e-03, -4.6619e-02,
-1.0417e-02, -3.1802e-02, 1.8357e-02,
1.1064e-01, 1.8397e-01, 4.8449e-02,
-8.3336e-03, 1.6029e-01, 3.9490e-02,
-4.0959e-01, -2.6134e-01, 2.0766e-02,
6.6073e-41, -6.7490e-40, -5.1131e-41,
-4.3320e-41, -3.7194e-40, 2.0674e-40,
-5.2359e-40, -3.4006e-40, -4.9257e-40,
-4.7260e-02, 2.8518e-03, -2.7764e-01,
6.9182e-03, 1.3938e-01, -1.3162e-01,
-6.0901e-03, 1.0339e-01, 6.0419e-02,
-1.4449e-01, -3.2043e-02, -9.1466e-02,
-1.4022e-02, 3.1703e-01, 5.8166e-02,
-1.5243e-02, 1.4521e-01, 2.0790e-04,
-1.0255e-01, -7.8766e-02, -1.2395e-01,
7.9894e-03, 3.7079e-03, -3.2134e-02,
1.1663e-01, 1.4808e-01, 2.0431e-01,
7.4026e-02, 6.9632e-02, 1.7156e-01,
-3.0385e-02, 2.3218e-01, 7.3855e-02,
-8.8530e-02, -5.9224e-02, 2.3431e-02,
1.4596e-02, 3.2442e-02, -1.1308e-01,
-6.3734e-02, 2.5270e-01, 7.8081e-02,
1.0468e-02, 1.5473e-01, 3.8676e-02,
-1.0842e-01, 8.6778e-03, 1.4985e-01,
8.1757e-03, -8.2109e-02, 8.5471e-02,
-2.1437e-01, -6.1173e-02, 4.8163e-02,
2.8965e-01, 1.9748e-01, 4.2651e-02,
1.8196e-01, 3.3932e-01, 3.9594e-01,
3.9657e-01, 4.2167e-01, 2.9290e-01,
7.4011e-41, 6.5220e-40, -5.9885e-40,
7.4011e-41, 6.2047e-40, -7.1533e-40,
4.1950e-40, -1.1886e-40, -5.9922e-40,
1.9662e-01, 2.1402e-01, 3.1041e-02,
-1.1079e-01, 1.3361e-01, -2.1608e-01,
-1.7962e-01, -8.0576e-02, -3.1277e-01,
1.0620e-02, 2.4024e-01, 1.0657e-01,
-7.9906e-05, 2.8760e-01, 4.1231e-02,
-1.3261e-02, -1.0868e-01, -1.1267e-01,
-1.0659e-02, -2.6051e-02, -4.5389e-02,
5.8261e-02, 4.0288e-02, 6.7050e-02,
-2.6462e-01, -1.7846e-01, -1.0002e-01,
-6.2904e-02, 1.5275e-01, 4.4282e-03,
1.4446e-01, 1.1814e-01, -8.0349e-02,
2.0331e-02, 3.3014e-02, 1.2710e-01,
1.6084e-01, 3.8819e-01, 1.0854e-01,
-6.8126e-03, 3.5673e-01, 1.8938e-01,
-1.1660e-01, -5.7694e-02, -2.9194e-01,
1.2775e-02, -3.2769e-02, 1.7228e-02,
1.8324e-01, 1.1983e-01, -1.6944e-02,
1.0593e-01, 1.3451e-01, 5.2536e-02,
1.9147e-01, 1.3875e-01, 1.0298e-01,
-2.0871e-01, -1.7197e-01, 1.1342e-01,
-1.7581e-01, 4.0972e-02, 2.9796e-01,
3.2588e-40, -4.3663e-40, -2.6518e-40,
3.2588e-40, -4.3663e-40, -2.6518e-40,
4.1600e-40, -4.4350e-40, -4.8744e-41,
3.7289e-02, 8.1769e-03, 1.7059e-02,
3.7735e-02, 6.6571e-02, -6.6137e-02,
-5.8890e-02, -7.7019e-03, -6.2128e-02,
-4.0751e-02, 1.1710e-01, -1.1586e-01,
-1.2999e-01, -1.6384e-02, -2.1858e-01,
-2.8028e-01, -6.0443e-02, -1.1880e-01,
1.8152e-01, 1.5364e-01, 1.1781e-01,
2.9010e-01, 2.4612e-01, 1.3170e-01,
1.9022e-01, 1.8117e-01, 1.6483e-01,
9.3342e-02, 2.6607e-01, 1.4679e-01,
1.6729e-01, 2.5374e-01, 1.1954e-01,
6.3258e-02, 1.0557e-01, 6.7221e-02,
-5.2017e-02, 1.9628e-01, 1.7243e-01,
-3.2667e-02, 1.5756e-01, 1.9347e-01,
-9.5252e-02, -3.7525e-02, -3.4543e-04,
-4.9759e-02, 4.0383e-02, -2.0231e-02,
-1.1776e-01, 3.4182e-02, 3.6720e-02,
-1.4822e-02, -4.1658e-02, -1.3729e-02,
-1.9215e-02, 2.4427e-02, -9.0638e-02,
-1.4438e-01, -2.1785e-01, -5.1789e-02,
-2.0279e-01, -3.3918e-01, -1.6871e-01,
6.1262e-41, 2.4066e-40, 6.6851e-40,
5.3430e-40, -3.2335e-40, -3.7400e-40,
-6.3256e-40, -4.7491e-40, 2.2854e-40,
-6.8701e-03, -1.4849e-02, 8.6332e-02,
1.1686e-01, 1.8346e-01, 1.8797e-01,
-2.3251e-02, 7.3973e-02, 1.0532e-01,
-6.1838e-02, 5.6667e-02, 8.1584e-02,
-3.8900e-02, 7.0927e-02, 9.5606e-02,
-4.5098e-02, -1.0829e-01, -1.2224e-01,
3.5047e-03, 3.2898e-02, 3.5622e-02,
1.6170e-02, 4.3721e-02, 9.7496e-02,
2.3445e-03, 6.0417e-02, 1.3482e-01,
6.0570e-02, -5.7139e-03, -1.0883e-03,
2.2701e-02, -2.9113e-02, 7.9178e-03,
8.1214e-02, -4.1408e-02, 1.3616e-02,
-4.7985e-02, 1.0304e-02, -3.3236e-02,
-1.6334e-02, -8.1538e-02, 1.8629e-02,
-9.3720e-02, -1.2920e-01, -4.0836e-02
}
,
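/* HDNL3kernelsL[1] */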
{
1.0443e-01, 1.5461e-01, -1.4743e-01,
1.6716e-01, 1.0532e-01, -2.3088e-01,
1.0218e-01, 1.2393e-01, -9.6646e-02,
1.7659e-01, -7.3279e-02, 1.9627e-02,
1.7721e-01, -1.4329e-01, -1.2533e-01,
1.6551e-01, -3.4616e-01, 9.5618e-02,
4.5827e-09, 9.3413e-09, 1.7015e-08,
1.2245e-08, 9.9727e-09, 6.7108e-09,
1.9612e-07, 3.9479e-08, 1.1537e-09,
2.2127e-02, 9.2715e-02, -1.2150e-01,
7.5652e-02, 1.1548e-01, -1.2420e-01,
-1.0693e-03, -7.2839e-02, -1.9664e-01,
1.4466e-01, -1.8552e-03, -1.3575e-01,
2.0699e-01, 8.0396e-02, -1.9651e-01,
-4.7075e-02, -5.1259e-02, -8.2593e-02,
-2.2385e-01, 3.0066e-03, -2.2659e-02,
6.1827e-02, 2.5331e-02, -5.3898e-02,
2.7091e-01, 1.0991e-01, -3.3600e-01,
-8.9499e-02, -9.3821e-03, 2.2675e-02,
1.1213e-01, 1.3276e-01, 2.0368e-02,
6.5408e-02, 4.1598e-02, -4.7917e-02,
6.0740e-03, 1.2236e-04, -1.0659e-01,
-1.8072e-02, -9.1082e-02, -9.0414e-02,
4.9052e-02, -1.4298e-01, -3.9721e-02,
1.1840e-01, 2.2503e-01, 2.4587e-02,
9.3023e-02, 6.9650e-02, 1.6798e-01,
-1.5640e-03, 1.6300e-02, 6.3585e-02,
1.4431e-01, 3.7885e-02, 1.6692e-02,
1.7345e-01, 7.2315e-02, 1.8942e-02,
1.1081e-01, 8.2973e-02, -9.7717e-02,
-5.2264e-03, -5.2641e-03, -5.2727e-03,
-5.2809e-03, -5.3125e-03, -5.3153e-03,
-5.2915e-03, -5.3251e-03, -5.3231e-03,
6.0008e-02, 2.0268e-01, 1.3396e-01,
-2.5202e-03, -1.7750e-02, -1.2019e-02,
1.1806e-01, -2.2306e-02, 3.6464e-02,
7.9324e-02, 3.1883e-02, 1.5483e-02,
-4.3537e-02, 1.2204e-02, 1.8905e-02,
-8.1581e-02, -1.1307e-01, -6.0718e-02,
-2.4865e-01, -1.0199e-01, 1.9886e-02,
-1.0519e-02, 6.9972e-02, 4.8012e-02,
-1.5282e-02, 1.1979e-01, 8.7968e-02,
-3.6752e-02, 1.9523e-02, 7.1321e-02,
-5.8295e-02, 5.3242e-02, 1.2773e-01,
-7.9671e-02, 8.3249e-04, 7.4904e-02,
1.1792e-01, 2.2135e-03, -9.0963e-03,
-2.8356e-03, -4.2661e-02, 6.9497e-02,
9.3561e-02, 1.0475e-01, 5.4745e-02,
-8.5901e-02, -2.1969e-01, -1.5572e-01,
3.6473e-02, 1.1097e-01, -2.6830e-02,
1.2199e-02, 1.8917e-01, 1.1906e-01,
1.0664e-01, -2.7005e-01, 1.5492e-01,
-4.1771e-02, -1.6580e-01, 2.9234e-02,
-1.9854e-02, 2.1436e-01, -1.1100e-01,
4.5382e-04, 4.2085e-04, 5.6852e-04,
3.4951e-04, 3.7354e-04, 3.2786e-04,
2.0790e-04, 2.8606e-04, 3.2415e-04,
-1.5500e-02, 2.2865e-02, -3.0070e-01,
1.8467e-01, 2.4899e-01, 1.4812e-02,
-1.2318e-01, 2.3175e-01, 7.2244e-02,
1.6713e-01, 1.9089e-02, -2.7494e-01,
1.0202e-01, 2.9200e-01, -3.6055e-03,
1.3265e-01, 2.2551e-01, 1.9897e-01,
-3.9474e-02, 1.6262e-01, 1.6726e-01,
-8.6222e-02, 2.0573e-01, -7.3247e-01,
-9.5391e-02, 3.8933e-01, 1.5861e-01,
-1.2202e-01, -6.4735e-02, -1.1762e-01,
-2.2427e-02, -1.9171e-01, -1.6092e-01,
3.2356e-01, -2.2234e-01, -1.3743e-01,
-1.1493e-01, -2.4936e-02, 2.9212e-02,
-9.8112e-02, -1.8021e-02, -1.0507e-01,
-1.0168e-01, 1.1759e-01, -9.8203e-02,
-2.8871e-02, 1.3249e-01, 7.8378e-02,
-1.1012e-01, -4.0596e-02, 5.4202e-02,
4.9022e-02, -1.1744e-01, 9.8888e-02,
1.3343e-02, 1.4358e-01, -8.7142e-02,
1.9952e-01, 3.3708e-02, 2.0721e-02,
2.6527e-02, -2.3822e-01, 2.4706e-01,
-3.2750e-04, -2.8475e-04, -6.3494e-05,
-2.2378e-04, -1.8046e-04, -1.9242e-05,
-4.2124e-05, -2.2062e-05, 4.5500e-07,
1.1692e-01, 4.0366e-01, -1.8709e-02,
8.2700e-02, 1.7884e-01, -1.3520e-01,
3.7758e-02, 3.7048e-02, -2.8109e-01,
-2.3438e-01, 5.9423e-02, -1.7300e-01,
1.0343e-02, 7.2307e-02, -4.3852e-01,
-5.7429e-02, -4.9136e-02, -8.0327e-02,
8.1094e-02, 2.9118e-02, 1.6677e-01,
1.2155e-01, 6.5358e-01, 2.4544e-01,
3.1163e-02, 3.7463e-02, -2.6613e-01,
1.2723e-01, 1.2541e-01, 1.4319e-02,
1.9055e-01, -5.7441e-02, 1.1146e-01,
-1.0690e-02, -1.7567e-01, -1.2238e-01,
-2.0879e-01, -6.5278e-02, -7.9327e-02,
-1.6564e-01, -1.3659e-01, -2.6231e-01,
-3.1916e-01, -2.6553e-01, -9.8647e-02,
-1.0617e-01, 1.2782e-01, -2.1053e-02,
-1.2329e-01, 1.4952e-01, -1.7466e-02,
-1.6969e-01, 3.6980e-02, -6.7732e-02,
-3.1220e-02, 4.0615e-02, -1.5251e-01,
-2.0017e-01, 2.2421e-01, -2.5682e-02,
-6.5873e-02, 1.8346e-01, 1.2982e-02,
1.4021e-06, -1.6929e-05, -8.4696e-05,
1.9580e-05, 2.9943e-06, 3.0084e-06,
2.0769e-04, 1.4661e-05, 2.9503e-06,
-1.4485e-01, 1.8841e-01, -1.7954e-01,
2.1551e-01, 2.2601e-01, -8.6689e-03,
8.6926e-02, -6.8989e-02, -1.2683e-01,
-8.7712e-02, 6.3176e-02, 1.1983e-01,
1.0790e-01, 6.6418e-02, 6.5849e-02,
1.2483e-01, 1.2428e-01, 4.4994e-02,
1.5139e-01, -1.2116e-01, -3.5497e-01,
-6.1889e-02, 3.4088e-01, 1.3148e-01,
-1.6478e-01, 4.4477e-02, -1.1979e-01,
3.8343e-02, 1.7992e-01, 3.6790e-01,
3.0426e-01, 1.1235e-01, 4.9815e-01,
2.6290e-01, 1.9703e-01, 1.5881e-01,
-6.4678e-03, 2.4401e-01, 1.9266e-01,
-1.4089e-01, 1.2323e-01, 4.4340e-02,
-8.8856e-02, 8.4036e-02, -9.8488e-02,
-1.7377e-03, -1.7654e-03, -1.7223e-03,
-1.7651e-03, -1.7919e-03, -1.7491e-03,
-1.7172e-03, -1.7446e-03, -1.7041e-03,
-3.0384e-04, -2.9297e-04, -2.4838e-04,
-3.2961e-04, -3.1678e-04, -2.7009e-04,
-3.1665e-04, -3.0492e-04, -2.6122e-04,
3.7109e-40, -3.7915e-40, -5.2536e-40,
5.8286e-41, -5.6108e-40, 4.3331e-40,
-3.0184e-42, -4.8987e-40, -5.1788e-40,
-4.0457e-04, -4.3257e-04, -4.1616e-04,
-4.2268e-04, -4.5118e-04, -4.3407e-04,
-3.9446e-04, -4.2199e-04, -4.0650e-04,
-1.1253e-16, -1.1328e-14, -2.0489e-14,
-3.0346e-19, -1.7189e-16, -4.5141e-16,
-2.4957e-30, -1.8191e-23, -3.5882e-22,
-3.1610e-36, -1.7544e-24, -2.2187e-21,
-4.2887e-19, -1.5526e-15, -1.5160e-14,
-1.7750e-16, -6.8066e-14, -3.3764e-13,
-6.9570e-24, -5.1139e-23, -2.9335e-23,
-1.9091e-22, -1.0323e-21, -4.5931e-22,
-2.0010e-22, -9.3710e-22, -3.5622e-22,
-2.9470e-04, -2.9081e-04, -2.5958e-04,
-3.2290e-04, -3.1810e-04, -2.8461e-04,
-3.1795e-04, -3.1356e-04, -2.8121e-04,
6.1623e-02, 1.7057e-01, 8.0478e-02,
1.2624e-01, 1.8468e-01, 2.1901e-02,
7.6033e-02, 1.3455e-01, 8.4037e-02,
8.4434e-02, -1.7069e-02, -7.8318e-02,
4.9244e-02, 4.4782e-02, -6.9747e-02,
1.2915e-01, 1.1453e-01, -6.5243e-02,
-5.0985e-03, -5.1407e-03, -5.1687e-03,
-5.1185e-03, -5.1511e-03, -5.1712e-03,
-5.0986e-03, -5.1272e-03, -5.1409e-03,
-1.8186e-02, 6.2680e-02, 3.3235e-02,
1.3398e-02, 1.6497e-01, 4.3523e-02,
-2.4101e-02, 1.3316e-01, 1.8373e-02,
-6.2677e-04, 6.5026e-03, 2.5948e-02,
6.6542e-02, 1.2352e-01, 1.5155e-02,
-8.6237e-02, -2.0907e-02, 1.0237e-02,
-1.7807e-01, -8.6196e-02, -3.2408e-02,
-8.1946e-03, -1.3957e-02, -1.6733e-01,
2.6269e-02, 1.6817e-01, 9.4029e-02,
3.4005e-02, -1.2833e-02, -1.2038e-01,
-4.8950e-02, 3.9857e-02, 1.4048e-02,
-6.4758e-02, 9.9603e-02, 1.0748e-01,
-1.0850e-02, 9.8875e-02, -4.4439e-02,
9.1219e-02, 6.6400e-02, -6.7693e-02,
5.3318e-02, 1.1838e-02, -1.5164e-01,
-5.8568e-02, 1.1249e-01, -3.8286e-02,
-7.1122e-02, 9.5799e-02, 3.8521e-02,
-1.3846e-01, 1.4167e-01, -3.5500e-03,
-1.0343e-01, -3.3025e-02, 3.7186e-02,
-2.0769e-03, 1.3558e-01, -1.3009e-01,
1.0167e-02, 1.5358e-02, -9.8009e-02,
2.4123e-05, -1.1800e-05, -1.4180e-04,
3.5217e-05, -6.3838e-06, -1.2243e-04,
8.5525e-05, 2.1599e-06, -5.3290e-05,
-1.4471e-01, 2.0111e-02, -1.2449e-01,
5.3368e-02, 3.2918e-01, 1.4034e-01,
-1.1833e-01, -1.9225e-02, -1.2658e-01,
-2.6966e-01, 1.1751e-01, 9.7072e-02,
-1.9929e-01, 9.7986e-02, -5.1240e-02,
-9.5073e-02, -6.8070e-02, -2.1318e-01,
9.5305e-02, -4.0551e-02, -1.0936e-01,
5.2687e-02, 4.5340e-01, 2.3531e-01,
-1.3385e-02, 1.5922e-01, -1.8371e-01,
-1.2203e-01, -7.2567e-02, -3.0000e-01,
-3.4356e-02, -1.3471e-01, -9.0995e-02,
-2.5230e-01, -2.4846e-01, -1.8529e-01,
-1.6962e-01, 1.0905e-01, 1.1557e-01,
-1.4405e-01, 8.9191e-02, 1.1715e-01,
-1.3237e-01, 5.2092e-02, -1.2227e-01
}
,
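/* HDNL3kernelsL[2] */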
{
2.0013e-01, 2.2105e-01, 1.9196e-01,
6.8158e-02, 1.7154e-01, -8.6677e-02,
9.2652e-02, 1.0789e-01, 1.6745e-01,
-2.9254e-01, -7.6815e-02, 5.8812e-02,
-4.6466e-02, 1.3941e-02, 2.3353e-01,
-1.5033e-01, 7.5167e-02, 1.4433e-01,
2.8008e-02, 3.1625e-01, 3.2877e-02,
-5.8835e-02, -1.7305e-01, -6.1558e-02,
-1.2227e-01, 3.9931e-02, 3.0300e-02,
2.3004e-01, 4.1834e-02, -5.7790e-02,
-2.2861e-01, 2.9314e-01, 1.6884e-01,
-2.8009e-02, 4.7550e-02, -4.4542e-02,
-2.4674e-01, -1.5483e-01, 3.2653e-02,
-2.1574e-01, 3.1083e-01, -1.4025e-03,
1.7354e-02, 5.6417e-02, 1.0844e-01,
-4.2681e-40, 4.5893e-42, -7.4234e-40,
1.7665e-40, 4.0151e-40, 4.6269e-40,
2.5452e-40, -7.0179e-40, -1.2338e-40,
-1.4957e-01, -1.9087e-02, 7.1170e-02,
-1.4435e-01, 8.9560e-02, 1.3879e-01,
-3.6992e-02, 5.9822e-02, 1.9241e-02,
-2.4402e-03, 1.5097e-01, 6.3958e-02,
-1.7630e-01, 3.6009e-01, -2.0383e-01,
-8.5106e-03, 4.0863e-03, -2.7575e-02,
7.8942e-02, -1.8640e-01, -6.7715e-02,
7.2777e-02, -1.3804e-01, -7.0332e-02,
1.5185e-01, -4.3530e-02, 1.4502e-01,
-3.2928e-02, -3.0583e-02, 9.2061e-02,
1.2493e-01, 1.0400e-01, 1.3780e-01,
1.4438e-01, 8.2051e-02, 1.6159e-02,
2.7478e-02, 1.7768e-01, 2.5945e-01,
-3.4662e-01, 2.0330e-03, 8.8118e-02,
-2.9628e-01, -1.3212e-01, -1.8145e-02,
-1.9330e-01, 3.9238e-02, -4.6944e-02,
-1.5668e-01, -5.7104e-02, 1.9558e-01,
6.5305e-02, 5.9933e-02, 7.7337e-02,
-2.4906e-02, -1.1235e-01, 1.3822e-02,
-3.9988e-02, -9.1882e-03, 1.9204e-02,
1.0504e-01, 4.6820e-03, -2.1836e-02,
-2.6953e-40, 2.5334e-40, -1.3028e-40,
1.4110e-41, 5.6841e-40, 3.6368e-40,
-1.1746e-41, -7.0658e-41, -3.9413e-40,
1.5025e-02, 7.4419e-02, 9.5652e-02,
5.0297e-02, 6.6704e-02, 5.7316e-02,
2.5102e-02, 1.1985e-01, 2.6043e-02,
3.3297e-02, -7.7374e-02, -1.1114e-01,
-7.5586e-02, -1.9338e-02, -1.3739e-02,
4.5616e-02, -6.4946e-02, -6.9372e-02,
-7.5874e-03, -1.1141e-01, -2.9135e-02,
-6.9436e-03, -1.4418e-02, 1.6436e-03,
-1.3051e-01, -1.3324e-01, -9.3934e-02,
1.2184e-01, 1.9386e-01, 1.7995e-01,
-2.7452e-02, 9.9736e-02, 1.0020e-01,
-6.3290e-02, -2.1447e-02, -1.7005e-01,
1.3857e-01, 2.3338e-01, 2.5410e-01,
2.3002e-01, 1.9551e-01, 1.4452e-01,
4.7040e-01, 2.2647e-01, 1.5215e-01,
2.6927e-02, -2.1304e-01, -1.4762e-01,
-5.6998e-02, 2.9064e-01, 1.8085e-01,
8.9393e-02, -1.7463e-01, -2.7095e-01,
3.8434e-02, 1.7198e-01, -1.8122e-02,
-1.3857e-01, 1.9418e-01, 1.5019e-01,
-5.6337e-02, -5.3265e-01, 3.2122e-01,
-2.4484e-40, -5.3707e-40, 1.5854e-41,
5.1791e-40, -4.1875e-41, 5.6732e-40,
1.3048e-40, 1.6452e-40, -4.5028e-40,
-3.0692e-02, 1.8569e-01, 2.0327e-01,
-7.4756e-02, -5.1765e-02, 4.2475e-02,
-9.0675e-02, -3.0438e-01, -3.5088e-01,
-1.9129e-02, -1.5663e-03, 4.9895e-02,
-1.9441e-02, 9.3237e-02, 1.2910e-01,
-2.3919e-02, -4.0539e-01, 2.8167e-02,
2.0203e-01, 3.3424e-02, 1.7927e-02,
4.1923e-02, -1.6967e-01, 2.5656e-02,
-1.5869e-01, -1.8727e-01, 2.7860e-03,
-4.0276e-02, -6.7792e-03, 3.3699e-02,
-6.7044e-03, 1.7686e-02, 2.9786e-02,
-1.5623e-02, 3.7904e-02, 2.4737e-02,
-1.2282e-01, -3.6563e-02, 4.1976e-02,
-9.9622e-03, 8.8981e-02, 2.1364e-02,
-8.5668e-02, -1.6803e-01, -4.4974e-02,
1.3164e-01, 4.1294e-01, 1.8897e-01,
2.1991e-01, 1.6247e-02, 1.1569e-01,
-3.0142e-02, 1.4069e-02, 3.6646e-02,
-2.6816e-02, -3.9767e-02, 1.4061e-01,
-1.3603e-01, -2.0649e-01, 7.5837e-02,
-1.6984e-02, -8.3800e-03, 2.3652e-04,
1.5049e-40, 4.6504e-40, 1.3625e-40,
-7.5358e-40, -3.4257e-40, 9.9763e-41,
4.7243e-40, 7.4890e-40, -7.9440e-42,
-5.9692e-02, -2.8047e-02, 2.3795e-02,
-3.5284e-02, 1.1448e-02, 5.0302e-04,
-3.5066e-02, 4.6185e-02, 1.2167e-02,
3.7583e-02, -3.6598e-02, 1.0206e-01,
-9.6229e-02, -1.5977e-01, 4.9157e-02,
3.7293e-02, 5.8766e-02, 1.0448e-02,
1.1490e-01, 1.4459e-01, 8.6936e-02,
2.8609e-01, -4.8108e-02, 9.0023e-02,
6.7941e-02, -5.7148e-03, 1.0021e-01,
7.3816e-02, 7.3794e-02, 8.0970e-03,
2.8307e-02, 3.6635e-03, -1.1769e-01,
4.1374e-02, 3.9933e-02, -4.4292e-02,
5.9423e-02, 1.9009e-01, -2.3735e-01,
-2.6670e-01, 5.8789e-01, -2.0048e-01,
-3.7082e-01, 1.8045e-01, 5.4820e-02,
-6.3567e-01, 2.0098e-01, 1.0653e-01,
-2.5056e-01, 6.5065e-01, -4.0471e-01,
5.4715e-02, 2.4375e-01, -2.7402e-01,
1.5982e-01, 1.0923e-01, 2.1566e-01,
2.0239e-01, -9.0221e-02, -4.4606e-01,
1.0550e-01, 5.4666e-02, -2.7134e-01,
-4.6424e-40, 2.9137e-40, 7.4968e-41,
1.2376e-41, -5.6213e-40, -6.3457e-40,
2.5404e-40, 2.0013e-40, 3.5611e-40,
5.5423e-02, 3.9843e-02, -1.7509e-01,
5.4480e-02, 5.0331e-02, -1.6793e-01,
6.6093e-02, 3.0163e-02, -8.2023e-02,
-1.5490e-01, 1.7457e-01, 2.7832e-01,
1.1482e-01, 2.5759e-01, -2.4199e-01,
-9.3891e-02, 9.1921e-02, -6.4480e-03,
1.9266e-01, 5.2907e-02, 7.0289e-02,
1.3582e-01, 6.4246e-02, 1.4989e-01,
6.2013e-03, -6.8884e-02, 6.8734e-02,
-1.0483e-01, -7.7134e-02, -3.6204e-02,
1.7590e-02, 5.0844e-02, 1.4234e-01,
7.2913e-02, 6.0726e-02, 6.4414e-02,
-8.5021e-02, -1.0621e-03, 5.5851e-02,
2.4666e-01, 6.5652e-02, -1.8180e-02,
1.5225e-01, 1.2928e-01, 3.1578e-03,
1.1468e-01, 1.9544e-01, 6.6637e-02,
6.3430e-02, 2.0542e-01, 7.0876e-02,
3.4779e-02, 1.0037e-02, -2.2134e-02,
-6.9304e-02, 1.1184e-01, -3.7015e-02,
-1.7634e-01, 1.2475e-01, 9.1947e-02,
-6.0550e-02, -1.3904e-01, 7.5192e-02,
-2.2871e-40, 4.7367e-41, -1.0711e-40,
-2.8662e-40, 4.0542e-41, 3.3067e-40,
-4.4395e-41, -7.2684e-41, 1.8695e-40,
-1.6702e-01, -2.6654e-01, 8.7902e-03,
-2.0108e-01, -3.8093e-01, -8.3700e-02,
-7.5433e-02, -2.0689e-01, 2.7951e-02,
2.9938e-03, 1.1378e-01, 7.1598e-02,
-1.6031e-01, 1.3475e-01, 1.5800e-01,
-7.2019e-02, -1.1663e-01, 8.0692e-02,
1.0610e-01, 1.1163e-02, -1.4959e-01,
-1.1576e-01, -8.5645e-02, 4.0414e-02,
5.6245e-02, 1.7056e-01, 2.5734e-01,
-6.1086e-02, -7.0851e-02, 7.6851e-02,
-2.7595e-02, -6.0890e-02, 4.7472e-02,
7.1059e-03, 6.0942e-05, 7.4915e-02,
1.9350e-01, -1.8458e-02, -2.3040e-02,
6.3477e-02, 1.1923e-01, 9.9319e-02,
6.4839e-02, 2.7973e-01, 1.2902e-01,
-1.7829e-01, 5.7083e-03, -6.1680e-03,
-1.1256e-01, -2.7951e-02, -2.1544e-01,
-2.1614e-02, -7.1468e-02, -2.2054e-02,
-8.7543e-02, -1.2982e-01, 1.9386e-01,
-5.7157e-03, -1.0108e-01, 1.4467e-01,
-6.5742e-02, -7.2054e-02, 1.7924e-01,
7.5418e-40, 6.3043e-40, 4.9815e-40,
-1.0952e-40, 3.0327e-40, -2.3848e-40,
4.1302e-40, 2.0150e-40, -1.6509e-40,
-1.3985e-02, -1.0550e-01, 5.8772e-02,
-1.7108e-02, -7.3644e-02, 3.3014e-02,
-1.8224e-03, 2.8931e-03, 9.2762e-02,
4.1531e-02, -1.5139e-01, -1.7773e-01,
9.6548e-02, -1.1914e-01, -4.6536e-02,
8.6754e-02, -4.0057e-03, 1.8983e-01,
1.6545e-01, -4.7311e-02, -7.2455e-03,
3.7567e-01, 1.8883e-01, -7.4325e-02,
-5.8252e-02, -1.3811e-02, -7.0470e-02,
-3.2943e-02, -7.0770e-02, -1.4700e-01,
1.7043e-02, 9.4331e-02, 4.2857e-03,
4.1247e-03, 1.6690e-01, 4.2146e-02,
1.1420e-01, -7.4456e-02, -3.8763e-02,
1.6807e-01, 9.3636e-03, -1.1796e-01,
1.7703e-01, 1.1386e-03, -6.8707e-02,
1.0259e-01, -1.8918e-02, 6.5902e-03,
1.2421e-02, -7.8960e-02, 2.1766e-02,
1.3062e-01, 4.6001e-02, 2.4199e-01,
-1.2955e-02, -1.9329e-01, 5.2074e-03,
5.9446e-02, 1.8832e-01, 2.2094e-01,
-1.0954e-01, -8.1867e-02, -4.3324e-02,
-3.9596e-41, 2.8677e-40, -6.5843e-40,
4.2812e-41, -3.5323e-40, 4.8298e-40,
7.6351e-40, -2.4759e-40, 7.3030e-40,
-1.1284e-01, -8.4171e-02, -1.5935e-01,
-3.2299e-02, 1.5427e-01, 8.9029e-02,
-3.8815e-02, 1.3098e-01, -4.3065e-02,
-2.5276e-01, -1.7018e-01, 9.7901e-02,
1.4218e-01, 3.1236e-01, 2.9636e-01,
-2.3613e-02, -5.5258e-02, -2.0550e-01
}
,
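/* HDNL3kernelsL[3] */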
{
0.0333, 0.1145, -0.0922,
0.1185, 0.4533, -0.2015,
-0.0774, 0.1759, -0.0496,
0.0954, -0.0499, 0.0824,
0.1059, 0.0173, -0.0586,
-0.0666, -0.0287, -0.0652,
-0.0558, -0.1362, 0.0015,
0.1277, 0.1020, -0.1369,
0.0020, -0.0103, -0.0804,
0.0507, 0.1404, -0.0241,
0.0520, 0.1239, 0.0633,
-0.0268, 0.0335, 0.0883,
-0.0549, -0.1022, -0.0515,
-0.0163, -0.1167, -0.0442,
0.0858, -0.0804, -0.0014,
0.0354, -0.0666, -0.2105,
-0.0950, 0.1578, -0.0920,
-0.1303, 0.0299, -0.0195,
-0.0281, -0.1993, -0.0154,
0.0796, 0.0503, 0.0954,
0.0540, 0.0212, 0.0389,
-0.1387, 0.1091, -0.1212,
0.1556, 0.3573, 0.0976,
-0.0587, -0.2070, 0.2067,
0.0138, 0.0051, -0.1008,
0.2877, 0.1079, -0.0681,
0.0953, -0.0739, -0.2349,
0.1482, 0.0657, 0.0480,
0.1590, -0.0009, 0.1402,
0.0700, 0.0435, 0.1190,
0.0957, 0.0117, -0.1010,
0.1790, -0.0200, -0.0765,
0.0797, 0.1455, -0.0340,
0.0008, -0.0267, 0.0089,
0.0644, 0.0647, 0.0397,
0.0463, -0.0116, -0.0771,
0.2237, 0.0324, 0.0192,
-0.0082, -0.0345, 0.0294,
0.0719, -0.0185, 0.1008,
-0.0307, 0.0134, -0.0747,
0.0776, -0.1485, 0.0135,
0.0965, -0.0665, -0.1263,
-0.0101, -0.0097, -0.0144,
-0.0022, -0.0083, 0.0277,
0.0136, -0.0076, 0.0314,
-0.0008, 0.0722, -0.0704,
0.0053, 0.0767, 0.0368,
-0.0189, -0.1354, 0.0231,
-0.1416, 0.1945, -0.1756,
0.2058, 0.0401, -0.1348,
-0.0945, -0.2530, -0.3082,
-0.0096, 0.0871, 0.0699,
-0.0092, 0.0423, 0.0995,
-0.0914, -0.0570, -0.0718,
-0.0739, -0.2749, -0.2320,
0.1488, -0.2698, -0.1977,
0.1445, -0.1655, -0.0758,
0.2035, -0.0138, 0.0332,
0.0282, -0.2247, -0.0945,
-0.0614, -0.2484, -0.0595,
-0.1174, -0.1252, 0.1969,
-0.1101, -0.2950, -0.2164,
-0.0348, -0.0891, 0.1250,
0.0195, 0.0050, 0.0300,
-0.0508, -0.0316, -0.0194,
0.0199, 0.0345, 0.0444,
-0.0022, -0.0529, 0.1604,
0.0756, -0.2015, -0.2117,
-0.0837, -0.1270, 0.1330,
0.0286, 0.0952, 0.1082,
0.0724, -0.0446, -0.1156,
0.0545, 0.0444, -0.0291,
0.0759, 0.1110, 0.0944,
0.1615, 0.4302, -0.1060,
0.0418, -0.0281, -0.1378,
-0.0757, -0.0527, -0.1578,
0.0123, -0.0427, 0.1504,
0.0694, 0.0690, 0.0203,
0.2132, -0.3449, 0.0936,
0.2491, 0.0279, -0.0884,
-0.0447, 0.1589, -0.0054,
-0.0246, 0.1247, 0.0403,
0.0513, -0.0541, -0.1141,
0.0712, -0.1174, -0.0051,
0.2304, 0.2431, -0.0517,
-0.1548, -0.0401, 0.2032,
-0.0087, -0.1676, -0.0600,
0.1094, -0.0329, 0.0530,
-0.0580, 0.1499, -0.0806,
-0.0086, -0.1400, -0.0636,
0.0708, -0.1003, -0.1113,
-0.0732, -0.1199, 0.0060,
-0.0534, -0.0011, 0.0965,
-0.0268, 0.0116, -0.1161,
0.0787, 0.3925, -0.0819,
-0.0041, -0.0892, -0.2063,
-0.1296, 0.0924, -0.0079,
0.5625, 0.4013, 0.1645,
-0.0137, -0.1935, 0.2714,
0.0980, 0.0016, -0.1461,
0.1576, 0.0305, -0.1450,
0.1503, -0.0303, -0.1403,
0.0262, -0.0077, 0.0459,
0.2718, 0.0754, 0.2404,
0.1381, -0.1499, 0.0016,
0.1454, -0.1278, -0.0085,
0.1674, -0.0834, 0.1993,
0.0874, -0.0598, -0.0188,
0.2003, 0.3296, 0.0153,
-0.0154, 0.5550, -0.0945,
0.0489, 0.0415, -0.0940,
0.0164, 0.0791, 0.1077,
-0.0893, 0.1231, 0.0473,
-0.0319, 0.1444, 0.1690,
-0.0518, -0.1404, -0.1778,
-0.0170, 0.1395, -0.0234,
0.0128, -0.0112, -0.0472,
0.1039, 0.1982, -0.0272,
0.0282, -0.1199, -0.2622,
-0.0449, 0.0239, -0.1030,
-0.0840, -0.1044, -0.0646,
0.0588, 0.1937, -0.2494,
0.0180, 0.0747, 0.1530,
0.0500, 0.1756, 0.0491,
-0.1113, -0.0079, 0.0854,
-0.1493, -0.0559, -0.0373,
0.1972, -0.3158, -0.0500,
0.1932, 0.3177, -0.0018,
-0.0516, -0.1144, 0.0686,
0.0175, 0.0598, 0.0345,
-0.0667, -0.1078, 0.0384,
0.0897, 0.2198, -0.0531,
-0.2596, -0.1997, 0.0195,
0.0332, 0.4098, 0.1381,
0.1985, -0.0669, -0.1275,
-0.0751, -0.2388, -0.0672,
0.0090, 0.0891, -0.0362,
0.1392, -0.0518, 0.2039,
0.2079, -0.1202, 0.0707,
0.0498, -0.1237, -0.0665,
-0.0398, -0.1557, -0.0928,
0.0505, 0.1220, 0.0352,
-0.0674, -0.1159, 0.0724,
-0.0331, -0.1751, 0.0766,
0.0992, -0.0763, 0.0090,
-0.1223, 0.2621, -0.2029,
0.0509, -0.0279, -0.1061,
0.0598, 0.0353, -0.1610,
0.0165, 0.0835, 0.0704,
-0.0079, -0.0982, 0.0187,
0.2331, -0.1929, 0.0684,
-0.0507, 0.1476, -0.0886,
-0.0275, 0.1658, 0.0697,
-0.1123, -0.0069, -0.0851,
-0.0377, -0.0917, -0.0629,
-0.0420, 0.0506, 0.1111,
0.1086, 0.1351, -0.0851,
0.0466, 0.2750, 0.0185,
-0.0208, 0.2090, 0.0271,
0.0217, -0.0548, 0.0078,
-0.0609, 0.1029, -0.1641,
0.1392, 0.0115, 0.0317,
-0.0570, 0.1060, 0.1814,
-0.2015, -0.1301, 0.1082,
0.2452, -0.1815, -0.0046,
0.0103, -0.0466, -0.0895,
0.0158, -0.0594, -0.1386,
-0.0073, -0.0719, -0.0716,
0.1308, -0.0206, 0.0511,
-0.0437, -0.0763, 0.0287,
0.0493, -0.1239, 0.0219,
-0.0041, 0.0373, 0.0262,
0.0078, -0.0249, -0.0284,
0.0598, -0.0205, -0.0276,
0.0115, -0.1778, -0.0395,
0.1673, -0.0036, 0.2334,
0.0706, -0.0694, 0.0177,
0.1123, -0.0043, 0.0716,
-0.0894, -0.1609, 0.0334,
-0.0046, -0.2006, -0.0977,
-0.0127, 0.1198, -0.0339,
-0.0283, 0.1354, 0.1637,
-0.1696, 0.0187, -0.2621,
0.0496, 0.2834, 0.0423,
0.1126, 0.3962, 0.1660,
-0.0750, 0.1955, 0.0590,
-0.1088, -0.1146, -0.1219,
0.1360, 0.1524, 0.0498,
-0.1151, 0.0219, -0.0063,
-0.0821, 0.0247, -0.1065,
0.1153, 0.2085, 0.0618,
-0.0383, 0.0527, -0.2067
}
,
{
1.8014e-01, 2.1908e-01, -2.1088e-03,
1.7345e-01, 2.7654e-01, 1.3607e-02,
1.1363e-01, 9.9105e-02, -6.5730e-02,
-3.5679e-02, 9.6072e-03, 4.0721e-02,
-1.8771e-02, -2.3484e-04, -1.0230e-02,
1.6965e-02, -1.3032e-02, -6.3906e-02,
-4.5686e-02, -3.6733e-02, -4.8873e-02,
4.0752e-02, 2.1615e-02, -1.4822e-02,
1.1689e-01, 3.0153e-02, -5.0163e-04,
-7.0394e-03, -1.2387e-01, -8.9243e-02,
-1.8312e-01, -1.3868e-01, -6.2618e-02,
-8.1627e-02, -2.0480e-01, -3.0740e-01,
4.4296e-02, 3.8572e-02, 4.3754e-02,
1.7538e-01, 5.3284e-02, -7.5663e-03,
1.9670e-01, -1.2397e-01, -1.6266e-01,
1.4575e-01, -5.7771e-02, 2.7619e-02,
2.2757e-02, -4.8910e-01, -2.6201e-01,
3.6513e-02, -2.0704e-01, -1.3225e-01,
-6.7533e-02, 1.1289e-02, 7.1316e-02,
-7.6847e-02, 6.8128e-02, 7.4717e-02,
1.1269e-01, 2.9978e-02, 3.2132e-02,
-5.4557e-02, -4.4599e-02, 4.1835e-02,
5.7964e-02, -2.1246e-03, 1.5007e-01,
1.8432e-01, 1.1463e-01, 2.2691e-01,
9.6166e-02, 4.7887e-02, -3.8399e-02,
5.8153e-02, -2.0255e-02, -1.1362e-01,
2.6402e-02, 2.5562e-02, 1.9096e-02,
1.1588e-01, 1.4540e-01, 1.1948e-01,
1.0360e-01, 5.9083e-02, 1.9263e-01,
1.6953e-01, 2.7390e-02, 9.7883e-02,
1.5059e-01, 6.7593e-02, -4.5843e-03,
8.7031e-02, -2.0926e-03, -6.3056e-02,
-6.6960e-02, -5.2056e-02, -7.3570e-02,
1.4361e-02, 1.1059e-01, -4.9720e-02,
4.4270e-02, 3.9995e-02, 4.3101e-03,
-1.1042e-01, 4.5028e-02, -8.9124e-02,
-1.2906e-01, -7.6972e-02, -6.5449e-03,
-1.9269e-01, 2.8349e-01, 1.1573e-01,
-1.7983e-01, 9.7615e-02, 9.4003e-03,
-4.7802e-02, -1.5889e-01, -1.2693e-01,
7.4717e-02, 2.8655e-01, -7.2637e-02,
1.5837e-02, 8.7125e-02, -1.2198e-01,
-1.7754e-02, -5.6443e-02, -9.8661e-03,
6.3040e-02, 2.0249e-02, -3.5368e-02,
9.7756e-03, 2.6760e-02, -5.5172e-02,
-1.0406e-02, 4.8313e-02, 2.4717e-02,
-5.2851e-02, 6.8496e-02, -2.5933e-02,
4.5932e-02, 5.9892e-02, 1.9200e-02,
-5.1316e-40, -5.1811e-40, -1.5144e-40,
-6.7758e-38, -5.4608e-40, -3.9680e-40,
-1.9155e-39, 2.0423e-41, 1.5256e-41,
-2.5559e-08, -3.2461e-08, -2.6821e-08,
-3.6885e-08, -4.6896e-08, -3.9086e-08,
-3.4305e-08, -4.4160e-08, -3.7187e-08,
-3.7416e-40, 3.6550e-40, 5.0727e-40,
-1.6722e-40, 3.9228e-40, 5.4548e-40,
-5.7512e-40, -2.8156e-40, 9.4571e-41,
-4.7040e-40, -1.6974e-40, 6.3849e-40,
-3.7322e-40, 2.6014e-40, 2.3080e-40,
-2.8395e-40, -3.7116e-40, 4.4393e-40,
1.1597e-40, 4.3291e-40, 3.8219e-40,
3.3393e-40, 3.1747e-40, -1.8400e-36,
-5.5215e-40, 1.7648e-40, -1.6540e-35,
-3.0953e-40, 5.3063e-40, -1.6454e-40,
2.1341e-40, 2.0790e-40, -3.0226e-40,
-2.6807e-40, -1.6601e-40, 5.1829e-40,
-1.8897e-40, -4.5956e-41, 5.3784e-40,
-2.5661e-40, -2.1726e-40, 1.2010e-40,
1.8263e-41, 1.1214e-40, -3.7693e-40,
-4.2596e-40, 1.8854e-40, 5.5010e-40,
-6.6262e-40, -4.8808e-40, 3.3123e-40,
5.9379e-41, 2.3249e-40, 4.4504e-40,
-8.4836e-04, -8.4397e-04, -5.8640e-04,
-8.3506e-04, -8.0192e-04, -5.3901e-04,
-8.3539e-04, -7.8069e-04, -4.8720e-04,
-3.4706e-04, -4.4640e-04, -5.2353e-04,
-4.4518e-04, -5.3374e-04, -5.2734e-04,
-5.8780e-04, -5.8730e-04, -5.4362e-04,
-5.2452e-04, -5.4578e-04, -5.6266e-04,
-4.2387e-04, -4.4643e-04, -4.8936e-04,
-3.5880e-04, -3.7886e-04, -4.1998e-04,
-2.4479e-04, -4.0736e-04, -3.1189e-04,
-3.4922e-04, -4.0173e-04, -2.5042e-04,
-5.7091e-04, -5.2665e-04, -2.3293e-04,
-2.8505e-04, 9.7283e-05, 3.1209e-04,
-2.7463e-04, 1.8704e-04, 4.4351e-04,
-9.1436e-05, 3.2602e-04, 5.7573e-04,
-4.0112e-04, -4.2566e-04, -2.4300e-04,
-9.9362e-05, -6.5499e-05, 3.2872e-05,
1.1584e-04, 2.3417e-04, 3.4427e-04,
-7.5767e-05, 3.9768e-06, 6.2201e-05,
2.3151e-05, 2.5595e-04, 3.4038e-04,
-1.3871e-05, 3.0295e-04, 4.4170e-04,
-1.7802e-04, -4.5376e-04, -5.1847e-04,
-5.0687e-04, -5.5837e-04, -2.5917e-04,
-5.3992e-04, -7.1375e-04, -4.8728e-04,
-1.7543e-01, -3.4151e-01, -3.2619e-02,
-1.9701e-02, -1.5494e-01, -1.6534e-01,
3.5632e-02, -1.0897e-01, -3.8379e-02,
-6.1420e-02, -1.0735e-01, 1.4730e-01,
7.4386e-02, -1.0487e-01, 7.9646e-02,
1.7130e-02, 4.4391e-02, -5.1959e-03,
4.5682e-02, -1.1543e-01, 9.4035e-03,
-3.4376e-01, -1.1961e-01, 1.0099e-01,
1.1335e-01, 7.5840e-02, 1.0675e-01,
4.9539e-02, 8.7406e-02, 4.4951e-02,
1.8111e-01, 2.6406e-01, -1.5924e-02,
-1.1464e-01, 8.4579e-04, -6.6811e-02,
-8.9635e-03, 1.8236e-03, 3.6561e-02,
-7.0281e-02, 2.9717e-01, 3.1836e-02,
-1.3647e-01, -6.5627e-02, 9.3063e-02,
-2.1851e-01, -6.0226e-02, -1.0326e-01,
5.3441e-02, 1.9103e-01, -5.7999e-02,
-3.3512e-02, 1.5496e-01, -1.1111e-01,
2.3256e-03, -1.5004e-01, -9.1248e-02,
-9.7706e-02, 1.9549e-01, -1.5403e-01,
-1.5327e-01, 8.3335e-02, 5.6111e-03,
-1.5707e-01, 8.0277e-03, -7.3955e-02,
-1.4111e-01, -1.3548e-01, -1.0563e-01,
2.3054e-01, -2.1822e-02, -6.6938e-03,
-1.0259e-01, 4.3577e-02, -1.7630e-01,
1.6484e-01, 4.2413e-01, 6.9475e-02,
-2.4705e-01, 2.5757e-01, -9.5611e-02,
1.0236e-01, -3.4820e-02, -6.8818e-03,
-1.1434e-01, -3.1800e-01, 2.1337e-02,
-1.9939e-01, -2.6532e-01, 7.3361e-02,
6.5939e-02, 9.5812e-02, -7.0156e-02,
-1.6249e-02, -1.5927e-02, -1.1189e-01,
-9.3936e-03, -1.0933e-01, -2.9399e-02,
-2.8752e-02, -4.5613e-02, -1.2718e-02,
3.8781e-01, 2.6776e-01, -1.0373e-02,
-2.3927e-02, -6.4398e-02, 9.9117e-02,
-6.0732e-02, -5.5917e-03, 5.1716e-02,
-1.4168e-01, 1.7661e-01, -5.5893e-02,
-3.0419e-01, -3.5537e-01, 2.1978e-01,
-1.8610e-01, -5.7743e-03, 3.2649e-02,
1.9975e-01, 1.6508e-01, 1.3808e-02,
1.0733e-01, 1.4722e-01, 5.8671e-02,
6.4940e-02, 1.6114e-01, 3.9697e-02,
1.1530e-01, 2.4021e-01, -2.1669e-01,
6.0220e-02, 2.0257e-01, -1.5227e-01,
-6.1096e-02, 6.6511e-02, -1.3858e-01,
-6.5275e-02, 1.0891e-01, 8.2048e-02,
-6.7907e-02, 2.2863e-02, -1.0322e-01,
1.6542e-01, -1.4436e-01, 6.4125e-02,
-1.0378e-01, -3.2346e-01, -1.5123e-02,
3.8758e-03, 1.1006e-01, -4.4325e-02,
-1.0102e-01, -3.7699e-02, 9.2472e-02,
-6.8972e-02, -1.2308e-02, 1.6478e-01,
3.4351e-02, -1.7461e-02, 1.0301e-01,
-2.7125e-01, -5.6730e-02, -2.5989e-01,
-3.0163e-01, -1.4826e-01, -3.4955e-01,
-1.6259e-01, -1.6708e-01, -2.7964e-01,
-6.7134e-02, -2.2385e-01, 2.1776e-01,
-1.1351e-02, -3.7861e-01, 1.8687e-01,
4.0551e-02, 8.1943e-02, 1.0866e-01,
1.0273e-01, 1.1844e-01, -1.1852e-01,
2.6758e-02, -8.5806e-02, 5.9444e-02,
-5.1627e-02, 7.1636e-02, 2.2841e-01,
-3.7242e-03, 2.9723e-01, 1.1918e-01,
8.4994e-02, -3.5747e-01, 3.6148e-02,
9.9705e-02, -1.3736e-01, -6.0080e-02,
1.2370e-01, 5.0668e-02, -6.0246e-02,
6.0562e-02, -3.5068e-01, -3.2645e-01,
9.1020e-04, 6.6203e-02, -1.0770e-01,
1.9434e-02, 3.0018e-01, 2.8018e-01,
1.4021e-01, 2.7481e-01, 2.2868e-01,
4.8540e-02, 1.7719e-01, -4.5834e-02,
-9.6349e-02, -2.3008e-02, -1.4497e-01,
4.3053e-02, -1.0161e-01, 2.8750e-02,
-1.2594e-01, -1.0388e-02, -4.3966e-02,
7.5993e-02, -7.1609e-02, 1.4624e-02,
4.1110e-02, 7.1258e-02, -2.9109e-02,
-5.8698e-03, 1.2389e-01, 4.7648e-02,
-6.1585e-04, -4.4556e-02, -2.3373e-02,
-4.4883e-02, -7.7722e-02, -7.3635e-02,
-2.7750e-02, -1.5117e-03, -8.7368e-02,
2.5113e-02, 7.7490e-02, 2.9024e-02,
1.5426e-01, 2.5472e-01, 4.8057e-02,
-1.1969e-01, -1.1487e-01, -1.1802e-01,
-4.7392e-02, -4.2226e-02, 3.1968e-02,
-2.6717e-01, -5.0206e-02, 8.1946e-04,
-4.0426e-02, 1.4373e-01, -3.3121e-03,
-4.5292e-02, -2.4538e-02, 1.0377e-01,
-1.7780e-02, 2.0058e-01, -2.4343e-02,
-1.1714e-02, 1.5984e-01, -1.2638e-01,
6.4655e-02, 3.7703e-02, 3.7970e-02,
9.1864e-03, 1.1468e-01, -6.2760e-04,
-1.4812e-01, 6.5670e-03, 1.0765e-01,
1.5023e-01, -7.0594e-02, -1.3924e-01,
3.6016e-02, -3.9078e-02, -3.8950e-02,
1.8735e-02, -1.5573e-01, -1.2456e-01
}
,
{
4.8634e-02, -1.3617e-01, 6.1231e-02,
-7.0235e-02, -6.4110e-01, 1.5985e-01,
8.6151e-02, 1.1847e-01, 1.3819e-01,
-3.6017e-04, -3.2273e-02, -8.5485e-02,
-7.0804e-03, 2.1751e-01, 7.2575e-03,
-8.3606e-02, -1.4885e-01, -1.2702e-01,
4.0848e-41, 8.0934e-40, -1.8889e-40,
-3.9103e-40, -7.4709e-40, 3.8377e-40,
-2.4159e-40, -4.7610e-40, 7.7359e-40,
-8.6217e-05, -5.9763e-05, -4.0558e-05,
-7.4966e-05, -4.7074e-05, -3.1656e-05,
-9.8390e-05, -6.6833e-05, -4.7669e-05,
3.5375e-02, 2.8660e-02, 4.1277e-02,
1.6289e-01, -3.2199e-01, -1.7845e-02,
2.4659e-01, -3.9618e-02, 4.1065e-03,
2.7267e-02, 8.6819e-02, 9.5070e-02,
-7.2700e-02, -2.8826e-01, 1.1750e-03,
2.5259e-02, 2.4681e-03, 6.4737e-02,
7.3023e-03, 2.9631e-02, 1.0820e-02,
-2.1400e-02, 5.4244e-01, 1.5639e-01,
-1.7561e-01, 4.8947e-01, -8.8305e-02,
6.5073e-02, 3.4922e-01, 1.3483e-01,
1.4506e-01, -2.5472e-01, -7.2894e-02,
4.5945e-02, 1.4040e-01, 1.2148e-01,
-2.6932e-01, -1.1518e-01, -9.3158e-03,
-2.3961e-01, -1.2479e-01, -8.9796e-02,
1.8688e-02, -4.9267e-02, 7.7189e-02,
-7.3691e-02, 7.8186e-03, 1.3761e-02,
-1.5689e-01, 3.1138e-02, 3.9231e-02,
-4.3607e-03, 2.0813e-01, 5.5635e-02,
-6.7000e-41, 9.8995e-41, 3.0043e-40,
6.7190e-40, 4.0827e-40, 7.6057e-40,
4.2208e-40, 8.1141e-40, -3.3569e-40,
1.0179e-03, 5.1543e-04, 3.8076e-04,
7.3507e-04, 4.5432e-04, 3.7410e-04,
9.3014e-04, 6.7365e-04, 6.0051e-04,
-5.1998e-02, 6.5768e-02, 3.1603e-02,
-3.0198e-02, -3.1692e-02, -6.9299e-02,
1.7672e-02, 2.3766e-01, 5.7877e-02,
-5.7944e-02, 1.2624e-01, -1.4396e-01,
-4.1542e-02, 6.5110e-01, 1.0942e-01,
-1.3133e-01, 5.0538e-02, -2.7371e-02,
-3.7515e-02, 2.8703e-02, 1.2382e-03,
3.8542e-01, -2.2754e-02, 3.4459e-02,
3.0545e-01, -5.3817e-01, -2.1389e-03,
1.3888e-02, -2.2775e-01, -6.3692e-02,
-1.8430e-01, 5.8452e-02, 4.5764e-02,
-8.5045e-02, -1.7060e-01, -1.8565e-02,
-2.0384e-02, -3.3018e-02, -5.1135e-02,
-4.5789e-02, -1.8105e-01, 3.5419e-02,
-5.0081e-02, 8.7719e-02, 1.0373e-01,
-1.0033e-02, 7.0530e-02, -7.8012e-03,
8.4042e-02, 1.1982e-01, -9.6046e-02,
-6.4009e-02, -1.0711e-01, -1.3523e-01,
1.8868e-41, -7.0039e-40, -7.2568e-40,
1.7408e-40, -7.8143e-40, -6.8130e-40,
-6.3142e-40, -6.2560e-40, -7.4238e-40,
2.6297e-04, 7.0014e-05, -4.0981e-04,
2.6263e-04, 4.2811e-05, -4.9950e-04,
3.9795e-04, 1.2615e-04, -4.7660e-04,
7.5933e-02, 2.6295e-02, 2.7984e-02,
-5.5914e-03, -8.7981e-02, -9.2618e-02,
4.2725e-02, -3.1210e-01, 1.3412e-01,
5.2683e-02, 3.9891e-01, 2.9150e-02,
-6.6090e-02, 2.9455e-01, -1.9710e-01,
1.4546e-02, -2.5572e-02, 8.1125e-02,
1.2271e-01, 1.6097e-01, 4.5644e-02,
3.6101e-02, -1.7174e-02, 6.6110e-02,
1.5078e-01, 4.5180e-01, 7.7154e-02,
-5.9725e-02, 1.0185e-01, 1.1363e-03,
6.7791e-02, 1.7696e-02, 5.2638e-02,
3.3051e-02, -8.4049e-02, 1.4380e-01,
1.8744e-02, -2.0940e-01, -2.1424e-01,
-2.1329e-01, -1.3154e-01, -3.2572e-01,
1.1292e-01, 1.2361e-02, -1.5506e-01,
-1.0362e-02, 1.9955e-02, 4.2639e-02,
-2.1952e-02, -2.4682e-02, -2.4453e-02,
-2.5606e-02, -3.3580e-02, -3.6340e-02,
-5.0830e-40, 6.3797e-40, -5.2775e-40,
-7.7988e-40, -7.4579e-40, -5.1901e-40,
-3.8275e-41, -5.7607e-40, -1.3656e-40,
2.7164e-04, 5.9977e-04, 8.6886e-04,
3.0116e-04, 7.0106e-04, 1.0248e-03,
2.9177e-04, 6.4748e-04, 9.4825e-04,
6.6310e-02, 1.5240e-02, -5.3044e-02,
1.2545e-01, 5.0582e-02, 2.7358e-02,
1.9338e-01, 1.1377e-01, 4.6110e-02,
-3.1997e-02, 1.5171e-02, -4.9372e-02,
5.4615e-04, 1.7262e-01, -2.2081e-01,
8.4871e-02, 1.7824e-02, -3.6429e-02,
4.2821e-02, -1.0055e-01, 4.8927e-02,
1.2524e-01, 5.8859e-02, -2.0980e-02,
2.2897e-01, 1.7594e-01, 3.4239e-02,
1.0915e-01, 1.2088e-01, 1.0151e-01,
6.8449e-03, -1.5546e-01, 1.2024e-01,
4.9036e-02, -1.2245e-01, 4.6713e-02,
7.5083e-03, -4.8084e-02, 9.7731e-03,
4.8779e-02, 3.1848e-02, -9.3517e-02,
6.4595e-02, 3.9337e-02, -7.2343e-02,
3.9519e-02, 4.1867e-02, -5.0485e-02,
2.5257e-02, 1.4071e-01, 1.3606e-01,
1.7481e-01, 2.0210e-01, 1.7241e-01,
-7.6295e-40, -7.8460e-40, -4.1806e-41,
-7.9994e-40, -7.3271e-40, -6.2665e-40,
-7.9602e-40, -7.0226e-40, -7.4131e-40,
-4.5544e-04, -5.2379e-04, -7.0755e-04,
-3.3807e-04, -3.8123e-04, -5.3222e-04,
-3.1771e-04, -3.4586e-04, -4.8784e-04,
-3.5257e-02, -1.1866e-02, 1.9717e-02,
-6.0777e-02, -7.3127e-03, -3.2825e-02,
-1.4952e-01, 3.2117e-01, -6.3786e-02,
-1.0255e-02, 1.2961e-01, -8.6823e-02,
1.6994e-01, 4.7491e-01, 2.7135e-01,
2.8538e-03, 1.5572e-01, -3.3736e-02,
8.5996e-02, -1.0176e-02, 2.6629e-02,
7.3362e-02, -7.7525e-03, 5.6261e-02,
1.0819e-01, -2.5863e-01, -5.7146e-03,
-7.1781e-02, 2.8376e-03, 7.8298e-02,
1.3183e-01, 2.7149e-02, -9.9786e-02,
9.0491e-02, 8.7938e-02, -2.1882e-02,
4.1396e-03, -4.5816e-02, -7.8892e-02,
-6.3855e-03, 1.7502e-01, 1.2053e-01,
1.2492e-01, 6.1258e-02, -4.0516e-02,
-4.5409e-02, -4.5877e-02, -7.6414e-02,
-1.0573e-02, -1.2517e-01, -4.3991e-02,
-2.6447e-02, -9.5478e-02, -2.4735e-02,
-4.6548e-41, -1.6443e-40, -3.1221e-40,
-3.2675e-40, -2.7265e-40, -3.1190e-40,
-2.2065e-40, -2.5407e-40, -6.9511e-40,
-1.2727e-04, -2.6585e-04, -3.5516e-04,
3.4272e-05, -1.6810e-04, -3.1677e-04,
-5.5355e-05, -2.9924e-04, -4.3692e-04,
-5.6428e-02, 1.0771e-01, 1.0185e-01,
2.2948e-01, -7.8744e-02, 6.0768e-04,
-2.2355e-03, -2.0128e-03, -5.7317e-03,
-7.1232e-03, 1.0297e-01, 1.6872e-01,
1.9194e-01, -1.1578e-01, 1.0732e-01,
-8.6952e-02, 3.2901e-02, -6.6658e-03,
7.3979e-02, 8.3875e-02, -7.6372e-03,
1.9577e-01, 2.7391e-01, 4.5275e-02,
1.5610e-01, 2.3802e-01, 1.6555e-02,
1.3814e-01, 1.2870e-01, 9.1626e-02,
-4.6890e-02, -8.8734e-02, 7.8866e-02,
1.0027e-01, 2.2139e-01, 1.0050e-01,
-6.5845e-02, -1.0990e-01, -6.9896e-02,
4.1687e-02, 3.0631e-02, -8.8441e-02,
-1.1868e-01, 1.0836e-02, 2.5873e-02,
-1.7114e-02, 7.6295e-02, 1.5439e-02,
-2.4271e-02, 5.8538e-02, 9.8190e-02,
4.9742e-02, 8.7807e-02, 6.5871e-02,
-7.2669e-40, -7.5936e-41, -7.4975e-40,
-1.6984e-42, -1.7334e-40, -8.4954e-41,
-2.1556e-41, -1.5374e-40, -1.5515e-40,
-6.2626e-04, -7.2727e-04, -8.1665e-04,
-5.6584e-04, -6.1190e-04, -6.9584e-04,
-5.6278e-04, -5.8554e-04, -6.3554e-04,
8.1550e-02, -4.1817e-03, 1.2301e-02,
-4.5800e-02, 4.6708e-02, -8.7972e-02,
-2.9880e-01, 2.6456e-01, 3.9363e-03,
-3.0939e-02, -1.9921e-01, -3.8689e-03,
-8.6803e-02, 3.4857e-01, -1.0201e-01,
2.1597e-02, 1.4380e-02, 4.3448e-02,
7.1195e-02, 1.4980e-01, 3.8079e-02,
-1.2678e-01, -8.1274e-02, -4.3445e-02,
5.2482e-02, -1.8763e-01, 1.1557e-01,
-9.4614e-02, 5.4415e-02, -3.1485e-02,
-3.6451e-02, 1.4379e-01, 5.2291e-02,
-9.2069e-02, 9.5675e-02, -5.8433e-02,
7.5768e-03, -7.1280e-02, -1.4576e-01,
-1.4671e-01, -1.2446e-01, -1.5207e-01,
-5.4368e-02, 3.8303e-02, -8.1794e-02,
2.0492e-02, 4.0910e-02, 1.1379e-02,
3.1582e-02, 3.6039e-02, -4.4040e-03,
1.7540e-02, 1.4097e-04, -6.4367e-02,
-7.9553e-40, -5.3941e-40, -7.1912e-40,
-5.8099e-40, -6.8315e-40, -6.6012e-40,
-7.6242e-40, -5.4784e-40, -7.0267e-40,
-2.9197e-04, -2.1994e-04, -1.9501e-04,
-2.6516e-05, -1.2642e-05, -8.4345e-05,
1.6763e-04, 1.1268e-04, -5.4516e-05,
-3.8007e-03, -6.8765e-02, -9.5716e-02,
6.3091e-02, -8.1971e-02, -9.2895e-02,
-6.8353e-03, 7.3639e-02, 1.3505e-01,
9.0083e-02, 2.4352e-01, 3.9708e-02,
-5.4051e-02, -6.8748e-02, -1.8937e-01,
-1.9808e-03, -7.1337e-02, -2.8316e-02,
8.1504e-02, 8.3226e-03, 6.9013e-03,
9.4393e-02, 5.9322e-02, 5.5023e-02,
1.0236e-01, -4.0205e-02, 3.5172e-02,
6.5381e-02, 4.9075e-02, -5.3931e-02,
4.3961e-02, 9.0223e-03, -4.1678e-02,
-6.4262e-02, -5.0304e-02, -9.3597e-02
}
,
{
3.8496e-01, 1.4287e-01, 3.4530e-02,
-5.5398e-01, -6.0381e-02, 1.2078e-02,
7.9983e-02, 2.1478e-01, -5.7915e-02,
-1.4020e-01, -2.6914e-02, 1.5915e-02,
1.2371e-01, 2.5496e-01, -2.9867e-02,
1.3269e-02, -9.9596e-02, -2.3173e-01,
5.1471e-02, -4.5507e-01, -7.7620e-02,
-5.1328e-02, -1.9808e-02, -4.7051e-02,
3.0573e-02, 7.8762e-02, -7.2627e-02,
6.8690e-02, -4.0125e-02, 5.6657e-02,
8.0208e-02, -2.0075e-02, 1.4019e-01,
-5.7959e-02, -7.3152e-02, 2.0202e-02,
-8.8702e-02, -1.9911e-01, -1.5570e-01,
2.8401e-02, 5.8802e-02, 1.3050e-01,
2.1905e-02, -3.4298e-02, 4.0447e-02,
1.0184e-01, -9.0101e-02, -9.2770e-02,
1.1713e-02, -3.2514e-01, 1.9393e-01,
-9.4227e-02, 2.7053e-01, -9.7233e-02,
-1.0478e-01, 6.0652e-02, 8.3399e-02,
1.1104e-01, 2.9008e-01, 4.9208e-02,
-1.5414e-02, 3.1718e-02, -7.9083e-02,
-5.2358e-03, 9.0101e-02, 5.2973e-02,
5.5527e-02, -1.6599e-02, -8.5167e-02,
-5.1018e-02, 7.2243e-03, -9.5684e-02,
-5.0608e-02, -6.7864e-02, -8.9496e-02,
-2.4348e-01, 2.7477e-01, -1.7588e-01,
1.3927e-01, 5.5502e-02, -1.3370e-02,
-4.3509e-02, -2.1511e-01, -5.9070e-02,
1.0293e-01, 4.2678e-01, -8.7527e-02,
-6.8546e-02, -5.6296e-02, -8.7962e-02,
-8.6130e-02, 9.2069e-02, 7.2303e-02,
2.4365e-02, 2.1988e-01, -7.9408e-03,
-3.0063e-02, 1.1554e-01, -5.0311e-02,
1.0605e-02, 5.4598e-02, 1.3826e-02,
-1.4342e-02, 1.5353e-01, -5.3974e-03,
1.5583e-01, -6.0889e-02, -1.5772e-02,
-2.5956e-02, -3.5285e-01, -2.0338e-01,
2.6011e-01, 2.2737e-01, -1.4693e-01,
-7.7964e-02, 1.0053e-01, -5.4278e-02,
-3.0668e-02, 3.4556e-02, -3.4321e-02,
7.8695e-02, -2.2357e-01, 9.5733e-02,
1.7483e-01, -1.5153e-01, -1.8262e-03,
4.7605e-02, -2.2834e-01, 4.6383e-02,
1.5701e-01, 3.2264e-01, 1.0334e-02,
6.3351e-02, 1.1340e-01, 8.3478e-02,
6.4196e-02, 3.3460e-02, 8.8473e-02,
5.4663e-02, -1.7665e-03, -4.1935e-02,
-6.1346e-03, -5.4463e-02, -6.2960e-02,
2.8159e-02, 2.9903e-02, 9.2429e-03,
-3.0041e-02, -9.7783e-02, -4.9500e-02,
9.5350e-02, -7.9143e-02, -1.3244e-01,
-6.5129e-02, 1.4568e-01, 6.6843e-02,
1.5241e-01, -7.8736e-02, 1.0721e-01,
-5.9015e-02, 1.5320e-01, 3.0796e-01,
-5.4266e-03, -6.0804e-02, 3.7326e-02,
7.4844e-02, 4.8340e-02, 1.5251e-01,
3.8158e-02, 1.2087e-01, -8.9003e-02,
-5.8369e-02, -7.3813e-02, 1.2240e-02,
-4.5106e-03, 7.4580e-02, 1.2042e-01,
4.1959e-02, 1.4529e-01, 5.3636e-03,
-4.9708e-03, -1.0775e-02, -5.9374e-02,
1.5358e-02, 1.7277e-02, -1.5412e-01,
8.1647e-02, 3.3503e-02, -8.1934e-02,
-1.5807e-02, -1.0001e-02, -1.0059e-02,
-9.0493e-03, -7.8954e-02, 4.3891e-02,
-9.3815e-03, 3.2241e-02, 4.7962e-02,
-7.2252e-03, 7.9324e-02, 2.0662e-02,
-5.7710e-02, -5.1142e-02, -1.4296e-01,
2.1501e-02, -1.9518e-02, -2.7658e-02,
1.4983e-01, 8.5447e-02, 7.2092e-04,
1.1275e-01, 6.1131e-02, 5.7955e-02,
1.5624e-02, 2.7225e-01, 1.1716e-01,
-1.6322e-04, -1.3368e-04, -1.5575e-04,
-1.0525e-04, -1.0765e-04, -1.5306e-04,
-8.9692e-05, -1.0857e-04, -1.7316e-04,
-1.8015e-03, -1.3733e-03, -3.9154e-04,
-1.8453e-03, -1.4238e-03, -4.4163e-04,
-1.5511e-03, -1.1131e-03, -2.0087e-04,
-2.4082e-03, -2.2576e-03, -1.9231e-03,
-2.4913e-03, -2.4136e-03, -2.1678e-03,
-2.5057e-03, -2.4650e-03, -2.2732e-03,
-2.3901e-05, -1.5870e-05, -5.8255e-06,
-1.5163e-05, -1.2370e-05, -6.0712e-06,
-1.3098e-05, -1.1132e-05, -5.7866e-06,
-5.9760e-03, -5.9998e-03, -6.0295e-03,
-5.9962e-03, -6.0100e-03, -6.0277e-03,
-6.0003e-03, -6.0059e-03, -6.0148e-03,
-3.2764e-05, -2.9574e-05, -2.8001e-05,
-1.0846e-05, -1.1569e-05, -1.4282e-05,
-1.6255e-06, -2.5666e-06, -4.7808e-06,
-5.1999e-03, -5.2334e-03, -5.2847e-03,
-5.2057e-03, -5.2283e-03, -5.2713e-03,
-5.2195e-03, -5.2321e-03, -5.2633e-03,
-3.0782e-06, -9.2118e-06, -1.6177e-05,
-1.6382e-06, -6.9559e-06, -1.4245e-05,
-1.1471e-06, -6.5984e-06, -1.4903e-05,
7.7574e-02, -1.2866e-02, 4.1348e-03,
-6.7298e-02, -1.3691e-01, 6.4079e-02,
3.7962e-02, 8.7737e-02, -4.1046e-02,
-2.8471e-02, 1.7647e-01, 6.4232e-02,
1.2316e-01, 3.6800e-01, -1.5740e-01,
-6.0839e-02, 1.5449e-02, -1.0761e-01,
-6.6869e-02, -1.2867e-01, -4.0195e-02,
-4.9651e-02, -5.5500e-02, -2.5879e-02,
2.0179e-02, 6.8467e-02, 2.6575e-02,
-6.7728e-04, -7.6269e-02, 2.3470e-02,
7.1869e-02, -1.1855e-01, -2.1067e-02,
1.3263e-01, -3.2957e-02, -3.4365e-03,
8.1936e-02, 1.3073e-01, 1.1477e-01,
1.2429e-01, 1.6129e-01, 1.6251e-01,
1.5476e-02, 3.2862e-02, 2.1999e-02,
-2.9189e-02, -3.3615e-02, 5.5616e-04,
-2.4059e-02, -9.6181e-03, -4.1175e-02,
-6.3680e-04, -9.6559e-02, -9.1448e-02,
3.0238e-02, 1.2534e-01, 1.5256e-02,
-4.2118e-02, 1.5723e-01, 2.6929e-03,
1.9873e-02, 5.3050e-02, -1.0153e-03,
2.0634e-02, 9.2825e-03, -6.8027e-03,
3.1335e-03, -7.7443e-03, -1.8307e-02,
7.9974e-03, -1.0283e-03, -6.2520e-03,
4.5050e-02, 9.9504e-02, -1.3404e-01,
-6.7271e-01, -5.7290e-02, 2.6919e-02,
2.3673e-01, 2.4688e-02, -2.0227e-02,
5.1389e-02, -3.9810e-02, -8.9700e-02,
2.8445e-02, 3.9136e-01, -1.1508e-01,
-1.0449e-01, -6.2005e-02, 6.5721e-02,
-1.9123e-01, -4.2613e-02, 3.5371e-02,
1.9207e-01, 8.7916e-02, 4.8089e-02,
-5.7912e-02, 1.0014e-01, -9.4659e-02,
1.1240e-02, -6.2254e-03, 1.3399e-01,
1.6483e-01, -3.5079e-01, 1.1612e-02,
2.9215e-01, 5.6875e-02, 6.9505e-02,
1.3721e-02, 1.2607e-01, 2.6426e-02,
-2.0529e-01, 2.1768e-01, 2.1232e-01,
-6.3574e-02, 2.3504e-02, -1.0811e-01,
-1.3470e-02, -3.6446e-02, -5.4379e-02,
-1.3257e-01, -8.3412e-02, 3.7745e-02,
5.8778e-02, -2.6060e-01, 3.8262e-02,
-4.3689e-03, -6.6703e-02, -2.2025e-01,
-9.0961e-02, 1.3855e-01, 3.4573e-04,
-2.9613e-01, -3.6138e-02, -1.3827e-01,
4.5896e-02, -5.3871e-02, -1.0037e-01,
1.8457e-01, 1.0338e-01, -5.7306e-02,
5.5510e-02, -9.4938e-02, -5.6527e-05,
1.6372e-01, -3.3854e-02, 5.6332e-02,
-4.0251e-01, -5.9428e-02, -9.1470e-02,
-1.5921e-02, -5.7948e-02, 8.1682e-03,
-3.7833e-03, 1.6293e-01, 5.3784e-02,
1.1053e-01, -1.3867e-01, 2.6772e-02,
-1.3133e-02, 3.7614e-01, 3.6361e-03,
-1.4205e-01, 3.1312e-02, -9.9928e-02,
-1.5755e-01, 4.2016e-01, 9.4065e-02,
2.7536e-02, 1.2620e-01, -1.4894e-01,
-4.2137e-02, -9.8700e-02, -1.7479e-01,
4.5836e-02, 5.3893e-02, -1.0138e-01,
8.3609e-02, 2.1849e-02, -1.0648e-01,
7.4801e-02, -1.2671e-01, -1.5007e-02,
2.7440e-01, -3.1351e-01, 6.5787e-02,
-6.7820e-02, 1.6312e-01, -1.3254e-02,
-2.5770e-02, -2.0041e-02, 5.8243e-02,
1.6055e-02, 1.1971e-02, -4.6112e-02,
-1.6276e-01, -1.5313e-02, -7.9826e-03,
9.1668e-02, 9.7722e-02, 1.3754e-01,
-7.4817e-02, -4.1923e-01, -1.2337e-01,
1.3472e-01, -4.0745e-02, -5.4055e-02,
-1.2943e-02, 4.8796e-02, 4.2007e-02,
9.4668e-02, 8.6149e-02, 1.2362e-01,
7.0637e-02, 2.3565e-01, 1.4582e-01,
5.6904e-02, -8.2166e-02, 1.0563e-01,
9.3969e-02, -2.2909e-01, 4.6537e-02,
6.5257e-02, 1.4804e-01, -6.2092e-02,
-1.5699e-02, -1.5303e-02, 1.6671e-01,
-6.1947e-03, 2.5749e-01, 1.5257e-01,
3.2908e-02, -5.9907e-02, 1.1502e-01,
7.5876e-02, -2.6699e-01, -1.5891e-02,
-8.0426e-02, 1.3406e-01, -1.9881e-02,
3.5472e-02, -8.2140e-02, 1.6509e-02,
8.3390e-03, -7.8291e-02, -2.0754e-01,
3.4490e-02, 2.7913e-01, 5.9566e-02,
2.5288e-02, 1.1725e-01, -1.0356e-01,
-5.0955e-02, 9.2093e-02, -5.8477e-02,
4.4325e-02, 3.2973e-02, -1.9477e-01,
3.9582e-02, -8.6877e-02, -1.1753e-01,
3.0401e-02, -2.8757e-02, -2.5563e-02,
5.0741e-02, -3.5056e-01, -2.5584e-01,
9.1709e-02, -4.0932e-02, 2.3812e-01,
5.0945e-02, 4.9246e-02, 1.2738e-01,
5.1440e-03, 1.5703e-01, 5.5743e-02,
-3.9492e-02, 1.2114e-01, 2.0531e-02,
8.0800e-02, 2.6680e-03, -1.6660e-02,
1.0684e-01, 1.2308e-01, 1.7882e-02,
1.8280e-02, 1.0972e-01, -5.2912e-03
}
,
{
-1.3812e-02, -4.6271e-02, 7.3790e-02,
-6.3801e-02, -3.6817e-01, -1.7880e-02,
5.2986e-02, 1.8626e-01, 1.5645e-03,
1.2367e-02, -6.2923e-02, 3.0844e-02,
9.3623e-02, 1.9527e-01, -2.6366e-02,
-2.0837e-02, -3.4424e-02, 4.0256e-02,
4.1482e-02, 6.1795e-02, -1.1293e-02,
-8.9944e-02, -1.3608e-01, 1.8067e-02,
3.6974e-02, 5.2530e-03, -2.7474e-02,
1.1872e-05, 1.9000e-05, 2.0729e-05,
1.0139e-05, 1.6832e-05, 1.9392e-05,
6.5445e-06, 1.0973e-05, 1.3521e-05,
-5.3340e-02, 1.3108e-03, 4.0436e-02,
5.7068e-02, -2.7923e-02, -5.4781e-02,
-2.9293e-02, 2.7145e-02, 2.7340e-02,
5.3520e-03, 1.8766e-02, 4.0297e-01,
2.6473e-02, -3.4675e-02, -1.1783e-01,
-2.5038e-02, -1.7702e-02, -3.4908e-02,
1.4847e-02, 2.3237e-01, -6.3687e-02,
-6.5672e-02, -2.1888e-01, -1.7233e-02,
4.0608e-02, -6.9580e-02, -2.2200e-02,
5.8163e-02, 1.3695e-01, -2.6257e-02,
-1.3328e-01, -3.5730e-01, 2.4507e-02,
-4.5611e-03, 2.0424e-01, -3.9821e-02,
5.5300e-02, -1.6006e-01, 1.1717e-01,
-2.6107e-02, -8.6995e-02, 8.3720e-02,
7.5494e-02, 3.2189e-01, 1.5527e-01,
-6.6869e-02, 1.4469e-01, 5.1805e-02,
9.8760e-02, -1.6759e-01, -1.2350e-01,
5.7005e-02, 8.4904e-02, 8.9713e-02,
-1.4263e-02, 2.8914e-02, 3.2239e-02,
-2.4871e-02, 5.6014e-02, -4.4469e-02,
3.1209e-02, 1.3677e-02, -2.1052e-02,
-1.6548e-03, -1.8796e-03, -1.9883e-03,
-1.6186e-03, -1.8494e-03, -1.9670e-03,
-1.5841e-03, -1.8173e-03, -1.9345e-03,
3.5726e-02, 1.8013e-01, 1.6913e-02,
-1.2168e-01, -6.3848e-02, 3.0555e-02,
3.0269e-02, -1.0260e-01, -1.5259e-02,
-4.7375e-03, 5.5115e-02, 6.2642e-01,
9.9776e-03, -2.1988e-01, -2.0984e-01,
7.0470e-03, 6.3178e-02, -1.3607e-02,
1.1918e-01, -2.4081e-01, 1.7889e-01,
-1.0514e-01, 2.9220e-01, -1.3263e-01,
5.6091e-03, -4.1623e-02, 2.5589e-02,
-1.8496e-01, 2.7698e-02, -6.5768e-02,
2.9677e-01, 4.4163e-02, 5.8530e-02,
-1.1010e-01, -7.6787e-02, 3.9844e-02,
5.2113e-03, -1.8202e-02, 1.4129e-03,
-6.1402e-03, -2.7222e-01, 7.4690e-02,
1.9131e-02, 2.2753e-01, 1.9587e-02,
-2.7391e-02, 6.7917e-03, 2.0496e-03,
6.7333e-02, 7.8262e-02, 2.1110e-03,
-5.4519e-02, 3.0763e-02, 1.5628e-02,
9.5055e-02, 3.8855e-02, 1.2446e-02,
-1.5152e-01, 7.8124e-02, -1.2616e-02,
9.3100e-03, -1.6528e-02, -1.2873e-02,
-1.8377e-03, -1.9231e-03, -1.8930e-03,
-1.8058e-03, -1.8841e-03, -1.8678e-03,
-1.7387e-03, -1.7966e-03, -1.7781e-03,
-4.5122e-02, 1.7027e-03, -3.5534e-03,
8.5222e-03, 1.0130e-01, 4.7893e-02,
6.5574e-02, 7.2150e-03, -2.1820e-03,
-5.5105e-03, -1.8990e-01, 2.6527e-02,
6.6140e-03, 2.1537e-01, -2.2183e-02,
-8.0628e-03, 6.8398e-03, 9.4474e-03,
1.2239e-01, -1.3337e-01, 7.3391e-02,
-1.2205e-01, 1.3145e-01, -2.0063e-02,
2.2168e-02, 3.6097e-03, 2.7146e-02,
4.6717e-02, 2.1122e-02, 1.5491e-02,
-1.3077e-01, 1.1635e-01, 1.0849e-02,
8.0113e-02, -8.4028e-02, 1.2863e-03,
-2.9796e-02, -8.4537e-02, -2.6766e-03,
-7.7771e-03, -2.4274e-03, 8.6274e-02,
-2.0354e-02, 4.1245e-02, 8.4227e-02,
5.5894e-02, 1.0706e-01, 5.2965e-02,
-7.8731e-03, 5.5825e-01, 1.0373e-01,
-1.1975e-01, -2.0071e-02, -2.5286e-02,
-7.7477e-02, 5.3589e-02, -1.5710e-03,
-1.2753e-01, 2.5166e-01, 8.2205e-03,
-9.8349e-02, -4.9539e-02, -5.4941e-02,
-4.9916e-03, -4.9986e-03, -5.0660e-03,
-4.9770e-03, -4.9840e-03, -5.0543e-03,
-4.9997e-03, -5.0114e-03, -5.0809e-03,
6.1819e-02, 1.5061e-01, 1.1984e-02,
1.2905e-01, 2.5921e-01, 1.4768e-01,
4.5548e-02, 1.4902e-01, -4.8961e-03,
-1.3605e-02, 8.2896e-02, -4.1931e-01,
-2.2657e-02, 2.4768e-01, 2.6528e-01,
-1.1566e-02, -8.7819e-03, 4.3618e-02,
-3.4332e-02, -1.8392e-01, 4.4471e-02,
-3.7073e-02, -5.4620e-02, 1.0899e-01,
3.7891e-02, 9.9487e-02, 3.2383e-02,
-6.3628e-02, -5.0303e-03, 5.4617e-02,
-8.7802e-02, 2.1977e-01, -6.0249e-03,
6.3554e-02, -5.4291e-02, -2.6709e-02,
-1.5505e-02, -6.7104e-02, 3.8607e-02,
-1.1427e-01, -3.2524e-01, 4.0077e-02,
-6.5144e-03, 1.2313e-01, -2.7924e-02,
1.4265e-02, -3.8338e-02, 8.6780e-02,
1.5341e-01, 1.2174e-01, -7.3160e-02,
2.6326e-04, 7.3690e-02, 5.2187e-02,
-3.3114e-02, -3.6588e-02, 1.1635e-02,
-3.3521e-02, 1.0767e-01, -8.9125e-03,
-2.2431e-02, -4.5655e-03, 7.5531e-03,
6.7227e-04, 7.2856e-04, 7.3907e-04,
6.5335e-04, 7.0702e-04, 7.1233e-04,
6.1540e-04, 6.7286e-04, 6.7797e-04,
-3.1496e-02, 6.0514e-02, 4.2013e-02,
-2.8617e-02, 1.4846e-02, 4.0016e-03,
4.7006e-03, -4.0017e-02, -3.0411e-02,
-9.6037e-03, 8.8522e-02, 9.8616e-02,
4.1297e-02, -3.2645e-01, -7.6144e-03,
-1.0711e-02, 3.9324e-02, 4.0144e-02,
5.2899e-02, -7.8668e-02, -5.4798e-02,
-2.0428e-01, 5.7238e-02, -3.6937e-02,
-3.6103e-02, -8.2683e-02, -2.8101e-02,
8.2479e-02, 5.7766e-02, -1.2019e-01,
-3.8373e-01, 6.8272e-02, -1.1758e-02,
5.1129e-02, -2.7931e-01, 4.5608e-02,
-2.5151e-02, -5.0816e-02, 1.7231e-02,
-3.6376e-02, 1.5916e-01, 2.9192e-02,
-4.1947e-02, 5.3183e-02, -9.7289e-02,
4.6138e-02, 7.0842e-02, 1.6673e-02,
-1.7243e-03, 2.7203e-01, 3.8262e-02,
-1.4000e-01, -7.3793e-02, -2.0050e-02,
-1.8750e-02, -8.5319e-02, -3.0858e-02,
-5.9981e-02, 1.2729e-01, 1.4094e-02,
-5.4088e-02, -2.3694e-02, -9.7485e-03,
-4.7840e-03, -4.8359e-03, -4.8727e-03,
-4.7882e-03, -4.8380e-03, -4.8755e-03,
-4.7859e-03, -4.8321e-03, -4.8633e-03,
4.9511e-02, 1.0935e-01, -3.7430e-03,
1.1834e-01, 7.7243e-02, 4.3074e-02,
6.7446e-02, 2.9734e-02, -1.1276e-02,
-2.0080e-02, 1.3561e-01, -1.3455e-01,
-1.4505e-02, 2.2100e-01, 4.9635e-02,
-1.0040e-02, 3.4560e-02, -7.4607e-03,
-6.8873e-02, -5.6221e-02, 1.2255e-02,
-2.9198e-02, 7.1612e-02, 2.9402e-02,
4.1036e-02, 4.6417e-02, 6.0284e-03,
-6.5261e-02, 2.1426e-03, 2.4192e-02,
-1.6073e-03, -6.2222e-03, -1.8295e-02,
2.4952e-04, -2.0623e-02, -3.3064e-03,
5.9188e-02, -4.8839e-02, 7.9840e-02,
-6.7952e-02, -4.7191e-01, 1.5117e-01,
1.5668e-01, 2.4733e-01, 1.1354e-01,
1.7742e-02, -4.4059e-02, 9.5374e-03,
3.2049e-01, -1.3779e-01, 9.6608e-02,
8.4580e-02, 1.4293e-01, 6.1574e-02,
2.8777e-03, 7.8795e-02, -5.1902e-02,
1.2212e-01, 1.0321e-01, 3.2360e-02,
-9.6617e-02, 7.8941e-03, -7.0876e-02,
3.5869e-03, 3.5891e-03, 3.5923e-03,
3.5746e-03, 3.5840e-03, 3.5967e-03,
3.5785e-03, 3.5932e-03, 3.6080e-03,
1.5454e-03, 3.0582e-03, 4.3737e-02,
-5.9833e-02, -1.1247e-01, 4.4380e-02,
-1.3206e-01, 8.2778e-03, 4.7963e-02,
-4.3720e-02, -7.5722e-03, 2.0510e-01,
3.0133e-02, -4.0506e-01, 2.7867e-01,
5.5586e-02, 2.8926e-02, 1.3360e-03,
1.9490e-05, 3.3326e-01, -7.7241e-02,
-1.5648e-01, 1.5195e-01, -1.3995e-01,
8.6519e-02, 1.0447e-01, -4.1413e-02,
-3.8667e-03, 1.6159e-01, 1.1627e-01,
-2.2646e-01, -3.4758e-02, -6.7956e-03,
-3.2689e-01, 1.9606e-01, -9.1523e-02,
1.1238e-02, 1.5084e-03, 4.2113e-02,
-1.1154e-02, -3.6596e-01, -7.2252e-02,
6.6621e-02, 1.0188e-01, 4.1032e-01,
3.5892e-02, -4.8304e-02, 6.6142e-03,
1.3374e-01, 2.2720e-01, -7.1224e-02,
6.8952e-02, 2.0467e-01, 5.0251e-02,
-6.2016e-02, 2.2175e-01, -1.7764e-02,
2.7542e-02, 1.4905e-01, 3.6637e-02,
-7.2231e-02, 5.0271e-03, -7.1823e-02,
3.5760e-03, 3.5540e-03, 3.5692e-03,
3.5664e-03, 3.5490e-03, 3.5689e-03,
3.5671e-03, 3.5619e-03, 3.5864e-03,
2.7470e-02, -3.9752e-02, 4.1063e-02,
-2.4985e-02, -1.7969e-01, 8.2186e-02,
-5.4251e-02, -5.9651e-03, 2.5079e-02,
-2.1197e-02, 2.5426e-02, 1.3585e-01,
-1.3460e-02, -1.1377e-01, 1.2278e-01,
3.6533e-02, 1.2843e-02, 5.6219e-02,
5.8141e-04, 2.8354e-01, -6.2016e-02,
-1.0289e-01, 1.8724e-01, -9.9475e-02,
5.1193e-02, 7.5986e-02, -1.2951e-03,
-8.2587e-02, 1.8498e-01, 1.0891e-01,
1.3538e-01, -4.7728e-01, 1.0868e-01,
-8.6415e-02, -1.7061e-01, 1.0457e-02
}
};
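// Per-layer bias terms for the HDN level 3 ACNet model: one row of 8
// output-channel biases for each of the 8 convolution layers declared above.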
static __device__ __constant__ const float HDNL3biasL[8][8] =
{
{
-0.1175, -0.0258, -0.0053, -0.0437, -0.0563, -0.1047, -0.3449, 0.0568
}
,
{
0.0339, -0.1738, 0.0061, 0.1565, -0.0316, -0.0016, -0.0032, -0.0554
}
,
{
-0.0508, -0.0609, 0.0347, -0.0802, -0.0438, 0.2512, -0.0491, -0.0259
}
,
{
0.0655, 0.0255, 0.0228, -0.0027, -0.0155, -0.0163, -0.0174, -0.1095
}
,
{
4.9947e-03, 5.3372e-03, -4.5286e-09, -1.3756e-03, 3.8858e-03, -4.4197e-02, 3.3970e-02, 2.8411e-02
}
,
{
-0.0396, 0.0007, 0.1735, 0.0109, 0.1177, 0.0919, 0.0567, -0.0005
}
,
{
0.0127, -0.0688, 0.1102, -0.0052, 0.1602, -0.0191, -0.0322, 0.0311
}
,
{
0.0063, 0.0093, 0.0729, 0.3734, 0.0006, 0.1915, 0.3186, 0.2636
}
};
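// Weights of the final 2x sub-pixel (transposed) convolution: 8 input
// channels x 4 sub-pixel positions, indexed as
// HDNL3kernelsL10[channel * 4 + subpixel] by the convTranspose8To1HDNL3
// kernels below.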
static __device__ __constant__ const float HDNL3kernelsL10[4 * 8] =
{
-0.0967, -0.3094,
0.3537, 0.5705,
0.2547, 0.3360,
-0.0718, -0.0700,
-0.3013, -0.1602,
0.4520, 0.0495,
0.1564, 0.3773,
-0.0216, 0.4367,
-0.4855, -0.1972,
-0.2026, -0.4390,
0.3743, -0.1156,
0.4408, -0.3123,
-0.3577, 0.0753,
-0.3396, 0.0336,
0.1052, -0.4180,
0.0799, -0.3587
};
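// First-layer kernels: sample a 3x3 luma neighborhood from the input texture
// (border addressing returns 0 outside the image; normalized-float reads map
// 8-bit pixels to [0, 1]) and emit 8 ReLU-activated feature maps, packed as
// two float4 layers of a layered surface. The four variants differ only in
// the HDN level passed to CHANNEL1TO8, which selects the matching
// HDNL<n>kernelsL / HDNL<n>biasL tables.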
__global__ static void conv1To8HDNL0(
hipTextureObject_t srcImg, hipSurfaceObject_t dstImg,
int W, int H)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
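// 3x3 neighborhood around (x, y); out-of-range taps read as 0 via border addressing.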
float tl = tex2D<float>(srcImg, x - 1, y - 1);
float tc = tex2D<float>(srcImg, x, y - 1);
float tr = tex2D<float>(srcImg, x + 1, y - 1);
float ml = tex2D<float>(srcImg, x - 1, y);
float mc = tex2D<float>(srcImg, x, y);
float mr = tex2D<float>(srcImg, x + 1, y);
float bl = tex2D<float>(srcImg, x - 1, y + 1);
float bc = tex2D<float>(srcImg, x, y + 1);
float br = tex2D<float>(srcImg, x + 1, y + 1);
float4 c1234 = make_float4(
RELU(CHANNEL1TO8(0, 0)),
RELU(CHANNEL1TO8(1, 0)),
RELU(CHANNEL1TO8(2, 0)),
RELU(CHANNEL1TO8(3, 0))
);
float4 c5678 = make_float4(
RELU(CHANNEL1TO8(4, 0)),
RELU(CHANNEL1TO8(5, 0)),
RELU(CHANNEL1TO8(6, 0)),
RELU(CHANNEL1TO8(7, 0))
);
surf2DLayeredwrite(c1234, dstImg, __umul24(sizeof(c1234), x), y, 0, hipBoundaryModeZero);
surf2DLayeredwrite(c5678, dstImg, __umul24(sizeof(c5678), x), y, 1, hipBoundaryModeZero);
}
__global__ static void conv1To8HDNL1(
hipTextureObject_t srcImg, hipSurfaceObject_t dstImg,
int W, int H)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
float tl = tex2D<float>(srcImg, x - 1, y - 1);
float tc = tex2D<float>(srcImg, x, y - 1);
float tr = tex2D<float>(srcImg, x + 1, y - 1);
float ml = tex2D<float>(srcImg, x - 1, y);
float mc = tex2D<float>(srcImg, x, y);
float mr = tex2D<float>(srcImg, x + 1, y);
float bl = tex2D<float>(srcImg, x - 1, y + 1);
float bc = tex2D<float>(srcImg, x, y + 1);
float br = tex2D<float>(srcImg, x + 1, y + 1);
float4 c1234 = make_float4(
RELU(CHANNEL1TO8(0, 1)),
RELU(CHANNEL1TO8(1, 1)),
RELU(CHANNEL1TO8(2, 1)),
RELU(CHANNEL1TO8(3, 1))
);
float4 c5678 = make_float4(
RELU(CHANNEL1TO8(4, 1)),
RELU(CHANNEL1TO8(5, 1)),
RELU(CHANNEL1TO8(6, 1)),
RELU(CHANNEL1TO8(7, 1))
);
surf2DLayeredwrite(c1234, dstImg, __umul24(sizeof(c1234), x), y, 0, hipBoundaryModeZero);
surf2DLayeredwrite(c5678, dstImg, __umul24(sizeof(c5678), x), y, 1, hipBoundaryModeZero);
}
__global__ static void conv1To8HDNL2(
hipTextureObject_t srcImg, hipSurfaceObject_t dstImg,
int W, int H)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
float tl = tex2D<float>(srcImg, x - 1, y - 1);
float tc = tex2D<float>(srcImg, x, y - 1);
float tr = tex2D<float>(srcImg, x + 1, y - 1);
float ml = tex2D<float>(srcImg, x - 1, y);
float mc = tex2D<float>(srcImg, x, y);
float mr = tex2D<float>(srcImg, x + 1, y);
float bl = tex2D<float>(srcImg, x - 1, y + 1);
float bc = tex2D<float>(srcImg, x, y + 1);
float br = tex2D<float>(srcImg, x + 1, y + 1);
float4 c1234 = make_float4(
RELU(CHANNEL1TO8(0, 2)),
RELU(CHANNEL1TO8(1, 2)),
RELU(CHANNEL1TO8(2, 2)),
RELU(CHANNEL1TO8(3, 2))
);
float4 c5678 = make_float4(
RELU(CHANNEL1TO8(4, 2)),
RELU(CHANNEL1TO8(5, 2)),
RELU(CHANNEL1TO8(6, 2)),
RELU(CHANNEL1TO8(7, 2))
);
surf2DLayeredwrite(c1234, dstImg, __umul24(sizeof(c1234), x), y, 0, hipBoundaryModeZero);
surf2DLayeredwrite(c5678, dstImg, __umul24(sizeof(c5678), x), y, 1, hipBoundaryModeZero);
}
__global__ static void conv1To8HDNL3(
hipTextureObject_t srcImg, hipSurfaceObject_t dstImg,
int W, int H)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
float tl = tex2D<float>(srcImg, x - 1, y - 1);
float tc = tex2D<float>(srcImg, x, y - 1);
float tr = tex2D<float>(srcImg, x + 1, y - 1);
float ml = tex2D<float>(srcImg, x - 1, y);
float mc = tex2D<float>(srcImg, x, y);
float mr = tex2D<float>(srcImg, x + 1, y);
float bl = tex2D<float>(srcImg, x - 1, y + 1);
float bc = tex2D<float>(srcImg, x, y + 1);
float br = tex2D<float>(srcImg, x + 1, y + 1);
float4 c1234 = make_float4(
RELU(CHANNEL1TO8(0, 3)),
RELU(CHANNEL1TO8(1, 3)),
RELU(CHANNEL1TO8(2, 3)),
RELU(CHANNEL1TO8(3, 3))
);
float4 c5678 = make_float4(
RELU(CHANNEL1TO8(4, 3)),
RELU(CHANNEL1TO8(5, 3)),
RELU(CHANNEL1TO8(6, 3)),
RELU(CHANNEL1TO8(7, 3))
);
surf2DLayeredwrite(c1234, dstImg, __umul24(sizeof(c1234), x), y, 0, hipBoundaryModeZero);
surf2DLayeredwrite(c5678, dstImg, __umul24(sizeof(c5678), x), y, 1, hipBoundaryModeZero);
}
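// Hidden-layer kernels: gather the 3x3 neighborhood of all 8 feature channels
// (two float4 surface layers) with zero boundary handling, then write the 8
// ReLU outputs back as two float4 layers. L presumably selects the hidden
// layer's weight/bias row inside the CHANNEL8TO8 macro defined earlier in
// this file.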
__global__ static void conv8To8HDNL0(
hipSurfaceObject_t srcImg, hipSurfaceObject_t dstImg,
int W, int H, int L)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
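// Neighborhood of layer 0 (channels 0-3) and layer 1 (channels 4-7).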
float4 tl1, tc1, tr1, ml1, mc1, mr1, bl1, bc1, br1;
float4 tl2, tc2, tr2, ml2, mc2, mr2, bl2, bc2, br2;
surf2DLayeredread(&tl1, srcImg, __umul24(sizeof(mc1), x - 1), y - 1, 0, hipBoundaryModeZero);
surf2DLayeredread(&tc1, srcImg, __umul24(sizeof(mc1), x), y - 1, 0, hipBoundaryModeZero);
surf2DLayeredread(&tr1, srcImg, __umul24(sizeof(mc1), x + 1), y - 1, 0, hipBoundaryModeZero);
surf2DLayeredread(&ml1, srcImg, __umul24(sizeof(mc1), x - 1), y, 0, hipBoundaryModeZero);
surf2DLayeredread(&mc1, srcImg, __umul24(sizeof(mc1), x), y, 0, hipBoundaryModeZero);
surf2DLayeredread(&mr1, srcImg, __umul24(sizeof(mc1), x + 1), y, 0, hipBoundaryModeZero);
surf2DLayeredread(&bl1, srcImg, __umul24(sizeof(mc1), x - 1), y + 1, 0, hipBoundaryModeZero);
surf2DLayeredread(&bc1, srcImg, __umul24(sizeof(mc1), x), y + 1, 0, hipBoundaryModeZero);
surf2DLayeredread(&br1, srcImg, __umul24(sizeof(mc1), x + 1), y + 1, 0, hipBoundaryModeZero);
surf2DLayeredread(&tl2, srcImg, __umul24(sizeof(mc2), x - 1), y - 1, 1, hipBoundaryModeZero);
surf2DLayeredread(&tc2, srcImg, __umul24(sizeof(mc2), x), y - 1, 1, hipBoundaryModeZero);
surf2DLayeredread(&tr2, srcImg, __umul24(sizeof(mc2), x + 1), y - 1, 1, hipBoundaryModeZero);
surf2DLayeredread(&ml2, srcImg, __umul24(sizeof(mc2), x - 1), y, 1, hipBoundaryModeZero);
surf2DLayeredread(&mc2, srcImg, __umul24(sizeof(mc2), x), y, 1, hipBoundaryModeZero);
surf2DLayeredread(&mr2, srcImg, __umul24(sizeof(mc2), x + 1), y, 1, hipBoundaryModeZero);
surf2DLayeredread(&bl2, srcImg, __umul24(sizeof(mc2), x - 1), y + 1, 1, hipBoundaryModeZero);
surf2DLayeredread(&bc2, srcImg, __umul24(sizeof(mc2), x), y + 1, 1, hipBoundaryModeZero);
surf2DLayeredread(&br2, srcImg, __umul24(sizeof(mc2), x + 1), y + 1, 1, hipBoundaryModeZero);
float4 c1234 = make_float4(
RELU(CHANNEL8TO8(0, 0)),
RELU(CHANNEL8TO8(1, 0)),
RELU(CHANNEL8TO8(2, 0)),
RELU(CHANNEL8TO8(3, 0))
);
float4 c5678 = make_float4(
RELU(CHANNEL8TO8(4, 0)),
RELU(CHANNEL8TO8(5, 0)),
RELU(CHANNEL8TO8(6, 0)),
RELU(CHANNEL8TO8(7, 0))
);
surf2DLayeredwrite(c1234, dstImg, __umul24(sizeof(c1234), x), y, 0, hipBoundaryModeZero);
surf2DLayeredwrite(c5678, dstImg, __umul24(sizeof(c5678), x), y, 1, hipBoundaryModeZero);
}
__global__ static void conv8To8HDNL1(
hipSurfaceObject_t srcImg, hipSurfaceObject_t dstImg,
int W, int H, int L)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
float4 tl1, tc1, tr1, ml1, mc1, mr1, bl1, bc1, br1;
float4 tl2, tc2, tr2, ml2, mc2, mr2, bl2, bc2, br2;
surf2DLayeredread(&tl1, srcImg, __umul24(sizeof(mc1), x - 1), y - 1, 0, hipBoundaryModeZero);
surf2DLayeredread(&tc1, srcImg, __umul24(sizeof(mc1), x), y - 1, 0, hipBoundaryModeZero);
surf2DLayeredread(&tr1, srcImg, __umul24(sizeof(mc1), x + 1), y - 1, 0, hipBoundaryModeZero);
surf2DLayeredread(&ml1, srcImg, __umul24(sizeof(mc1), x - 1), y, 0, hipBoundaryModeZero);
surf2DLayeredread(&mc1, srcImg, __umul24(sizeof(mc1), x), y, 0, hipBoundaryModeZero);
surf2DLayeredread(&mr1, srcImg, __umul24(sizeof(mc1), x + 1), y, 0, hipBoundaryModeZero);
surf2DLayeredread(&bl1, srcImg, __umul24(sizeof(mc1), x - 1), y + 1, 0, hipBoundaryModeZero);
surf2DLayeredread(&bc1, srcImg, __umul24(sizeof(mc1), x), y + 1, 0, hipBoundaryModeZero);
surf2DLayeredread(&br1, srcImg, __umul24(sizeof(mc1), x + 1), y + 1, 0, hipBoundaryModeZero);
surf2DLayeredread(&tl2, srcImg, __umul24(sizeof(mc2), x - 1), y - 1, 1, hipBoundaryModeZero);
surf2DLayeredread(&tc2, srcImg, __umul24(sizeof(mc2), x), y - 1, 1, hipBoundaryModeZero);
surf2DLayeredread(&tr2, srcImg, __umul24(sizeof(mc2), x + 1), y - 1, 1, hipBoundaryModeZero);
surf2DLayeredread(&ml2, srcImg, __umul24(sizeof(mc2), x - 1), y, 1, hipBoundaryModeZero);
surf2DLayeredread(&mc2, srcImg, __umul24(sizeof(mc2), x), y, 1, hipBoundaryModeZero);
surf2DLayeredread(&mr2, srcImg, __umul24(sizeof(mc2), x + 1), y, 1, hipBoundaryModeZero);
surf2DLayeredread(&bl2, srcImg, __umul24(sizeof(mc2), x - 1), y + 1, 1, hipBoundaryModeZero);
surf2DLayeredread(&bc2, srcImg, __umul24(sizeof(mc2), x), y + 1, 1, hipBoundaryModeZero);
surf2DLayeredread(&br2, srcImg, __umul24(sizeof(mc2), x + 1), y + 1, 1, hipBoundaryModeZero);
float4 c1234 = make_float4(
RELU(CHANNEL8TO8(0, 1)),
RELU(CHANNEL8TO8(1, 1)),
RELU(CHANNEL8TO8(2, 1)),
RELU(CHANNEL8TO8(3, 1))
);
float4 c5678 = make_float4(
RELU(CHANNEL8TO8(4, 1)),
RELU(CHANNEL8TO8(5, 1)),
RELU(CHANNEL8TO8(6, 1)),
RELU(CHANNEL8TO8(7, 1))
);
surf2DLayeredwrite(c1234, dstImg, __umul24(sizeof(c1234), x), y, 0, hipBoundaryModeZero);
surf2DLayeredwrite(c5678, dstImg, __umul24(sizeof(c5678), x), y, 1, hipBoundaryModeZero);
}
__global__ static void conv8To8HDNL2(
hipSurfaceObject_t srcImg, hipSurfaceObject_t dstImg,
int W, int H, int L)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
float4 tl1, tc1, tr1, ml1, mc1, mr1, bl1, bc1, br1;
float4 tl2, tc2, tr2, ml2, mc2, mr2, bl2, bc2, br2;
surf2DLayeredread(&tl1, srcImg, __umul24(sizeof(mc1), x - 1), y - 1, 0, hipBoundaryModeZero);
surf2DLayeredread(&tc1, srcImg, __umul24(sizeof(mc1), x), y - 1, 0, hipBoundaryModeZero);
surf2DLayeredread(&tr1, srcImg, __umul24(sizeof(mc1), x + 1), y - 1, 0, hipBoundaryModeZero);
surf2DLayeredread(&ml1, srcImg, __umul24(sizeof(mc1), x - 1), y, 0, hipBoundaryModeZero);
surf2DLayeredread(&mc1, srcImg, __umul24(sizeof(mc1), x), y, 0, hipBoundaryModeZero);
surf2DLayeredread(&mr1, srcImg, __umul24(sizeof(mc1), x + 1), y, 0, hipBoundaryModeZero);
surf2DLayeredread(&bl1, srcImg, __umul24(sizeof(mc1), x - 1), y + 1, 0, hipBoundaryModeZero);
surf2DLayeredread(&bc1, srcImg, __umul24(sizeof(mc1), x), y + 1, 0, hipBoundaryModeZero);
surf2DLayeredread(&br1, srcImg, __umul24(sizeof(mc1), x + 1), y + 1, 0, hipBoundaryModeZero);
surf2DLayeredread(&tl2, srcImg, __umul24(sizeof(mc2), x - 1), y - 1, 1, hipBoundaryModeZero);
surf2DLayeredread(&tc2, srcImg, __umul24(sizeof(mc2), x), y - 1, 1, hipBoundaryModeZero);
surf2DLayeredread(&tr2, srcImg, __umul24(sizeof(mc2), x + 1), y - 1, 1, hipBoundaryModeZero);
surf2DLayeredread(&ml2, srcImg, __umul24(sizeof(mc2), x - 1), y, 1, hipBoundaryModeZero);
surf2DLayeredread(&mc2, srcImg, __umul24(sizeof(mc2), x), y, 1, hipBoundaryModeZero);
surf2DLayeredread(&mr2, srcImg, __umul24(sizeof(mc2), x + 1), y, 1, hipBoundaryModeZero);
surf2DLayeredread(&bl2, srcImg, __umul24(sizeof(mc2), x - 1), y + 1, 1, hipBoundaryModeZero);
surf2DLayeredread(&bc2, srcImg, __umul24(sizeof(mc2), x), y + 1, 1, hipBoundaryModeZero);
surf2DLayeredread(&br2, srcImg, __umul24(sizeof(mc2), x + 1), y + 1, 1, hipBoundaryModeZero);
float4 c1234 = make_float4(
RELU(CHANNEL8TO8(0, 2)),
RELU(CHANNEL8TO8(1, 2)),
RELU(CHANNEL8TO8(2, 2)),
RELU(CHANNEL8TO8(3, 2))
);
float4 c5678 = make_float4(
RELU(CHANNEL8TO8(4, 2)),
RELU(CHANNEL8TO8(5, 2)),
RELU(CHANNEL8TO8(6, 2)),
RELU(CHANNEL8TO8(7, 2))
);
surf2DLayeredwrite(c1234, dstImg, __umul24(sizeof(c1234), x), y, 0, hipBoundaryModeZero);
surf2DLayeredwrite(c5678, dstImg, __umul24(sizeof(c5678), x), y, 1, hipBoundaryModeZero);
}
__global__ static void conv8To8HDNL3(
hipSurfaceObject_t srcImg, hipSurfaceObject_t dstImg,
int W, int H, int L)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
float4 tl1, tc1, tr1, ml1, mc1, mr1, bl1, bc1, br1;
float4 tl2, tc2, tr2, ml2, mc2, mr2, bl2, bc2, br2;
surf2DLayeredread(&tl1, srcImg, __umul24(sizeof(mc1), x - 1), y - 1, 0, hipBoundaryModeZero);
surf2DLayeredread(&tc1, srcImg, __umul24(sizeof(mc1), x), y - 1, 0, hipBoundaryModeZero);
surf2DLayeredread(&tr1, srcImg, __umul24(sizeof(mc1), x + 1), y - 1, 0, hipBoundaryModeZero);
surf2DLayeredread(&ml1, srcImg, __umul24(sizeof(mc1), x - 1), y, 0, hipBoundaryModeZero);
surf2DLayeredread(&mc1, srcImg, __umul24(sizeof(mc1), x), y, 0, hipBoundaryModeZero);
surf2DLayeredread(&mr1, srcImg, __umul24(sizeof(mc1), x + 1), y, 0, hipBoundaryModeZero);
surf2DLayeredread(&bl1, srcImg, __umul24(sizeof(mc1), x - 1), y + 1, 0, hipBoundaryModeZero);
surf2DLayeredread(&bc1, srcImg, __umul24(sizeof(mc1), x), y + 1, 0, hipBoundaryModeZero);
surf2DLayeredread(&br1, srcImg, __umul24(sizeof(mc1), x + 1), y + 1, 0, hipBoundaryModeZero);
surf2DLayeredread(&tl2, srcImg, __umul24(sizeof(mc2), x - 1), y - 1, 1, hipBoundaryModeZero);
surf2DLayeredread(&tc2, srcImg, __umul24(sizeof(mc2), x), y - 1, 1, hipBoundaryModeZero);
surf2DLayeredread(&tr2, srcImg, __umul24(sizeof(mc2), x + 1), y - 1, 1, hipBoundaryModeZero);
surf2DLayeredread(&ml2, srcImg, __umul24(sizeof(mc2), x - 1), y, 1, hipBoundaryModeZero);
surf2DLayeredread(&mc2, srcImg, __umul24(sizeof(mc2), x), y, 1, hipBoundaryModeZero);
surf2DLayeredread(&mr2, srcImg, __umul24(sizeof(mc2), x + 1), y, 1, hipBoundaryModeZero);
surf2DLayeredread(&bl2, srcImg, __umul24(sizeof(mc2), x - 1), y + 1, 1, hipBoundaryModeZero);
surf2DLayeredread(&bc2, srcImg, __umul24(sizeof(mc2), x), y + 1, 1, hipBoundaryModeZero);
surf2DLayeredread(&br2, srcImg, __umul24(sizeof(mc2), x + 1), y + 1, 1, hipBoundaryModeZero);
float4 c1234 = make_float4(
RELU(CHANNEL8TO8(0, 3)),
RELU(CHANNEL8TO8(1, 3)),
RELU(CHANNEL8TO8(2, 3)),
RELU(CHANNEL8TO8(3, 3))
);
float4 c5678 = make_float4(
RELU(CHANNEL8TO8(4, 3)),
RELU(CHANNEL8TO8(5, 3)),
RELU(CHANNEL8TO8(6, 3)),
RELU(CHANNEL8TO8(7, 3))
);
surf2DLayeredwrite(c1234, dstImg, __umul24(sizeof(c1234), x), y, 0, hipBoundaryModeZero);
surf2DLayeredwrite(c5678, dstImg, __umul24(sizeof(c5678), x), y, 1, hipBoundaryModeZero);
}
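// Output-layer kernels: a stride-2 transposed convolution implemented as a
// gather. Each destination pixel (x, y) maps to source pixel (x/2, y/2), and
// the parity bits of x and y pick one of four 2x2 sub-pixel taps per channel;
// the weighted sum is clamped to [0, 1]. The generic template targets float
// surfaces; integer specializations follow.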
template<typename T>
__global__ static void convTranspose8To1HDNL0(
hipSurfaceObject_t srcImg, hipSurfaceObject_t dstImg,
int W, int H)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
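// Each 2x2 output block shares one source pixel; index selects the sub-pixel tap.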
int index = (y & 1) * 2 + (x & 1);
float4 mc1, mc2;
const unsigned int srcX = x / 2, srcY = y / 2;
surf2DLayeredread(&mc1, srcImg, __umul24(sizeof(mc1), srcX), srcY, 0, hipBoundaryModeZero);
surf2DLayeredread(&mc2, srcImg, __umul24(sizeof(mc2), srcX), srcY, 1, hipBoundaryModeZero);
T c = clamp(
mc1.x * HDNL0kernelsL10[0 + index] +
mc1.y * HDNL0kernelsL10[4 + index] +
mc1.z * HDNL0kernelsL10[8 + index] +
mc1.w * HDNL0kernelsL10[12 + index] +
mc2.x * HDNL0kernelsL10[16 + index] +
mc2.y * HDNL0kernelsL10[20 + index] +
mc2.z * HDNL0kernelsL10[24 + index] +
mc2.w * HDNL0kernelsL10[28 + index], 0.0f, 1.0f);
surf2Dwrite(c, dstImg, __umul24(sizeof(c), x), y, hipBoundaryModeZero);
}
template<typename T>
__global__ static void convTranspose8To1HDNL1(
hipSurfaceObject_t srcImg, hipSurfaceObject_t dstImg,
int W, int H)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
int index = (y & 1) * 2 + (x & 1);
float4 mc1, mc2;
const unsigned int srcX = x / 2, srcY = y / 2;
surf2DLayeredread(&mc1, srcImg, __umul24(sizeof(mc1), srcX), srcY, 0, hipBoundaryModeZero);
surf2DLayeredread(&mc2, srcImg, __umul24(sizeof(mc2), srcX), srcY, 1, hipBoundaryModeZero);
T c = clamp(
mc1.x * HDNL1kernelsL10[0 + index] +
mc1.y * HDNL1kernelsL10[4 + index] +
mc1.z * HDNL1kernelsL10[8 + index] +
mc1.w * HDNL1kernelsL10[12 + index] +
mc2.x * HDNL1kernelsL10[16 + index] +
mc2.y * HDNL1kernelsL10[20 + index] +
mc2.z * HDNL1kernelsL10[24 + index] +
mc2.w * HDNL1kernelsL10[28 + index], 0.0f, 1.0f);
surf2Dwrite(c, dstImg, __umul24(sizeof(c), x), y, hipBoundaryModeZero);
}
template<typename T>
__global__ static void convTranspose8To1HDNL2(
hipSurfaceObject_t srcImg, hipSurfaceObject_t dstImg,
int W, int H)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
int index = (y & 1) * 2 + (x & 1);
float4 mc1, mc2;
const unsigned int srcX = x / 2, srcY = y / 2;
surf2DLayeredread(&mc1, srcImg, __umul24(sizeof(mc1), srcX), srcY, 0, hipBoundaryModeZero);
surf2DLayeredread(&mc2, srcImg, __umul24(sizeof(mc2), srcX), srcY, 1, hipBoundaryModeZero);
T c = clamp(
mc1.x * HDNL2kernelsL10[0 + index] +
mc1.y * HDNL2kernelsL10[4 + index] +
mc1.z * HDNL2kernelsL10[8 + index] +
mc1.w * HDNL2kernelsL10[12 + index] +
mc2.x * HDNL2kernelsL10[16 + index] +
mc2.y * HDNL2kernelsL10[20 + index] +
mc2.z * HDNL2kernelsL10[24 + index] +
mc2.w * HDNL2kernelsL10[28 + index], 0.0f, 1.0f);
surf2Dwrite(c, dstImg, __umul24(sizeof(c), x), y, hipBoundaryModeZero);
}
template<typename T>
__global__ static void convTranspose8To1HDNL3(
hipSurfaceObject_t srcImg, hipSurfaceObject_t dstImg,
int W, int H)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
int index = (y & 1) * 2 + (x & 1);
float4 mc1, mc2;
const unsigned int srcX = x / 2, srcY = y / 2;
surf2DLayeredread(&mc1, srcImg, __umul24(sizeof(mc1), srcX), srcY, 0, hipBoundaryModeZero);
surf2DLayeredread(&mc2, srcImg, __umul24(sizeof(mc2), srcX), srcY, 1, hipBoundaryModeZero);
T c = clamp(
mc1.x * HDNL3kernelsL10[0 + index] +
mc1.y * HDNL3kernelsL10[4 + index] +
mc1.z * HDNL3kernelsL10[8 + index] +
mc1.w * HDNL3kernelsL10[12 + index] +
mc2.x * HDNL3kernelsL10[16 + index] +
mc2.y * HDNL3kernelsL10[20 + index] +
mc2.z * HDNL3kernelsL10[24 + index] +
mc2.w * HDNL3kernelsL10[28 + index], 0.0f, 1.0f);
surf2Dwrite(c, dstImg, __umul24(sizeof(c), x), y, hipBoundaryModeZero);
}
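// Specializations for integer storage rescale the clamped value to the full
// range of the surface format (255 for uchar, 65535 for ushort) and add 0.5
// so the implicit narrowing conversion rounds to nearest.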
template<>
__global__ void convTranspose8To1HDNL0<uchar>(
hipSurfaceObject_t srcImg, hipSurfaceObject_t dstImg,
int W, int H)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
int index = (y & 1) * 2 + (x & 1);
float4 mc1, mc2;
const unsigned int srcX = x / 2, srcY = y / 2;
surf2DLayeredread(&mc1, srcImg, __umul24(sizeof(mc1), srcX), srcY, 0, hipBoundaryModeZero);
surf2DLayeredread(&mc2, srcImg, __umul24(sizeof(mc2), srcX), srcY, 1, hipBoundaryModeZero);
uchar c = clamp(
mc1.x * HDNL0kernelsL10[0 + index] +
mc1.y * HDNL0kernelsL10[4 + index] +
mc1.z * HDNL0kernelsL10[8 + index] +
mc1.w * HDNL0kernelsL10[12 + index] +
mc2.x * HDNL0kernelsL10[16 + index] +
mc2.y * HDNL0kernelsL10[20 + index] +
mc2.z * HDNL0kernelsL10[24 + index] +
mc2.w * HDNL0kernelsL10[28 + index], 0.0f, 1.0f) * 255.0f + 0.5f;
surf2Dwrite(c, dstImg, __umul24(sizeof(c), x), y, hipBoundaryModeZero);
}
template<>
__global__ void convTranspose8To1HDNL1<uchar>(
hipSurfaceObject_t srcImg, hipSurfaceObject_t dstImg,
int W, int H)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
int index = (y & 1) * 2 + (x & 1);
float4 mc1, mc2;
const unsigned int srcX = x / 2, srcY = y / 2;
surf2DLayeredread(&mc1, srcImg, __umul24(sizeof(mc1), srcX), srcY, 0, hipBoundaryModeZero);
surf2DLayeredread(&mc2, srcImg, __umul24(sizeof(mc2), srcX), srcY, 1, hipBoundaryModeZero);
uchar c = clamp(
mc1.x * HDNL1kernelsL10[0 + index] +
mc1.y * HDNL1kernelsL10[4 + index] +
mc1.z * HDNL1kernelsL10[8 + index] +
mc1.w * HDNL1kernelsL10[12 + index] +
mc2.x * HDNL1kernelsL10[16 + index] +
mc2.y * HDNL1kernelsL10[20 + index] +
mc2.z * HDNL1kernelsL10[24 + index] +
mc2.w * HDNL1kernelsL10[28 + index], 0.0f, 1.0f) * 255.0f + 0.5f;
surf2Dwrite(c, dstImg, __umul24(sizeof(c), x), y, hipBoundaryModeZero);
}
template<>
__global__ void convTranspose8To1HDNL2<uchar>(
hipSurfaceObject_t srcImg, hipSurfaceObject_t dstImg,
int W, int H)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
int index = (y & 1) * 2 + (x & 1);
float4 mc1, mc2;
const unsigned int srcX = x / 2, srcY = y / 2;
surf2DLayeredread(&mc1, srcImg, __umul24(sizeof(mc1), srcX), srcY, 0, hipBoundaryModeZero);
surf2DLayeredread(&mc2, srcImg, __umul24(sizeof(mc2), srcX), srcY, 1, hipBoundaryModeZero);
uchar c = clamp(
mc1.x * HDNL2kernelsL10[0 + index] +
mc1.y * HDNL2kernelsL10[4 + index] +
mc1.z * HDNL2kernelsL10[8 + index] +
mc1.w * HDNL2kernelsL10[12 + index] +
mc2.x * HDNL2kernelsL10[16 + index] +
mc2.y * HDNL2kernelsL10[20 + index] +
mc2.z * HDNL2kernelsL10[24 + index] +
mc2.w * HDNL2kernelsL10[28 + index], 0.0f, 1.0f) * 255.0f + 0.5f;
surf2Dwrite(c, dstImg, __umul24(sizeof(c), x), y, hipBoundaryModeZero);
}
template<>
__global__ void convTranspose8To1HDNL3<uchar>(
hipSurfaceObject_t srcImg, hipSurfaceObject_t dstImg,
int W, int H)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
int index = (y & 1) * 2 + (x & 1);
float4 mc1, mc2;
const unsigned int srcX = x / 2, srcY = y / 2;
surf2DLayeredread(&mc1, srcImg, __umul24(sizeof(mc1), srcX), srcY, 0, hipBoundaryModeZero);
surf2DLayeredread(&mc2, srcImg, __umul24(sizeof(mc2), srcX), srcY, 1, hipBoundaryModeZero);
uchar c = clamp(
mc1.x * HDNL3kernelsL10[0 + index] +
mc1.y * HDNL3kernelsL10[4 + index] +
mc1.z * HDNL3kernelsL10[8 + index] +
mc1.w * HDNL3kernelsL10[12 + index] +
mc2.x * HDNL3kernelsL10[16 + index] +
mc2.y * HDNL3kernelsL10[20 + index] +
mc2.z * HDNL3kernelsL10[24 + index] +
mc2.w * HDNL3kernelsL10[28 + index], 0.0f, 1.0f) * 255.0f + 0.5f;
surf2Dwrite(c, dstImg, __umul24(sizeof(c), x), y, hipBoundaryModeZero);
}
template<>
__global__ void convTranspose8To1HDNL0<ushort>(
hipSurfaceObject_t srcImg, hipSurfaceObject_t dstImg,
int W, int H)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
int index = (y & 1) * 2 + (x & 1);
float4 mc1, mc2;
const unsigned int srcX = x / 2, srcY = y / 2;
surf2DLayeredread(&mc1, srcImg, __umul24(sizeof(mc1), srcX), srcY, 0, hipBoundaryModeZero);
surf2DLayeredread(&mc2, srcImg, __umul24(sizeof(mc2), srcX), srcY, 1, hipBoundaryModeZero);
ushort c = clamp(
mc1.x * HDNL0kernelsL10[0 + index] +
mc1.y * HDNL0kernelsL10[4 + index] +
mc1.z * HDNL0kernelsL10[8 + index] +
mc1.w * HDNL0kernelsL10[12 + index] +
mc2.x * HDNL0kernelsL10[16 + index] +
mc2.y * HDNL0kernelsL10[20 + index] +
mc2.z * HDNL0kernelsL10[24 + index] +
mc2.w * HDNL0kernelsL10[28 + index], 0.0f, 1.0f) * 65535.0f + 0.5f;
surf2Dwrite(c, dstImg, __umul24(sizeof(c), x), y, hipBoundaryModeZero);
}
template<>
__global__ void convTranspose8To1HDNL1<ushort>(
hipSurfaceObject_t srcImg, hipSurfaceObject_t dstImg,
int W, int H)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
int index = (y & 1) * 2 + (x & 1);
float4 mc1, mc2;
const unsigned int srcX = x / 2, srcY = y / 2;
surf2DLayeredread(&mc1, srcImg, __umul24(sizeof(mc1), srcX), srcY, 0, hipBoundaryModeZero);
surf2DLayeredread(&mc2, srcImg, __umul24(sizeof(mc2), srcX), srcY, 1, hipBoundaryModeZero);
ushort c = clamp(
mc1.x * HDNL1kernelsL10[0 + index] +
mc1.y * HDNL1kernelsL10[4 + index] +
mc1.z * HDNL1kernelsL10[8 + index] +
mc1.w * HDNL1kernelsL10[12 + index] +
mc2.x * HDNL1kernelsL10[16 + index] +
mc2.y * HDNL1kernelsL10[20 + index] +
mc2.z * HDNL1kernelsL10[24 + index] +
mc2.w * HDNL1kernelsL10[28 + index], 0.0f, 1.0f) * 65535.0f + 0.5f;
surf2Dwrite(c, dstImg, __umul24(sizeof(c), x), y, hipBoundaryModeZero);
}
template<>
__global__ void convTranspose8To1HDNL2<ushort>(
hipSurfaceObject_t srcImg, hipSurfaceObject_t dstImg,
int W, int H)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
int index = (y & 1) * 2 + (x & 1);
float4 mc1, mc2;
const unsigned int srcX = x / 2, srcY = y / 2;
surf2DLayeredread(&mc1, srcImg, __umul24(sizeof(mc1), srcX), srcY, 0, hipBoundaryModeZero);
surf2DLayeredread(&mc2, srcImg, __umul24(sizeof(mc2), srcX), srcY, 1, hipBoundaryModeZero);
ushort c = clamp(
mc1.x * HDNL2kernelsL10[0 + index] +
mc1.y * HDNL2kernelsL10[4 + index] +
mc1.z * HDNL2kernelsL10[8 + index] +
mc1.w * HDNL2kernelsL10[12 + index] +
mc2.x * HDNL2kernelsL10[16 + index] +
mc2.y * HDNL2kernelsL10[20 + index] +
mc2.z * HDNL2kernelsL10[24 + index] +
mc2.w * HDNL2kernelsL10[28 + index], 0.0f, 1.0f) * 65535.0f + 0.5f;
surf2Dwrite(c, dstImg, __umul24(sizeof(c), x), y, hipBoundaryModeZero);
}
template<>
__global__ void convTranspose8To1HDNL3<ushort>(
hipSurfaceObject_t srcImg, hipSurfaceObject_t dstImg,
int W, int H)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
int index = (y & 1) * 2 + (x & 1);
float4 mc1, mc2;
const unsigned int srcX = x / 2, srcY = y / 2;
surf2DLayeredread(&mc1, srcImg, __umul24(sizeof(mc1), srcX), srcY, 0, hipBoundaryModeZero);
surf2DLayeredread(&mc2, srcImg, __umul24(sizeof(mc2), srcX), srcY, 1, hipBoundaryModeZero);
ushort c = clamp(
mc1.x * HDNL3kernelsL10[0 + index] +
mc1.y * HDNL3kernelsL10[4 + index] +
mc1.z * HDNL3kernelsL10[8 + index] +
mc1.w * HDNL3kernelsL10[12 + index] +
mc2.x * HDNL3kernelsL10[16 + index] +
mc2.y * HDNL3kernelsL10[20 + index] +
mc2.z * HDNL3kernelsL10[24 + index] +
mc2.w * HDNL3kernelsL10[28 + index], 0.0f, 1.0f) * 65535.0f + 0.5f;
surf2Dwrite(c, dstImg, __umul24(sizeof(c), x), y, hipBoundaryModeZero);
}
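// Host entry point for the 8-bit (uchar) path: allocates the device arrays,
// uploads the single-channel source plane, runs the ACNet pipeline for the
// selected HDN level on a private stream, and copies the 2x-upscaled result
// back into outputData.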
void Anime4KCPP::Cuda::cuRunKernelACNetB(const unsigned char* inputData, unsigned char* outputData, ACCudaParamACNet* param)
{
hipError_t err = hipSuccess;
if (currCudaDeviceID)
{
err = hipSetDevice(currCudaDeviceID);
CheckCudaErr(err);
}
hipStream_t stream;
err = hipStreamCreate(&stream);
CheckCudaErr(err);
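// Channel formats: single-channel 8-bit for input/output, float4 for the
// intermediate feature maps (a layered array with two float4 layers holds
// the 8 channels used by the conv layers).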
hipChannelFormatDesc inoutChannelDesc = hipCreateChannelDesc(8, 0, 0, 0, hipChannelFormatKindUnsigned);
hipChannelFormatDesc tmpChannelDesc = hipCreateChannelDesc(32, 32, 32, 32, hipChannelFormatKindFloat);
hipExtent extent = make_hipExtent(param->orgW, param->orgH, 2);
const int W = 2 * param->orgW, H = 2 * param->orgH;
hipArray_t cuInputArray;
err = hipMallocArray(&cuInputArray, &inoutChannelDesc,
param->orgW, param->orgH);
CheckCudaErr(err);
hipArray_t cuArray1;
err = hipMalloc3DArray(&cuArray1, &tmpChannelDesc, extent,
hipArraySurfaceLoadStore | hipArrayLayered);
CheckCudaErr(err);
hipArray_t cuArray2;
err = hipMalloc3DArray(&cuArray2, &tmpChannelDesc, extent,
hipArraySurfaceLoadStore | hipArrayLayered);
CheckCudaErr(err);
hipArray_t cuOutputArray;
err = hipMallocArray(&cuOutputArray, &inoutChannelDesc,
W, H, hipArraySurfaceLoadStore);
CheckCudaErr(err);
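// Bind the input array as a texture (reads normalized to float, border
// addressing outside the image) and the intermediate/output arrays as
// writable surfaces.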
struct hipResourceDesc resDesc;
struct hipTextureDesc texDesc;
memset(&resDesc, 0, sizeof(resDesc));
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = hipAddressModeBorder;
texDesc.addressMode[1] = hipAddressModeBorder;
texDesc.readMode = hipReadModeNormalizedFloat;
texDesc.normalizedCoords = 0;
resDesc.resType = hipResourceTypeArray;
resDesc.res.array.array = cuInputArray;
hipTextureObject_t inTex = 0;
err = hipCreateTextureObject(&inTex, &resDesc, &texDesc, NULL);
CheckCudaErr(err);
resDesc.res.array.array = cuArray1;
hipSurfaceObject_t surf1 = 0;
err = hipCreateSurfaceObject(&surf1, &resDesc);
CheckCudaErr(err);
resDesc.res.array.array = cuArray2;
hipSurfaceObject_t surf2 = 0;
err = hipCreateSurfaceObject(&surf2, &resDesc);
CheckCudaErr(err);
resDesc.res.array.array = cuOutputArray;
hipSurfaceObject_t outSurf = 0;
err = hipCreateSurfaceObject(&outSurf, &resDesc);
CheckCudaErr(err);
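// Upload the source plane. inputData is pageable here, so this "async"
// host-to-device copy is staged by the driver rather than fully overlapped.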
err = hipMemcpy2DToArrayAsync(cuInputArray, 0, 0, inputData,
param->stride, sizeof(uchar) * param->orgW, param->orgH,
hipMemcpyHostToDevice, stream);
CheckCudaErr(err);
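// 16x16 thread blocks: dimGrid covers the input resolution (conv layers),
// dimGridout covers the 2x output (the transposed-conv layer).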
dim3 dimBlock(16, 16);
dim3 dimGrid(
(param->orgW + dimBlock.x - 1) / dimBlock.x,
(param->orgH + dimBlock.y - 1) / dimBlock.y
);
dim3 dimGridout(
(param->orgW * 2 + dimBlock.x - 1) / dimBlock.x,
(param->orgH * 2 + dimBlock.y - 1) / dimBlock.y
);
switch (param->HDNLevel)
{
case 0:
RUNKERNEL(0, uchar)
break;
case 1:
RUNKERNEL(1, uchar)
break;
case 2:
RUNKERNEL(2, uchar)
break;
case 3:
RUNKERNEL(3, uchar)
break;
default:
RUNKERNEL(0, uchar)
break;
}
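// Pin the caller's output buffer so the 2D device-to-host copy can run
// asynchronously on the stream; it is unregistered once the stream drains.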
err = hipHostRegister(outputData, sizeof(uchar) * W * H, hipHostRegisterDefault);
CheckCudaErr(err);
err = hipMemcpy2DFromArrayAsync(outputData, sizeof(uchar) * W,
cuOutputArray, 0, 0, sizeof(uchar) * W, H,
hipMemcpyDeviceToHost, stream);
CheckCudaErr(err);
err = hipStreamSynchronize(stream);
CheckCudaErr(err);
err = hipHostUnregister(outputData);
CheckCudaErr(err);
hipDestroyTextureObject(inTex);
hipDestroySurfaceObject(surf1);
hipDestroySurfaceObject(surf2);
hipDestroySurfaceObject(outSurf);
hipFreeArray(cuInputArray);
hipFreeArray(cuArray1);
hipFreeArray(cuArray2);
hipFreeArray(cuOutputArray);
hipStreamDestroy(stream);
}
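// 16-bit (ushort) variant: identical to cuRunKernelACNetB apart from the
// element type and the 16-bit channel format.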
void Anime4KCPP::Cuda::cuRunKernelACNetW(const unsigned short int* inputData, unsigned short int* outputData, ACCudaParamACNet* param)
{
hipError_t err = hipSuccess;
if (currCudaDeviceID)
{
err = hipSetDevice(currCudaDeviceID);
CheckCudaErr(err);
}
hipStream_t stream;
err = hipStreamCreate(&stream);
CheckCudaErr(err);
hipChannelFormatDesc inoutChannelDesc = hipCreateChannelDesc(16, 0, 0, 0, hipChannelFormatKindUnsigned);
hipChannelFormatDesc tmpChannelDesc = hipCreateChannelDesc(32, 32, 32, 32, hipChannelFormatKindFloat);
hipExtent extent = make_hipExtent(param->orgW, param->orgH, 2);
const int W = 2 * param->orgW, H = 2 * param->orgH;
hipArray_t cuInputArray;
err = hipMallocArray(&cuInputArray, &inoutChannelDesc,
param->orgW, param->orgH);
CheckCudaErr(err);
hipArray_t cuArray1;
err = hipMalloc3DArray(&cuArray1, &tmpChannelDesc, extent,
hipArraySurfaceLoadStore | hipArrayLayered);
CheckCudaErr(err);
hipArray_t cuArray2;
err = hipMalloc3DArray(&cuArray2, &tmpChannelDesc, extent,
hipArraySurfaceLoadStore | hipArrayLayered);
CheckCudaErr(err);
hipArray_t cuOutputArray;
err = hipMallocArray(&cuOutputArray, &inoutChannelDesc,
W, H, hipArraySurfaceLoadStore);
CheckCudaErr(err);
struct hipResourceDesc resDesc;
struct hipTextureDesc texDesc;
memset(&resDesc, 0, sizeof(resDesc));
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = hipAddressModeBorder;
texDesc.addressMode[1] = hipAddressModeBorder;
texDesc.readMode = hipReadModeNormalizedFloat;
texDesc.normalizedCoords = 0;
resDesc.resType = hipResourceTypeArray;
resDesc.res.array.array = cuInputArray;
hipTextureObject_t inTex = 0;
err = hipCreateTextureObject(&inTex, &resDesc, &texDesc, NULL);
CheckCudaErr(err);
resDesc.res.array.array = cuArray1;
hipSurfaceObject_t surf1 = 0;
err = hipCreateSurfaceObject(&surf1, &resDesc);
CheckCudaErr(err);
resDesc.res.array.array = cuArray2;
hipSurfaceObject_t surf2 = 0;
err = hipCreateSurfaceObject(&surf2, &resDesc);
CheckCudaErr(err);
resDesc.res.array.array = cuOutputArray;
hipSurfaceObject_t outSurf = 0;
err = hipCreateSurfaceObject(&outSurf, &resDesc);
CheckCudaErr(err);
err = hipMemcpy2DToArrayAsync(cuInputArray, 0, 0, inputData,
param->stride, sizeof(ushort) * param->orgW, param->orgH,
hipMemcpyHostToDevice, stream);
CheckCudaErr(err);
dim3 dimBlock(16, 16);
dim3 dimGrid(
(param->orgW + dimBlock.x - 1) / dimBlock.x,
(param->orgH + dimBlock.y - 1) / dimBlock.y
);
dim3 dimGridout(
(param->orgW * 2 + dimBlock.x - 1) / dimBlock.x,
(param->orgH * 2 + dimBlock.y - 1) / dimBlock.y
);
switch (param->HDNLevel)
{
case 0:
RUNKERNEL(0, ushort)
break;
case 1:
RUNKERNEL(1, ushort)
break;
case 2:
RUNKERNEL(2, ushort)
break;
case 3:
RUNKERNEL(3, ushort)
break;
default:
RUNKERNEL(0, ushort)
break;
}
err = hipHostRegister(outputData, sizeof(ushort) * W * H, hipHostRegisterDefault);
CheckCudaErr(err);
err = hipMemcpy2DFromArrayAsync(outputData, sizeof(ushort) * W,
cuOutputArray, 0, 0, sizeof(ushort) * W, H,
hipMemcpyDeviceToHost, stream);
CheckCudaErr(err);
err = hipStreamSynchronize(stream);
CheckCudaErr(err);
err = hipHostUnregister(outputData);
CheckCudaErr(err);
hipDestroyTextureObject(inTex);
hipDestroySurfaceObject(surf1);
hipDestroySurfaceObject(surf2);
hipDestroySurfaceObject(outSurf);
hipFreeArray(cuInputArray);
hipFreeArray(cuArray1);
hipFreeArray(cuArray2);
hipFreeArray(cuOutputArray);
hipStreamDestroy(stream);
}
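// 32-bit float variant. texDesc.readMode and normalizedCoords are left at
// their zeroed defaults (element-type reads, unnormalized coordinates),
// since the source is already floating point and is expected pre-normalized,
// matching the normalized reads used in the integer paths.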
void Anime4KCPP::Cuda::cuRunKernelACNetF(const float* inputData, float* outputData, ACCudaParamACNet* param)
{
hipError_t err = hipSuccess;
if (currCudaDeviceID)
{
err = hipSetDevice(currCudaDeviceID);
CheckCudaErr(err);
}
hipStream_t stream;
err = hipStreamCreate(&stream);
CheckCudaErr(err);
hipChannelFormatDesc inoutChannelDesc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
hipChannelFormatDesc tmpChannelDesc = hipCreateChannelDesc(32, 32, 32, 32, hipChannelFormatKindFloat);
hipExtent extent = make_hipExtent(param->orgW, param->orgH, 2);
const int W = 2 * param->orgW, H = 2 * param->orgH;
hipArray_t cuInputArray;
err = hipMallocArray(&cuInputArray, &inoutChannelDesc,
param->orgW, param->orgH);
CheckCudaErr(err);
hipArray_t cuArray1;
err = hipMalloc3DArray(&cuArray1, &tmpChannelDesc, extent,
hipArraySurfaceLoadStore | hipArrayLayered);
CheckCudaErr(err);
hipArray_t cuArray2;
err = hipMalloc3DArray(&cuArray2, &tmpChannelDesc, extent,
hipArraySurfaceLoadStore | hipArrayLayered);
CheckCudaErr(err);
hipArray_t cuOutputArray;
err = hipMallocArray(&cuOutputArray, &inoutChannelDesc,
W, H, hipArraySurfaceLoadStore);
CheckCudaErr(err);
struct hipResourceDesc resDesc;
struct hipTextureDesc texDesc;
memset(&resDesc, 0, sizeof(resDesc));
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = hipAddressModeBorder;
texDesc.addressMode[1] = hipAddressModeBorder;
resDesc.resType = hipResourceTypeArray;
resDesc.res.array.array = cuInputArray;
hipTextureObject_t inTex = 0;
err = hipCreateTextureObject(&inTex, &resDesc, &texDesc, NULL);
CheckCudaErr(err);
resDesc.res.array.array = cuArray1;
hipSurfaceObject_t surf1 = 0;
err = hipCreateSurfaceObject(&surf1, &resDesc);
CheckCudaErr(err);
resDesc.res.array.array = cuArray2;
hipSurfaceObject_t surf2 = 0;
err = hipCreateSurfaceObject(&surf2, &resDesc);
CheckCudaErr(err);
resDesc.res.array.array = cuOutputArray;
hipSurfaceObject_t outSurf = 0;
err = hipCreateSurfaceObject(&outSurf, &resDesc);
CheckCudaErr(err);
err = hipMemcpy2DToArrayAsync(cuInputArray, 0, 0, inputData,
param->stride, sizeof(float) * param->orgW, param->orgH,
hipMemcpyHostToDevice, stream);
CheckCudaErr(err);
dim3 dimBlock(16, 16);
dim3 dimGrid(
(param->orgW + dimBlock.x - 1) / dimBlock.x,
(param->orgH + dimBlock.y - 1) / dimBlock.y
);
dim3 dimGridout(
(param->orgW * 2 + dimBlock.x - 1) / dimBlock.x,
(param->orgH * 2 + dimBlock.y - 1) / dimBlock.y
);
switch (param->HDNLevel)
{
case 0:
RUNKERNEL(0, float)
break;
case 1:
RUNKERNEL(1, float)
break;
case 2:
RUNKERNEL(2, float)
break;
case 3:
RUNKERNEL(3, float)
break;
default:
RUNKERNEL(0, float)
break;
}
err = hipHostRegister(outputData, sizeof(float) * W * H, hipHostRegisterDefault);
CheckCudaErr(err);
err = hipMemcpy2DFromArrayAsync(outputData, sizeof(float) * W,
cuOutputArray, 0, 0, sizeof(float) * W, H,
hipMemcpyDeviceToHost, stream);
CheckCudaErr(err);
err = hipStreamSynchronize(stream);
CheckCudaErr(err);
err = hipHostUnregister(outputData);
CheckCudaErr(err);
hipDestroyTextureObject(inTex);
hipDestroySurfaceObject(surf1);
hipDestroySurfaceObject(surf2);
hipDestroySurfaceObject(outSurf);
hipFreeArray(cuInputArray);
hipFreeArray(cuArray1);
hipFreeArray(cuArray2);
hipFreeArray(cuOutputArray);
hipStreamDestroy(stream);
}
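// A minimal host-side usage sketch (hypothetical caller; the ACCudaParamACNet
// fields are inferred from their uses above: orgW/orgH in pixels, stride in
// bytes, HDNLevel in 0..3). The output buffer must hold the 2x-upscaled
// plane, i.e. (2 * orgW) * (2 * orgH) elements:
//
//   ACCudaParamACNet param{};
//   param.orgW = width;
//   param.orgH = height;
//   param.stride = width * sizeof(unsigned char); // bytes per input row
//   param.HDNLevel = 1;
//   std::vector<unsigned char> src(width * height);     // filled by the caller
//   std::vector<unsigned char> dst(4 * width * height); // 2x in each dimension
//   Anime4KCPP::Cuda::cuRunKernelACNetB(src.data(), dst.data(), &param);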
// 79aaedbd159308ca8e5a9f26132df0e4d92b65f6.cu (CUDA counterpart of the HIP source above)
#include "CudaHelper.cuh"
#include "CudaInterface.hpp"
typedef unsigned char uchar;
typedef unsigned short ushort;
extern int currCudaDeviceID;
#define RELU(x) fmaxf(x, 0.0f)
#define L2 0
#define L3 1
#define L4 2
#define L5 3
#define L6 4
#define L7 5
#define L8 6
#define L9 7
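// CHANNEL1TO8(n, Level): 3x3 convolution producing output channel n of the
// first layer from nine single-channel taps (tl..br) plus the layer-1 bias.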
#define CHANNEL1TO8(n, Level) \
tl * HDNL##Level##kernelsL1[n * 9 + 0] + tc * HDNL##Level##kernelsL1[n * 9 + 1] + tr * HDNL##Level##kernelsL1[n * 9 + 2] + \
ml * HDNL##Level##kernelsL1[n * 9 + 3] + mc * HDNL##Level##kernelsL1[n * 9 + 4] + mr * HDNL##Level##kernelsL1[n * 9 + 5] + \
bl * HDNL##Level##kernelsL1[n * 9 + 6] + bc * HDNL##Level##kernelsL1[n * 9 + 7] + br * HDNL##Level##kernelsL1[n * 9 + 8] + HDNL##Level##biasL1[n]
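// CHANNEL8TO8(n, Level): 3x3 convolution producing output channel n of hidden
// layer L from eight input channels (two float4 reads per tap); weights are
// indexed as kernelsL[L][n * 72 + inChannel * 9 + tap].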
#define CHANNEL8TO8(n, Level) \
tl1.x * HDNL##Level##kernelsL[L][n * 72 + 0 * 9 + 0] + tc1.x * HDNL##Level##kernelsL[L][n * 72 + 0 * 9 + 1] + tr1.x * HDNL##Level##kernelsL[L][n * 72 + 0 * 9 + 2] + \
ml1.x * HDNL##Level##kernelsL[L][n * 72 + 0 * 9 + 3] + mc1.x * HDNL##Level##kernelsL[L][n * 72 + 0 * 9 + 4] + mr1.x * HDNL##Level##kernelsL[L][n * 72 + 0 * 9 + 5] + \
bl1.x * HDNL##Level##kernelsL[L][n * 72 + 0 * 9 + 6] + bc1.x * HDNL##Level##kernelsL[L][n * 72 + 0 * 9 + 7] + br1.x * HDNL##Level##kernelsL[L][n * 72 + 0 * 9 + 8] + \
tl1.y * HDNL##Level##kernelsL[L][n * 72 + 1 * 9 + 0] + tc1.y * HDNL##Level##kernelsL[L][n * 72 + 1 * 9 + 1] + tr1.y * HDNL##Level##kernelsL[L][n * 72 + 1 * 9 + 2] + \
ml1.y * HDNL##Level##kernelsL[L][n * 72 + 1 * 9 + 3] + mc1.y * HDNL##Level##kernelsL[L][n * 72 + 1 * 9 + 4] + mr1.y * HDNL##Level##kernelsL[L][n * 72 + 1 * 9 + 5] + \
bl1.y * HDNL##Level##kernelsL[L][n * 72 + 1 * 9 + 6] + bc1.y * HDNL##Level##kernelsL[L][n * 72 + 1 * 9 + 7] + br1.y * HDNL##Level##kernelsL[L][n * 72 + 1 * 9 + 8] + \
tl1.z * HDNL##Level##kernelsL[L][n * 72 + 2 * 9 + 0] + tc1.z * HDNL##Level##kernelsL[L][n * 72 + 2 * 9 + 1] + tr1.z * HDNL##Level##kernelsL[L][n * 72 + 2 * 9 + 2] + \
ml1.z * HDNL##Level##kernelsL[L][n * 72 + 2 * 9 + 3] + mc1.z * HDNL##Level##kernelsL[L][n * 72 + 2 * 9 + 4] + mr1.z * HDNL##Level##kernelsL[L][n * 72 + 2 * 9 + 5] + \
bl1.z * HDNL##Level##kernelsL[L][n * 72 + 2 * 9 + 6] + bc1.z * HDNL##Level##kernelsL[L][n * 72 + 2 * 9 + 7] + br1.z * HDNL##Level##kernelsL[L][n * 72 + 2 * 9 + 8] + \
tl1.w * HDNL##Level##kernelsL[L][n * 72 + 3 * 9 + 0] + tc1.w * HDNL##Level##kernelsL[L][n * 72 + 3 * 9 + 1] + tr1.w * HDNL##Level##kernelsL[L][n * 72 + 3 * 9 + 2] + \
ml1.w * HDNL##Level##kernelsL[L][n * 72 + 3 * 9 + 3] + mc1.w * HDNL##Level##kernelsL[L][n * 72 + 3 * 9 + 4] + mr1.w * HDNL##Level##kernelsL[L][n * 72 + 3 * 9 + 5] + \
bl1.w * HDNL##Level##kernelsL[L][n * 72 + 3 * 9 + 6] + bc1.w * HDNL##Level##kernelsL[L][n * 72 + 3 * 9 + 7] + br1.w * HDNL##Level##kernelsL[L][n * 72 + 3 * 9 + 8] + \
tl2.x * HDNL##Level##kernelsL[L][n * 72 + 4 * 9 + 0] + tc2.x * HDNL##Level##kernelsL[L][n * 72 + 4 * 9 + 1] + tr2.x * HDNL##Level##kernelsL[L][n * 72 + 4 * 9 + 2] + \
ml2.x * HDNL##Level##kernelsL[L][n * 72 + 4 * 9 + 3] + mc2.x * HDNL##Level##kernelsL[L][n * 72 + 4 * 9 + 4] + mr2.x * HDNL##Level##kernelsL[L][n * 72 + 4 * 9 + 5] + \
bl2.x * HDNL##Level##kernelsL[L][n * 72 + 4 * 9 + 6] + bc2.x * HDNL##Level##kernelsL[L][n * 72 + 4 * 9 + 7] + br2.x * HDNL##Level##kernelsL[L][n * 72 + 4 * 9 + 8] + \
tl2.y * HDNL##Level##kernelsL[L][n * 72 + 5 * 9 + 0] + tc2.y * HDNL##Level##kernelsL[L][n * 72 + 5 * 9 + 1] + tr2.y * HDNL##Level##kernelsL[L][n * 72 + 5 * 9 + 2] + \
ml2.y * HDNL##Level##kernelsL[L][n * 72 + 5 * 9 + 3] + mc2.y * HDNL##Level##kernelsL[L][n * 72 + 5 * 9 + 4] + mr2.y * HDNL##Level##kernelsL[L][n * 72 + 5 * 9 + 5] + \
bl2.y * HDNL##Level##kernelsL[L][n * 72 + 5 * 9 + 6] + bc2.y * HDNL##Level##kernelsL[L][n * 72 + 5 * 9 + 7] + br2.y * HDNL##Level##kernelsL[L][n * 72 + 5 * 9 + 8] + \
tl2.z * HDNL##Level##kernelsL[L][n * 72 + 6 * 9 + 0] + tc2.z * HDNL##Level##kernelsL[L][n * 72 + 6 * 9 + 1] + tr2.z * HDNL##Level##kernelsL[L][n * 72 + 6 * 9 + 2] + \
ml2.z * HDNL##Level##kernelsL[L][n * 72 + 6 * 9 + 3] + mc2.z * HDNL##Level##kernelsL[L][n * 72 + 6 * 9 + 4] + mr2.z * HDNL##Level##kernelsL[L][n * 72 + 6 * 9 + 5] + \
bl2.z * HDNL##Level##kernelsL[L][n * 72 + 6 * 9 + 6] + bc2.z * HDNL##Level##kernelsL[L][n * 72 + 6 * 9 + 7] + br2.z * HDNL##Level##kernelsL[L][n * 72 + 6 * 9 + 8] + \
tl2.w * HDNL##Level##kernelsL[L][n * 72 + 7 * 9 + 0] + tc2.w * HDNL##Level##kernelsL[L][n * 72 + 7 * 9 + 1] + tr2.w * HDNL##Level##kernelsL[L][n * 72 + 7 * 9 + 2] + \
ml2.w * HDNL##Level##kernelsL[L][n * 72 + 7 * 9 + 3] + mc2.w * HDNL##Level##kernelsL[L][n * 72 + 7 * 9 + 4] + mr2.w * HDNL##Level##kernelsL[L][n * 72 + 7 * 9 + 5] + \
bl2.w * HDNL##Level##kernelsL[L][n * 72 + 7 * 9 + 6] + bc2.w * HDNL##Level##kernelsL[L][n * 72 + 7 * 9 + 7] + br2.w * HDNL##Level##kernelsL[L][n * 72 + 7 * 9 + 8] + HDNL##Level##biasL[L][n]
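// RUNKERNEL(Level, type): the full ACNet pipeline for one HDN level:
// 1->8 feature extraction, eight chained 8->8 conv layers ping-ponging
// between surf1 and surf2, then the 8->1 transposed convolution into the
// double-resolution output surface.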
#define RUNKERNEL(Level, type) \
conv1To8HDNL##Level <<<dimGrid, dimBlock, 0, stream >>> (inTex, surf1, param->orgW, param->orgH); \
conv8To8HDNL##Level <<<dimGrid, dimBlock, 0, stream >>> (surf1, surf2, param->orgW, param->orgH, L2); \
conv8To8HDNL##Level <<<dimGrid, dimBlock, 0, stream >>> (surf2, surf1, param->orgW, param->orgH, L3); \
conv8To8HDNL##Level <<<dimGrid, dimBlock, 0, stream >>> (surf1, surf2, param->orgW, param->orgH, L4); \
conv8To8HDNL##Level <<<dimGrid, dimBlock, 0, stream >>> (surf2, surf1, param->orgW, param->orgH, L5); \
conv8To8HDNL##Level <<<dimGrid, dimBlock, 0, stream >>> (surf1, surf2, param->orgW, param->orgH, L6); \
conv8To8HDNL##Level <<<dimGrid, dimBlock, 0, stream >>> (surf2, surf1, param->orgW, param->orgH, L7); \
conv8To8HDNL##Level <<<dimGrid, dimBlock, 0, stream >>> (surf1, surf2, param->orgW, param->orgH, L8); \
conv8To8HDNL##Level <<<dimGrid, dimBlock, 0, stream >>> (surf2, surf1, param->orgW, param->orgH, L9); \
convTranspose8To1HDNL##Level<type> <<<dimGridout, dimBlock, 0, stream >>> (surf1, outSurf, W, H);
inline __device__ float clamp(float f, float a, float b)
{
return fmaxf(a, fminf(f, b));
}
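// Layer-1 weights for HDN level 0: 8 filters x 9 taps, indexed as
// kernelsL1[n * 9 + tap] by CHANNEL1TO8.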
static __device__ __constant__ const float HDNL0kernelsL1[9 * 8] =
{
0.0609, 0.1027, -0.0447,
-0.1423, 0.7196, 0.1803,
0.0842, 0.0696, 0.0082,
0.0089, 0.1540, -0.8589,
0.0448, 0.8659, -0.2420,
-0.0364, 0.0585, 0.0125,
-0.1937, 0.7259, 0.0119,
-0.8266, 0.4147, 0.0088,
-0.0453, -0.0451, -0.0182,
0.0264, -0.9422, 0.1258,
-0.0543, 0.1282, 0.7102,
-0.0106, 0.0386, -0.0141,
0.2054, -0.0393, 0.1494,
0.3106, 0.5722, 0.2640,
0.1708, -0.1640, -0.0212,
0.0558, -0.2887, -0.1666,
0.3123, -0.3097, -0.2281,
0.2880, 0.3001, 0.0526,
-0.0320, 0.0584, -0.0193,
-0.0135, 1.0649, -0.1246,
0.0283, -0.3030, -0.6378,
-0.0040, -0.9122, 0.0181,
0.0365, 0.8947, -0.0420,
-0.0199, 0.0217, 0.0060
};
static __device__ __constant__ const float HDNL0biasL1[8] =
{
-0.7577, -0.0210, 0.0292, -0.0189, 0.0223, 0.0340, 0.0150, -0.0044
};
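// Hidden-layer weights for HDN level 0: 8 layers x (8 out x 8 in x 3x3) taps;
// see CHANNEL8TO8 for the indexing. The long runs of ~1e-40 entries are
// effectively zero (denormal) weights.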
static __device__ __constant__ const float HDNL0kernelsL[8][9 * 8 * 8] =
{
{
2.0611e-01, 6.6865e-02, -9.9123e-02,
8.5279e-02, -4.5549e-02, -2.9491e-02,
-1.0358e-01, -2.4844e-02, -8.1539e-03,
-1.1308e-01, -6.4228e-02, -8.8081e-02,
2.7810e-02, -1.6054e-01, -1.1985e-01,
-2.8679e-01, -1.7785e-02, 1.1559e-01,
2.1614e-02, -6.8870e-02, -2.4707e-01,
9.6867e-02, -1.6561e-01, 2.8281e-02,
-8.2469e-02, -9.8554e-02, -1.7147e-02,
3.3710e-01, 9.2126e-02, 3.6880e-02,
5.7004e-02, 4.0175e-02, 1.6116e-01,
2.5629e-01, 5.1154e-01, 2.4119e-02,
1.9495e-02, 2.6940e-01, -1.4050e-01,
5.0325e-02, -4.5920e-02, -1.3586e-01,
5.9458e-02, 1.3860e-01, -2.1065e-01,
-1.0744e-01, -1.5915e-01, -1.1528e-02,
-1.1470e-01, 6.3455e-02, -5.5558e-02,
-6.9920e-02, -3.0142e-02, -4.9059e-02,
3.6421e-01, 3.0252e-01, -1.3562e-01,
1.5238e-01, -1.9868e-01, -3.2644e-02,
-4.2849e-02, 1.3677e-02, 7.3854e-02,
7.6609e-02, -1.0121e-01, 3.6319e-02,
9.3536e-02, 6.0386e-02, 1.0086e-01,
-2.6630e-01, 2.5875e-02, -1.9225e-01,
4.0687e-02, 1.1005e-01, 9.9578e-03,
1.6939e-01, 5.0872e-01, 8.9876e-02,
6.9561e-02, 1.1910e-01, -1.8091e-02,
-3.5739e-02, -7.5300e-02, -1.6788e-02,
3.0316e-02, 1.5942e-01, -9.0878e-02,
-6.3737e-02, 2.6141e-02, 8.8040e-03,
3.4954e-03, -6.6707e-02, 1.4551e-01,
7.6258e-02, 1.4893e-01, -1.5255e-01,
6.2442e-02, 2.2166e-01, 7.5327e-02,
5.4785e-02, -1.4503e-02, -1.5188e-03,
1.6748e-01, -5.2731e-03, -1.9900e-02,
4.4786e-02, -1.0669e-01, 1.3192e-01,
1.9961e-02, -8.1015e-02, -3.2264e-02,
1.0544e-01, 1.8844e-01, 7.4274e-03,
6.6729e-02, -7.8318e-02, 3.0775e-02,
-8.6109e-03, 7.4977e-02, 9.4079e-02,
-1.2726e-01, -2.9664e-01, 7.8153e-03,
-4.8413e-02, -1.8450e-01, -7.1065e-02,
-8.7609e-02, -7.7192e-02, 5.0919e-02,
-1.4021e-01, 3.5696e-01, 1.2079e-02,
-2.0318e-02, -1.8827e-02, 3.9084e-02,
-2.8654e-02, -6.4166e-02, 5.4889e-02,
8.2689e-02, 8.4463e-02, 2.2339e-02,
1.0805e-01, -1.2566e-01, 1.7109e-01,
-6.1338e-02, -3.4043e-02, 4.0473e-02,
6.3821e-02, 1.7626e-01, -5.8112e-02,
-9.5002e-02, 1.3327e-02, 1.2242e-01,
4.9008e-02, -4.3678e-02, 2.2362e-02,
-7.7903e-02, -3.8252e-02, -5.2271e-02,
-1.8884e-02, -1.2859e-01, 4.1172e-02,
-3.1181e-02, 3.2348e-02, -4.9081e-02,
-6.7966e-02, -2.4896e-02, -6.5323e-02,
8.0742e-02, 2.6093e-01, -2.4638e-01,
-8.0881e-02, -2.9643e-02, -7.9627e-02,
1.4020e-01, 2.1575e-01, 8.1244e-03,
2.1561e-01, -2.9305e-01, -2.5535e-02,
-8.5538e-02, -1.4456e-01, -7.5664e-02,
-3.9921e-02, 4.0659e-02, 1.7812e-01,
1.1580e-01, 5.6628e-02, 9.0008e-02,
-2.2384e-02, -1.9788e-02, -4.0547e-02,
1.0070e-01, 2.9581e-01, 1.9936e-01,
-1.1957e-01, -8.6508e-02, -8.2543e-04,
-5.2879e-02, 1.5486e-01, 1.0829e-02,
1.4716e-01, 3.4257e-01, -3.2058e-03,
-2.1687e-02, 5.8641e-02, -6.3806e-02,
-3.2607e-02, 7.3328e-02, -6.4738e-03,
-1.0031e-01, -1.7698e-01, -9.4201e-02,
-3.3644e-02, -3.5860e-01, -9.3200e-02,
-7.4142e-02, 5.5001e-02, 4.3741e-02,
-2.2447e-03, 1.1941e-01, -1.6135e-02,
-1.4764e-02, -1.0194e-02, 3.2540e-02,
-1.0588e-01, -2.3000e-01, -1.1557e-02,
-9.0254e-02, 2.3352e-01, -1.3622e-01,
-1.9256e-03, -5.3372e-02, 1.0314e-01,
-2.0100e-02, 1.0700e-01, 1.6108e-01,
2.8422e-02, 2.7909e-01, 3.8342e-01,
1.4025e-02, 9.0965e-02, 2.0218e-01,
3.3562e-03, 7.6652e-02, 4.5974e-02,
-1.3617e-02, -1.4014e-01, -1.9253e-02,
1.1020e-01, -1.9678e-01, 6.7123e-02,
-3.3294e-02, -1.3006e-01, -1.0111e-01,
5.5813e-02, 2.1127e-01, 2.0248e-02,
-9.6386e-04, -5.2497e-03, 1.1134e-01,
2.8910e-02, 1.2229e-01, 1.8439e-01,
1.6413e-02, 1.5870e-01, -1.1616e-01,
-1.6032e-03, -6.8258e-03, -2.1883e-02,
1.2052e-01, -2.1982e-02, -1.3088e-01,
2.8664e-02, -5.0670e-02, 2.2927e-01,
2.0461e-02, 7.7250e-03, -2.6630e-02,
-9.0406e-02, -1.4174e-01, 9.8969e-02,
-6.6573e-02, -2.4425e-01, -3.5126e-02,
9.3859e-02, 1.9058e-01, -1.6569e-01,
-4.9163e-03, 7.4149e-02, 6.3345e-02,
-1.7888e-02, -9.1876e-02, 1.3728e-01,
-9.6098e-02, -3.4814e-02, -1.0862e-02,
4.8031e-03, 2.5206e-01, 8.0316e-02,
1.5102e-01, 4.1236e-02, 2.2339e-01,
2.8500e-01, 1.5106e-01, 9.6321e-04,
-6.0741e-02, 3.5759e-02, -1.8829e-01,
-1.1295e-03, -6.2322e-02, 8.4974e-01,
-3.9817e-02, -2.0666e-01, 2.2961e-01,
3.6857e-02, -2.0211e-02, -9.3342e-02,
2.0827e-02, 6.8874e-02, -6.0287e-02,
-6.9724e-02, 1.4423e-01, -7.6017e-02,
1.4718e-02, 1.8990e-01, 1.1789e-01,
-1.5018e-01, -2.3071e-01, 1.7511e-01,
-7.7605e-02, 5.0621e-02, -1.0381e-01,
8.6845e-02, -1.2410e-01, -4.4669e-01,
2.7930e-02, -5.4713e-02, -7.7923e-02,
8.6000e-02, -2.6371e-02, -8.6541e-02,
-1.1521e-01, 1.4389e-01, 5.0507e-02,
-1.6618e-02, -2.5150e-01, -4.9759e-02,
7.7166e-02, 4.5033e-03, -5.4649e-02,
2.8548e-03, -2.8078e-03, 8.1129e-02,
-4.5973e-02, 3.6740e-03, 2.0746e-01,
-9.8191e-02, 1.2807e-01, 8.1950e-03,
1.4240e-01, 1.5104e-01, 6.9624e-02,
2.2309e-01, 2.5688e-01, 9.4766e-02,
6.2560e-02, 7.1347e-02, 4.1432e-02,
-3.1829e-02, 1.5207e-01, 2.0575e-02,
-1.2506e-01, 2.9274e-01, 9.4712e-02,
-2.0520e-01, 4.9894e-04, 5.6171e-02,
-4.1567e-03, 6.6753e-02, -1.5767e-01,
6.3768e-02, 8.3008e-02, -3.5639e-01,
4.4660e-02, 2.6996e-01, -6.4014e-02,
8.5475e-02, 1.7854e-02, -6.4079e-02,
1.8760e-01, 1.5285e-01, -3.5614e-02,
1.0747e-02, -3.1330e-01, -4.8664e-02,
7.2150e-02, 1.7570e-01, 1.6716e-01,
6.2431e-02, 2.3755e-01, 2.8554e-01,
3.5791e-02, 2.8185e-01, 1.5810e-01,
-4.0886e-02, 1.8833e-02, -8.2903e-03,
1.3994e-02, -1.0846e-01, 3.5315e-02,
-6.2674e-02, 6.2806e-02, 2.2168e-02,
-3.6236e-01, -2.5326e-01, 5.6331e-02,
9.8762e-02, 3.8049e-01, 5.9885e-02,
-3.0541e-02, 7.9855e-02, -5.8639e-02,
1.1104e-03, 1.7147e-02, 3.3115e-02,
-3.3663e-02, 7.4615e-02, 6.4211e-02,
-7.3441e-02, -1.5568e-01, 7.6546e-02,
6.1802e-02, -1.5300e-01, -1.8209e-02,
-9.2786e-03, 1.6622e-01, 1.1354e-01,
9.5865e-03, -2.4226e-02, -1.4750e-03,
-5.5294e-02, -1.1839e-01, 3.8867e-03,
1.7262e-01, 4.2743e-01, 6.8970e-02,
-2.0232e-01, -1.4564e-01, 2.3025e-02,
-2.6139e-03, -1.6907e-02, 1.1693e-01,
-9.4871e-03, 3.8488e-02, -4.8351e-02,
-9.2171e-02, 4.8227e-02, 9.7378e-02,
-1.0292e-01, -1.2084e-01, -9.6676e-02,
1.8103e-02, 3.0658e-01, -7.7755e-02,
-2.4362e-02, -1.9862e-01, -6.9665e-02,
8.2944e-03, -1.4680e-01, -1.7371e-02,
-1.6534e-01, 2.5752e-01, 1.1129e-01,
-9.4151e-02, -1.3225e-01, 1.5933e-01,
9.0723e-02, 5.5469e-02, -1.4091e-01,
8.3404e-02, 1.3741e-01, -3.5438e-02,
3.2681e-02, 2.8491e-02, 1.4278e-02,
2.3789e-01, -2.3687e-03, -5.3264e-03,
-1.1161e-01, 1.9351e-02, 5.0832e-02,
8.2246e-03, 2.9892e-02, -3.7197e-02,
4.8236e-02, 1.6945e-01, 1.3673e-01,
1.1236e-01, 7.2318e-01, -4.1618e-02,
2.7494e-01, 1.0081e-01, -8.5399e-03,
-5.6151e-02, 8.1212e-02, -7.5770e-02,
2.7872e-02, 9.4644e-02, 1.1175e-02,
-6.1539e-02, 7.7395e-02, -3.2495e-02,
-5.1640e-02, 2.1028e-03, 1.5825e-02,
-1.1004e-01, 2.3153e-01, -6.1653e-02,
-2.6497e-02, 5.9461e-01, 4.0865e-02,
-1.9956e-02, 7.9328e-02, -1.7002e-02,
-5.5930e-03, 5.2015e-02, 7.7945e-04,
1.0136e-02, -9.0111e-02, -1.1175e-01,
-3.1781e-02, 1.4686e-01, -7.5718e-03,
1.1036e-02, 2.4618e-01, 8.5951e-02,
3.4775e-02, -1.2184e-01, 1.8010e-01,
-3.6781e-02, -1.3912e-01, -4.9172e-02,
3.3064e-02, 5.0582e-01, 1.0713e-02,
-1.2934e-02, -1.7697e-01, -1.4954e-01,
2.2229e-02, -5.8568e-03, -5.0186e-02,
1.9648e-02, -1.1302e-01, 1.5629e-02,
-3.5015e-02, 9.5032e-02, -2.9677e-02,
9.5173e-02, -3.0330e-02, -3.7652e-02,
-2.6097e-03, 7.4723e-01, -7.6234e-03,
-3.8826e-02, 1.0191e-01, 3.6589e-03,
-2.6503e-02, -1.1133e-01, -2.2029e-02,
-1.9101e-01, -2.1108e-01, -7.4371e-02,
-7.9349e-02, -1.0405e-01, 5.0315e-02
}
,
{
-4.2606e-02, -8.9001e-02, -6.4006e-02,
1.1132e-01, 7.6609e-02, 8.6417e-02,
7.6477e-03, -1.6416e-02, -8.2094e-02,
1.0779e-01, 2.1837e-01, 1.8094e-01,
-2.6306e-02, -1.2452e-01, 1.2662e-02,
3.1633e-02, 1.8717e-02, 3.1043e-02,
4.0927e-02, 5.0311e-02, 1.1648e-01,
2.2429e-01, 2.0757e-01, 4.3662e-03,
3.6341e-02, -4.7637e-02, 8.3645e-02,
-8.9260e-03, 1.8507e-02, 7.9069e-02,
-1.9411e-01, -8.6847e-02, -3.6639e-03,
4.0328e-02, -3.6821e-02, -8.5387e-02,
5.8173e-02, 5.9991e-02, -3.1398e-02,
1.5818e-01, 3.0861e-01, -2.3818e-02,
1.2176e-01, 6.7520e-02, 8.9401e-02,
-2.8859e-02, -1.2237e-01, -1.0625e-01,
3.1675e-02, 1.4172e-01, -1.4373e-01,
1.4653e-02, 1.0205e-01, 6.2557e-02,
-8.7292e-02, -2.1255e-02, 3.6830e-02,
-5.4417e-02, 3.0501e-01, 1.6897e-01,
-2.2187e-02, -8.9609e-02, -2.2830e-02,
4.9846e-02, 3.3395e-01, -3.1561e-02,
-1.3191e-02, 4.2663e-01, -6.9727e-02,
1.4570e-02, -4.0002e-02, 5.6394e-02,
-8.2547e-02, 1.9249e-01, 1.5591e-01,
1.4536e-01, -1.0409e-01, 1.2382e-01,
1.8189e-01, 9.2917e-02, -1.4394e-01,
-5.6260e-02, -2.7043e-01, 1.5392e-02,
-1.4305e-02, 1.1131e-01, -8.5913e-02,
7.7914e-02, -6.5484e-03, -1.8375e-01,
-1.4059e-01, -5.7339e-01, -3.9073e-02,
-1.1701e-01, -3.1806e-02, 7.7726e-02,
2.1688e-02, 9.9297e-02, 3.8224e-02,
7.9884e-02, 5.2461e-02, 1.0318e-01,
4.0054e-02, 1.4695e-01, 1.2577e-01,
-1.8790e-03, -4.9421e-02, 2.3235e-02,
-8.9820e-02, -1.6994e-01, -1.5986e-01,
2.3436e-01, -1.5346e-01, 1.5014e-02,
-3.9139e-02, -7.9388e-02, -4.9057e-02,
-1.1193e-01, -2.5705e-01, 1.1995e-01,
5.7929e-02, 2.4988e-01, -4.9406e-03,
-3.9363e-02, -1.1691e-02, -1.2236e-03,
-2.0521e-01, 2.1901e-01, 1.5957e-01,
2.1062e-01, -1.4157e-01, -3.4340e-01,
3.8520e-02, -2.0820e-01, 2.4570e-03,
1.7211e-01, 2.0214e-01, 1.3821e-01,
-7.1520e-02, 1.4847e-01, -1.3820e-01,
-2.4712e-02, -1.5925e-02, 1.7403e-02,
-3.7515e-02, 3.0461e-02, -2.7543e-02,
8.6148e-02, -6.1486e-02, 1.2610e-02,
2.9748e-03, 1.1778e-01, 2.9032e-02,
-2.1706e-02, -2.2406e-02, 2.6769e-02,
-3.6965e-02, 2.2180e-01, -4.0929e-02,
-3.2629e-03, 8.3419e-02, -1.4587e-01,
-1.3909e-02, -2.0166e-02, -1.0029e-01,
7.6360e-02, 8.0819e-02, -1.0933e-01,
-5.8919e-02, 2.4745e-02, 3.7375e-02,
-1.1333e-02, 1.4747e-02, -7.8958e-02,
-3.1535e-02, 1.7403e-01, 1.3946e-02,
-3.2038e-02, 5.1151e-02, -6.1063e-02,
-8.6472e-03, -6.9689e-02, 5.6846e-03,
5.7914e-02, -1.9818e-01, -7.5321e-02,
8.7453e-02, 7.8354e-02, 2.1997e-02,
-4.7606e-02, 1.3915e-01, 1.1653e-01,
9.6050e-02, 4.0099e-01, 1.5631e-01,
3.1492e-02, 2.4797e-01, 6.8716e-02,
-6.2664e-03, 9.1754e-02, -5.7244e-03,
1.3538e-01, 1.5366e-01, 9.4916e-02,
-4.2115e-02, -3.6585e-01, -1.4559e-01,
9.1550e-02, -5.4007e-02, 6.7482e-02,
-1.8687e-01, 3.2120e-01, 5.1031e-03,
-6.1205e-02, -5.1780e-02, 1.6442e-02,
-1.2316e-02, -1.3907e-01, -1.4446e-01,
-2.7899e-01, -8.5969e-02, -1.0870e-01,
-2.6157e-01, 8.9532e-02, 3.0958e-02,
-1.5393e-01, -4.2781e-02, -2.0951e-01,
2.0328e-01, 4.5317e-01, -3.0467e-02,
-6.1346e-02, 1.0381e-01, -1.3719e-01,
-9.8572e-02, -1.4035e-01, -1.9431e-02,
2.5542e-02, 3.2609e-01, 1.7983e-03,
-1.0800e-01, -2.9022e-02, 6.2691e-03,
2.8937e-02, -1.3483e-01, -4.1655e-02,
2.0172e-01, 1.4283e-02, 9.6200e-02,
1.9027e-02, 3.1240e-01, -2.9553e-02,
6.2776e-02, 1.3845e-01, 4.5834e-02,
-2.3854e-01, -4.0267e-02, 1.5634e-02,
-1.9246e-01, -3.2332e-02, 3.2442e-03,
-6.1880e-02, -8.8192e-02, -6.0172e-02,
2.5002e-01, 1.5148e-01, 6.4459e-02,
-2.1022e-01, -8.3893e-02, 6.9554e-03,
7.0244e-02, -2.9551e-02, 1.6481e-02,
-3.1036e-02, -2.0026e-01, -8.4748e-02,
-1.3108e-01, -1.3784e-01, 9.4900e-02,
-2.1256e-01, -4.1767e-02, 8.4665e-02,
-4.0235e-01, 1.0604e-01, -3.1827e-02,
-4.9825e-02, -9.1267e-04, 1.5527e-02,
-6.5729e-03, -1.8932e-02, -3.4591e-02,
1.1066e-01, 9.3979e-02, 2.6059e-02,
-1.2395e-01, -2.4768e-01, -1.6304e-01,
8.8329e-03, -2.1606e-02, -4.0878e-02,
-1.5581e-02, -1.4829e-02, -1.5959e-02,
-1.0463e-04, -4.2903e-03, -4.6657e-02,
2.2995e-02, 1.7917e-02, -9.1404e-02,
-1.2326e-01, 1.4582e-01, -7.0959e-02,
-1.8058e-02, -8.5228e-02, 4.2799e-02,
-2.2829e-03, 8.6577e-02, -1.1909e-01,
-1.8061e-01, 1.1166e-01, -8.2255e-02,
-1.3190e-01, 7.7123e-02, 2.3224e-02,
1.8661e-02, 2.4461e-02, 3.6060e-02,
-4.5224e-02, -1.7672e-01, 1.6080e-01,
-4.2175e-01, -2.2557e-01, -1.0719e-01,
-2.9506e-02, 9.5020e-02, -6.6465e-02,
-7.2627e-02, 3.1236e-01, 5.5764e-02,
-2.8789e-01, -1.8915e-01, 9.0825e-02,
-5.8618e-02, 6.4082e-02, 4.8461e-03,
-5.9405e-02, 3.2644e-01, -7.1278e-02,
-1.8084e-01, 2.0858e-02, -9.3690e-03,
-7.6565e-03, -9.6854e-02, 7.6121e-03,
1.4791e-01, 4.5612e-01, 1.9889e-02,
-5.5498e-02, -1.1266e-01, 2.2790e-02,
-3.8821e-02, -1.5780e-02, 1.2549e-02,
-3.8232e-02, -2.8870e-01, 2.6216e-02,
1.0375e-01, -2.9621e-02, 1.8479e-03,
5.0207e-02, 1.5189e-01, 1.2533e-01,
1.8298e-01, -1.2870e-01, 3.0681e-01,
-1.9571e-02, -8.6302e-02, 9.1121e-02,
1.0113e-01, -1.8362e-01, 3.2642e-02,
1.7034e-01, -3.1077e-01, -4.8737e-02,
5.9144e-02, 5.6052e-03, 3.2360e-02,
-9.0123e-02, 7.7996e-02, 3.6297e-02,
-3.4389e-01, 1.1841e-01, -2.0900e-02,
9.4930e-02, -9.1504e-02, -4.5308e-02,
3.7723e-03, -3.7580e-02, -6.6410e-02,
5.2501e-02, -1.2530e-01, 3.5944e-02,
3.8378e-02, 9.5188e-02, 2.1952e-03,
-2.4333e-02, 2.7977e-01, 5.6961e-02,
-3.0605e-03, 8.3684e-02, 4.4848e-03,
-7.8935e-02, -1.9544e-01, -5.3311e-02,
-2.6595e-02, 1.2278e-01, -3.1659e-02,
-1.0103e-02, 4.7763e-01, 2.5359e-02,
8.1397e-02, 3.0548e-01, 9.7097e-02,
3.6232e-02, -1.1091e-01, 1.2841e-01,
1.9277e-01, 2.9322e-01, -1.6740e-01,
1.2107e-01, -6.2883e-02, 4.0603e-02,
-1.5750e-01, -8.6183e-02, -1.4194e-01,
1.1932e-01, -3.9175e-01, -5.4495e-02,
-1.4001e-02, -2.0594e-01, -8.2683e-02,
8.6156e-02, 2.1499e-02, 2.2080e-01,
5.5703e-02, -3.6307e-01, 8.3129e-02,
8.9280e-02, -3.5897e-02, 1.6106e-01,
9.1171e-02, -3.1102e-01, 1.2425e-01,
1.0278e-01, -3.1014e-01, -6.9138e-02,
8.0839e-02, -3.6183e-02, 1.0341e-01,
-1.8334e-01, -5.3700e-02, 2.3336e-01,
-1.4464e-01, -5.0320e-01, -2.9836e-02,
-1.7225e-01, -3.9499e-01, -1.7321e-01,
1.7510e-01, 1.7897e-01, -2.6518e-01,
2.3638e-01, 5.0270e-01, -4.9731e-03,
2.2603e-01, 2.5317e-01, 2.4079e-01,
-1.3159e-01, 1.5638e-01, 1.2480e-01,
-6.2164e-02, 7.9458e-02, -9.4804e-02,
8.5690e-03, 7.4971e-03, 8.6630e-02,
-1.3148e-02, 6.8660e-02, -7.4230e-03,
2.9702e-02, 1.2036e-01, 9.5504e-02,
-3.2694e-03, 8.6722e-02, -6.2433e-02,
3.2527e-01, 3.2087e-01, -9.4429e-05,
1.3556e-01, -7.0413e-02, 2.9383e-02,
2.0617e-02, 3.3218e-02, 4.4898e-02,
-4.8260e-01, -2.1329e-01, 1.5890e-02,
-2.6600e-01, -8.8519e-02, -4.3800e-02,
-1.7299e-01, -2.0757e-01, -2.6658e-01,
6.9707e-02, -4.4700e-02, 6.5570e-02,
2.3992e-01, 1.5078e-01, 2.8713e-02,
-9.1197e-02, 1.9765e-02, -1.8751e-02,
-9.9277e-02, -3.1437e-01, 4.0730e-02,
2.4208e-02, -8.8322e-02, -1.6245e-01,
1.3037e-02, -3.4708e-02, -4.4285e-02,
-1.3592e-01, -1.3575e-01, -7.4546e-02,
1.4670e-01, -1.3366e-01, 2.1553e-03,
8.1235e-03, -1.2068e-01, -5.7287e-02,
1.8015e-01, 2.1390e-01, 8.6923e-03,
2.8833e-01, 6.6345e-02, 1.4578e-01,
2.2338e-01, 2.6453e-01, -2.9112e-02,
1.4018e-01, -9.2824e-02, -2.2795e-02,
1.2360e-01, 2.2527e-01, -1.1817e-01,
-3.8872e-02, -1.9982e-02, -7.7514e-02,
1.7744e-03, 3.1736e-02, 4.5882e-02,
-2.5222e-02, 2.4298e-01, -3.8596e-02,
1.2545e-02, 3.1872e-02, 7.1925e-02,
7.9782e-02, -1.5533e-01, -1.4619e-02,
-1.2223e-01, -1.8631e-03, -9.8832e-02,
-1.6815e-02, -8.1440e-02, 6.8038e-02
}
,
{
2.3898e-02, 1.2411e-02, -3.2770e-02,
-2.6029e-01, 3.2690e-01, -1.8246e-01,
1.1224e-02, 8.0193e-02, -5.0412e-02,
-9.3849e-02, 2.0325e-02, 2.6309e-02,
1.2266e-02, 1.7698e-01, 2.7049e-01,
1.2918e-01, 2.0190e-01, 2.7352e-01,
-7.2100e-02, 1.3357e-01, -1.3702e-01,
2.2527e-01, 1.5821e-01, -2.3104e-01,
1.0182e-02, -1.5499e-01, 7.1906e-02,
1.5865e-01, 7.0950e-02, -6.3336e-02,
2.2661e-01, -4.2997e-01, -4.2013e-01,
1.7549e-02, -1.3142e-01, -3.1663e-01,
1.3617e-01, 1.4229e-01, -1.0707e-02,
-1.0986e-02, 2.8816e-01, -3.6239e-01,
2.2579e-02, -1.4332e-02, 7.1339e-03,
-1.4357e-01, -9.7608e-02, 1.4646e-01,
-5.3856e-02, 3.3898e-01, -2.4936e-01,
-2.9500e-02, 2.1799e-02, 1.1901e-02,
3.6996e-02, 2.1291e-02, 3.2150e-02,
9.8375e-02, 2.4476e-01, 2.2896e-01,
1.8392e-01, -7.4510e-02, -1.0152e-01,
4.4757e-02, -4.8053e-03, -6.7254e-02,
-4.8370e-02, -7.8975e-02, -3.6007e-01,
-3.8160e-02, 8.7707e-02, -1.4986e-01,
-8.7544e-03, -4.3522e-02, 7.3822e-02,
-1.4523e-01, 1.1433e-01, 4.4109e-02,
-1.6025e-03, 2.5459e-02, -9.3562e-02,
-2.9192e-02, -1.0975e-01, -5.0943e-02,
-1.1215e-01, 1.9907e-01, 7.9934e-02,
3.7066e-02, 3.0796e-01, -1.4034e-01,
-8.2315e-02, -2.0182e-02, -1.2824e-02,
-4.8007e-03, 1.2655e-01, -2.5157e-02,
2.7796e-02, -4.3032e-02, 2.5397e-02,
6.9377e-02, 2.3642e-01, 1.2713e-01,
2.7878e-02, -1.5325e-01, -1.4871e-01,
1.5800e-02, -4.5935e-02, 1.7370e-01,
4.8058e-02, -1.8725e-01, -6.7048e-03,
-1.3932e-01, -6.0768e-02, -1.6976e-01,
-2.1189e-02, 1.0311e-02, -2.2970e-02,
-7.0546e-03, 7.9481e-02, 1.2146e-02,
4.2666e-02, 3.5383e-01, 1.4381e-01,
5.4384e-02, -9.3862e-02, 4.8870e-03,
2.1141e-02, -6.6826e-02, -1.8526e-01,
1.3309e-01, 3.3452e-01, 1.1058e-02,
-1.6967e-02, 1.1094e-01, 5.3230e-02,
3.0409e-02, -4.7613e-02, -1.7737e-01,
-1.6678e-02, -7.8644e-02, 1.1743e-01,
7.3322e-02, -1.1354e-01, -1.5737e-02,
-1.2397e-03, -1.4685e-02, -1.0192e-02,
1.6045e-01, 3.6331e-02, 1.2219e-01,
1.3123e-01, 5.7578e-02, 1.0291e-01,
1.7424e-01, 1.0688e-01, 1.4263e-01,
8.9942e-02, -2.7141e-02, 3.1238e-02,
-4.0240e-02, -1.0930e-01, -2.1276e-01,
1.0357e-01, 5.7673e-02, 1.0356e-02,
-2.0864e-01, -1.9405e-01, 2.5094e-01,
-4.8277e-03, -1.3758e-01, 1.1562e-01,
-1.0358e-01, 2.0631e-01, -9.1445e-03,
-1.7602e-01, 1.0200e-01, 3.0032e-02,
-1.1495e-02, -4.5077e-02, -6.4748e-02,
-2.3072e-02, -3.2342e-02, 1.4503e-02,
-3.7052e-02, -1.2206e-01, 5.5395e-02,
2.8331e-02, -4.2812e-03, 6.9807e-02,
4.3593e-02, -6.7373e-03, 1.2760e-02,
3.2896e-03, -2.4007e-01, -5.2920e-02,
2.5193e-02, -2.1480e-01, 8.4654e-02,
2.2642e-02, 8.2132e-02, -2.3864e-02,
-2.9726e-01, 8.0405e-02, -1.3190e-02,
-1.1310e-01, -4.4342e-01, -6.3536e-02,
-6.7090e-02, 1.1797e-01, 1.5315e-01,
7.7829e-02, -1.4494e-01, 1.0233e-01,
9.7059e-02, 1.2772e-01, -2.4394e-02,
-2.6179e-02, 2.6721e-02, 1.1707e-02,
-4.8024e-02, -2.3366e-01, -1.6978e-01,
-2.4402e-01, -2.8572e-01, -2.4053e-02,
-2.7451e-03, 7.1959e-02, 4.4706e-02,
-1.9900e-01, 2.1353e-01, 1.0625e-01,
4.0246e-01, 4.2323e-01, 3.4046e-02,
-1.6943e-01, -2.0221e-01, -1.6369e-01,
1.3882e-01, 2.1717e-01, -1.3581e-01,
1.3975e-01, 1.1980e-01, 1.8888e-02,
-1.8110e-01, -2.6143e-01, -1.0109e-01,
5.5844e-02, -1.2175e-01, 3.4447e-02,
8.9688e-02, 2.4641e-01, 2.3287e-01,
-5.8259e-02, -1.3656e-01, -1.3936e-02,
-8.3429e-03, 2.3026e-01, 1.2302e-01,
-2.2969e-02, 6.0932e-02, 3.4749e-02,
1.2910e-01, 2.4008e-01, 1.8908e-01,
-5.8776e-02, 3.8121e-01, 8.1312e-02,
9.1175e-02, -1.8729e-02, -4.6156e-02,
3.7493e-02, -3.5877e-02, -9.9651e-03,
1.5864e-01, 1.3611e-01, 6.7880e-02,
2.2216e-01, 9.3697e-02, 7.4782e-02,
-1.0861e-01, -2.5824e-01, 6.6455e-02,
9.2238e-02, -2.3448e-01, -3.4057e-01,
-2.9658e-01, 9.4698e-03, 1.9315e-01,
-5.2396e-02, 1.2310e-01, -5.2917e-02,
-4.3708e-03, 1.9560e-01, -2.4309e-02,
-6.7388e-02, -8.8839e-02, -2.0907e-02,
4.6550e-02, 3.4119e-02, 6.0977e-02,
-1.0054e-02, 1.4411e-01, 1.5622e-01,
1.7401e-02, 2.5685e-01, -9.1853e-03,
-4.4530e-02, -1.8623e-01, -8.4557e-02,
9.5962e-02, 2.6491e-01, 1.7854e-01,
-2.0547e-02, -1.2023e-01, -7.6897e-02,
-1.3418e-01, -1.4960e-01, 1.6292e-01,
-1.7275e-01, -6.0181e-02, -2.7034e-02,
-7.4189e-02, -3.5566e-02, 1.3995e-01,
3.0758e-02, 3.3476e-02, 6.9837e-03,
-6.1089e-02, -9.6021e-02, 7.1716e-03,
1.0389e-01, 4.7963e-02, 9.5921e-02,
4.4569e-02, 1.2230e-01, -1.4417e-01,
-1.2825e-02, 3.1980e-01, -3.5905e-01,
-1.2557e-01, -7.5283e-02, -1.2343e-01,
1.9791e-01, 7.9003e-02, 3.1163e-02,
1.0969e-01, 1.6839e-01, -2.5816e-01,
-1.2617e-01, 1.3686e-01, -2.1078e-01,
-2.1870e-02, -1.8378e-01, -2.8893e-01,
-8.2523e-02, -3.0475e-02, 9.6007e-02,
1.0669e-01, -1.4581e-03, 3.2441e-01,
-8.1872e-03, 1.1690e-02, -4.0179e-02,
-1.0835e-01, 3.6112e-01, -4.5990e-02,
-1.2355e-01, -1.3372e-01, 3.8136e-02,
-9.1530e-03, 3.5432e-02, 4.3950e-02,
-8.6859e-02, 1.5887e-01, 1.2796e-02,
1.3554e-02, -1.5669e-01, -1.4371e-02,
-4.6609e-02, 1.7114e-01, -7.8284e-02,
1.7611e-01, 4.1204e-01, 9.3281e-02,
1.1420e-01, 1.2951e-01, -7.6025e-02,
-5.4831e-02, 9.7574e-02, 3.2839e-02,
3.8475e-02, -6.0247e-02, -2.9627e-02,
-2.4367e-02, 1.3143e-02, 4.7017e-02,
2.3800e-02, -2.4046e-02, -5.7044e-02,
2.7280e-02, 7.8573e-01, 1.0079e-02,
6.4100e-02, 5.1584e-02, 7.9653e-03,
-8.9480e-02, -1.6207e-01, -8.9418e-02,
-3.5589e-02, 3.5903e-01, -1.8381e-01,
9.2356e-02, 8.8046e-02, -5.0229e-02,
1.8609e-02, 1.1243e-01, 5.2599e-02,
-1.3374e-02, -3.3097e-01, 6.5346e-02,
2.6760e-01, -1.0281e-01, 1.1607e-02,
7.6576e-03, -3.5957e-02, 3.1924e-02,
-7.0088e-02, 9.1241e-02, 1.2827e-02,
3.7165e-02, 7.0273e-03, -7.3945e-04,
-6.5406e-03, 7.2666e-02, -5.7348e-02,
-1.9100e-01, -7.4449e-02, -1.2496e-01,
1.5299e-01, -8.8047e-02, -2.1810e-02,
-3.0241e-02, -7.4310e-03, -8.7682e-02,
-2.2479e-02, 9.6008e-02, -8.4539e-02,
-2.8915e-02, 1.7538e-01, -3.7735e-02,
-9.8463e-03, -6.9618e-02, -2.6095e-01,
9.9950e-02, 5.0534e-01, -1.8812e-01,
-1.1986e-01, 7.1166e-02, -2.4769e-02,
8.8529e-02, 9.8348e-02, 2.1136e-02,
-9.0337e-03, 1.3679e-01, -1.2115e-01,
-6.2478e-03, 1.1436e-01, -3.4610e-02,
-2.7350e-02, 1.0702e-01, 1.6220e-02,
1.0912e-02, 1.0953e-01, 8.6762e-02,
2.9348e-03, -2.2035e-02, 1.2376e-01,
7.0102e-02, -1.0945e-01, -1.6640e-01,
-3.9916e-03, -2.6658e-02, -9.7031e-02,
-3.0047e-02, 1.6631e-03, -5.5031e-02,
-7.9624e-02, 1.9976e-01, 1.9582e-01,
2.1377e-01, 3.5835e-01, 1.7012e-01,
-9.7751e-02, 4.9143e-01, 1.0988e-01,
8.4055e-02, -7.3187e-03, -9.8808e-02,
5.0590e-02, -8.9291e-02, -6.6857e-02,
9.6737e-02, -3.0699e-01, 2.2889e-01,
2.6727e-40, -5.2704e-40, -4.5038e-40,
-3.3108e-40, 5.2330e-40, -1.2724e-40,
-3.2957e-40, -5.8613e-40, 2.1618e-40,
-4.3882e-40, -3.3950e-40, 5.9372e-40,
2.7277e-40, -1.3741e-40, -3.3597e-40,
5.0687e-40, 4.7873e-40, -3.2116e-40,
-6.1388e-40, -6.0790e-40, -5.2667e-40,
-5.6524e-40, -6.1696e-40, -5.9796e-40,
1.5824e-40, -5.2002e-40, -5.8960e-40,
-5.9860e-40, 3.6419e-40, 2.9975e-40,
-5.8988e-40, 3.3994e-40, -5.0611e-40,
3.6410e-40, 2.9550e-40, 4.7468e-40,
2.7503e-40, -3.4103e-40, 6.0339e-40,
-1.7691e-40, 6.7170e-41, 1.7101e-40,
2.7166e-40, 4.3023e-40, 2.7735e-40,
-3.1937e-40, -4.9247e-40, -6.2495e-40,
5.2938e-40, -3.3702e-40, 1.4976e-41,
1.4031e-40, -4.6995e-40, -5.2409e-40,
2.5460e-40, 2.6670e-40, -4.5339e-40,
4.2896e-40, -5.7141e-40, -1.7003e-40,
2.3597e-40, 1.3748e-40, 4.6163e-40,
4.0680e-41, -6.1642e-40, 2.7304e-41,
5.2250e-40, -3.9481e-40, -6.1808e-40,
1.9462e-40, 2.6005e-40, -2.7281e-40
}
,
{
1.3625e-02, -8.5594e-02, -1.9901e-01,
-6.4636e-02, -1.9030e-02, 4.1963e-02,
-7.5507e-02, -2.4474e-01, -4.2621e-02,
2.8195e-02, 7.3102e-02, -9.3331e-02,
7.7093e-02, 1.7800e-01, -7.6451e-02,
2.8565e-02, -1.3540e-01, -1.9169e-01,
-1.8583e-02, 3.0135e-02, 8.1094e-03,
-1.2835e-01, -1.8041e-01, -8.9020e-02,
-8.2731e-02, 3.7861e-02, -9.4014e-02,
4.6595e-02, 2.2052e-02, -1.5867e-01,
-1.0937e-02, 1.0030e-01, -1.3018e-01,
-9.1844e-02, -1.7508e-01, 2.2087e-01,
-9.3080e-02, 9.8069e-02, -7.0154e-02,
-6.6063e-02, -2.2142e-01, 4.1058e-01,
-6.5947e-02, -5.4662e-02, 9.9412e-02,
-5.1938e-02, 3.0932e-03, 1.8126e-01,
3.6701e-02, -3.0349e-01, 9.9839e-02,
2.5810e-02, 2.3644e-01, -2.4461e-01,
2.1054e-01, 1.5630e-01, -1.9587e-01,
5.0146e-02, -1.8844e-02, 3.6675e-01,
-4.0389e-03, 3.1596e-01, 3.6771e-03,
-2.2256e-40, 1.4272e-40, -2.0732e-40,
5.5913e-40, -6.0538e-40, 1.2791e-40,
4.5825e-41, 4.1080e-41, -1.8211e-40,
2.2687e-01, -5.8992e-02, 4.7796e-03,
6.0603e-01, 2.7961e-01, 1.5973e-02,
2.3035e-01, 1.3031e-01, -9.9280e-03,
-4.7235e-02, 5.1773e-02, -4.8586e-02,
-1.4510e-01, -1.7336e-01, 1.0981e-01,
-2.0303e-01, -1.6008e-02, -1.8524e-03,
-2.3440e-01, -3.2373e-02, -6.7911e-02,
-1.6256e-01, 1.2316e-01, 2.7859e-02,
8.5089e-04, -3.7401e-02, -1.8672e-02,
-1.0418e-01, -7.8407e-02, -1.8413e-02,
8.2834e-02, 2.3128e-01, 3.2983e-02,
3.1099e-02, -6.4485e-02, -8.1659e-02,
1.9152e-01, -1.9609e-02, 2.7364e-02,
1.0458e-02, -1.2507e-01, 4.1334e-02,
-4.6215e-02, 5.6944e-02, 2.1477e-02,
-1.4934e-01, -6.8383e-02, 2.7957e-02,
-3.6846e-01, 4.8766e-01, 6.4000e-02,
-3.9621e-02, -8.1667e-03, 4.5997e-02,
-6.1391e-02, 1.2976e-02, -3.2152e-02,
7.5767e-02, 1.2931e-01, -2.3498e-02,
4.0320e-02, 1.3876e-02, 1.1022e-02,
-6.2401e-41, 5.8564e-40, 3.9473e-40,
-5.6890e-40, -2.6022e-40, -2.9841e-40,
-4.2456e-40, -1.1546e-40, 4.4955e-40,
-4.2969e-02, -1.0995e-01, 1.3021e-01,
1.0142e-01, 5.2225e-01, -5.5486e-02,
-7.2349e-02, 8.5470e-02, 2.3438e-02,
-1.0690e-01, -1.4370e-01, -1.2632e-01,
2.8754e-02, 1.1662e-01, 5.6515e-02,
-1.5726e-01, -1.4945e-01, -4.4956e-02,
1.6574e-01, -5.6894e-02, -2.0851e-01,
8.1498e-03, -2.5441e-01, -1.4412e-01,
-1.0959e-02, -2.5811e-02, 8.8934e-02,
6.3594e-02, -9.3314e-02, 7.8247e-02,
4.6795e-02, -2.2774e-01, 7.1041e-02,
1.4830e-01, 1.9911e-01, 5.1978e-02,
7.4936e-02, 2.3104e-02, 6.3928e-02,
-1.3118e-02, 6.7544e-02, 7.9514e-02,
2.2335e-02, -9.9442e-02, 6.8070e-03,
2.4395e-02, -3.3576e-02, 5.5508e-02,
-4.0872e-02, 5.4501e-02, -5.7051e-02,
8.6621e-03, -1.5361e-01, 1.2630e-01,
-2.2344e-01, 1.3335e-01, -1.1688e-01,
-2.4232e-01, 3.3319e-01, -1.2580e-01,
-2.2169e-02, 2.0594e-01, 2.6521e-02,
4.1883e-40, -3.4540e-40, 4.9152e-40,
-1.5711e-40, 3.3927e-40, -5.5069e-40,
5.5831e-40, -5.2011e-41, 1.0351e-40,
1.7989e-01, 2.3787e-02, 5.7447e-03,
4.8748e-01, 3.0152e-01, 3.5517e-02,
2.2155e-01, 1.8812e-01, 3.0994e-02,
7.8657e-02, -7.1135e-02, -5.8293e-02,
-1.4220e-01, 1.6004e-02, -2.5180e-02,
-1.6811e-01, -2.3441e-01, 1.4810e-02,
5.3140e-02, -1.2904e-01, -1.5105e-02,
5.4525e-02, -1.5418e-01, 6.6507e-02,
8.3947e-02, -1.1975e-01, 5.3902e-02,
8.0834e-02, -2.4321e-01, -1.0282e-03,
3.1276e-03, 3.2495e-01, -1.3238e-02,
4.5285e-02, 5.8777e-02, -1.3231e-01,
-6.0928e-03, 8.7145e-02, 6.2031e-02,
-5.3919e-01, -6.8810e-02, -1.0755e-01,
-2.2571e-02, 2.6237e-02, -6.8731e-03,
-6.6771e-02, -2.0586e-01, 4.7722e-02,
-3.4968e-01, 3.0912e-01, 2.4487e-01,
-4.9537e-02, -5.2779e-04, 6.7840e-02,
1.7583e-02, 3.3222e-02, -5.7070e-02,
-2.3250e-01, 1.4470e-01, -4.9895e-02,
3.3147e-02, 8.6319e-02, 4.4719e-02,
-6.9454e-41, 2.0308e-40, -1.1977e-40,
5.9045e-40, -2.6129e-40, 4.8298e-40,
4.7288e-40, 6.0736e-40, 2.2462e-40,
-4.0294e-02, -9.1437e-03, -2.4926e-02,
-2.1269e-01, 1.1602e-01, 1.4383e-02,
5.1456e-02, 6.9047e-02, 1.6519e-02,
6.3737e-02, -9.0181e-02, 7.0716e-02,
7.0061e-02, 7.9046e-02, -4.3925e-02,
7.4396e-02, -5.2797e-02, 3.8125e-02,
7.5999e-02, -5.1307e-02, 2.4326e-03,
-3.1716e-02, -1.2567e-01, -3.3898e-02,
8.4925e-02, -5.2404e-02, 2.8535e-02,
9.6844e-03, 4.6980e-02, 3.8552e-02,
-5.7110e-02, 3.2163e-02, 1.5219e-02,
6.6905e-02, -2.7934e-02, 1.4184e-03,
-2.4239e-02, -8.6317e-03, -2.3295e-03,
-2.3065e-02, 1.0076e-01, 2.1562e-03,
-1.3647e-02, -3.4262e-02, 2.5777e-02,
7.6601e-02, 1.3654e-01, 2.1458e-03,
1.4542e-01, 3.6310e-01, 1.6266e-01,
-5.8465e-02, 4.3751e-02, 1.9227e-02,
9.1783e-03, -5.9547e-02, -1.8234e-02,
-5.3399e-02, 1.9218e-01, -4.6238e-02,
-1.9052e-01, 1.4635e-02, 2.9536e-02,
1.4621e-40, -5.5132e-40, -4.6215e-40,
4.3948e-40, -2.7285e-40, -5.5709e-40,
1.9428e-41, -4.0333e-40, -5.4469e-40,
9.3126e-02, -1.3236e-01, 9.9350e-02,
-1.3308e-01, 3.5030e-01, 9.2221e-02,
1.1783e-01, 1.6648e-01, -7.9150e-02,
2.2654e-01, -1.2546e-01, -1.2354e-01,
-1.6457e-01, -6.0740e-02, -3.1069e-02,
-8.3203e-02, -1.8064e-01, 4.6900e-02,
1.2059e-01, -1.0569e-01, -7.1196e-02,
-9.2991e-02, -1.7587e-01, 1.3100e-03,
-1.5492e-01, -1.3849e-01, 1.2245e-01,
-5.5276e-02, -9.7867e-02, 3.5550e-02,
-6.0264e-02, 4.7760e-02, 6.0242e-02,
-5.4096e-03, 2.4646e-01, 6.3592e-01,
5.8559e-02, 6.1117e-02, 8.0334e-02,
-4.4582e-03, -1.2028e-01, 8.7394e-02,
-2.5880e-02, -1.2206e-01, 1.2199e-01,
4.1990e-02, -1.3283e-01, 4.9047e-02,
-4.9532e-02, 2.7688e-01, -4.6064e-03,
-2.8812e-03, -2.4404e-01, 5.8614e-02,
-1.4262e-01, -1.2810e-03, -1.2060e-01,
-8.3595e-02, 5.6532e-02, -7.7556e-02,
-1.3364e-01, -1.3883e-01, -1.2335e-01,
-1.3273e-40, 6.5184e-41, -4.6946e-40,
-4.0031e-40, -1.2807e-40, -3.1584e-40,
1.3009e-40, 2.4187e-40, -1.4202e-40,
-8.8844e-03, 1.0101e-03, -6.0190e-02,
-1.8851e-01, -7.6662e-02, -1.4562e-01,
2.9983e-02, -8.1533e-02, 1.1256e-02,
1.0205e-01, 6.7850e-02, -1.0911e-01,
-1.2846e-01, -5.4605e-02, 6.2182e-02,
-1.0797e-01, -5.1281e-02, -1.2036e-02,
-8.1693e-02, -7.0432e-02, 1.6990e-01,
-1.7329e-01, -2.2084e-01, -3.0977e-02,
8.2771e-02, -3.3089e-01, -1.4842e-01,
1.9576e-02, -1.5953e-01, -1.0348e-01,
6.6014e-02, 6.0094e-01, -6.9891e-04,
7.4969e-02, -1.4250e-01, 4.3221e-02,
1.6796e-02, -6.8125e-03, 4.7028e-02,
-3.3421e-01, -2.2987e-01, 4.2936e-02,
9.3985e-04, 9.0827e-02, 2.4211e-01,
-8.1571e-02, -1.0276e-01, 1.9092e-01,
2.1112e-01, 2.6837e-02, -2.5822e-01,
-1.3290e-01, 1.6135e-01, -2.7672e-02,
3.4465e-01, -8.3286e-03, -6.1936e-02,
2.7406e-01, -6.8357e-02, 1.7426e-01,
-9.0872e-02, 1.2999e-01, 7.2366e-02,
3.0944e-40, -1.2808e-40, 2.9336e-40,
5.5561e-42, 3.0978e-40, 1.0027e-40,
-1.5881e-40, -2.9858e-40, 3.1599e-41,
-9.1935e-02, -2.2666e-04, -6.2821e-02,
-1.8605e-01, 3.0238e-01, 3.2759e-02,
-5.0771e-02, 1.4585e-02, -1.0872e-01,
2.5511e-02, -9.3394e-02, 1.4810e-02,
-6.2906e-02, 9.2472e-02, 1.2845e-02,
-2.9041e-01, -9.6489e-03, -2.7277e-02,
-6.9896e-02, -1.1645e-01, -5.9870e-02,
-2.8037e-02, -2.2649e-01, 5.1781e-02,
-1.4588e-02, 4.8753e-02, -2.8256e-02,
-1.6462e-02, 8.0795e-02, 3.6222e-02,
8.0392e-02, 3.0118e-01, 2.0021e-01,
1.0394e-01, 6.4196e-01, 4.9545e-01,
2.1242e-02, -1.2514e-01, 1.0066e-01,
-4.7676e-02, -2.0736e-02, -5.6951e-03,
-8.3021e-02, 4.6763e-02, 1.7551e-01,
2.0038e-02, 1.8084e-01, 1.3244e-02,
1.0280e-02, 2.8740e-01, 8.9837e-03,
-2.9437e-02, -3.7366e-01, -1.1861e-01,
-4.8248e-03, -1.2970e-01, -1.8680e-02,
1.8458e-01, 5.6509e-02, 1.2734e-01,
1.9423e-01, -3.6960e-01, -2.5555e-02,
6.7959e-41, -3.2251e-40, -3.0631e-40,
-4.0701e-40, 9.7399e-41, 2.2917e-40,
2.0169e-40, 5.7891e-40, -4.1286e-40
}
,
{
5.6253e-02, 1.0118e-02, -8.2749e-02,
-6.4074e-02, 4.0723e-02, 1.1657e-02,
-1.1560e-01, -3.5596e-03, -2.6713e-02,
-7.9090e-02, -2.9223e-01, 1.5759e-01,
6.8756e-02, 1.5738e-01, 1.5413e-01,
-6.1288e-02, -1.2536e-01, -1.5966e-01,
1.1165e-01, 5.0211e-02, -1.0338e-01,
-5.2364e-04, 1.7660e-01, -2.2504e-03,
-1.7697e-01, 1.8500e-02, 2.0693e-02,
-2.5907e-02, -1.4201e-01, 8.4467e-02,
1.1138e-02, 2.1769e-01, -4.2422e-01,
6.5046e-02, 2.6834e-02, 2.9047e-03,
-1.2130e-01, -5.1773e-01, -8.0393e-02,
3.0204e-02, 3.5952e-01, 1.6681e-01,
-9.4720e-04, 7.7291e-02, 8.3039e-02,
3.4689e-01, -1.2389e-01, -2.0666e-01,
-2.9650e-02, 1.1102e-01, -1.4782e-01,
3.2193e-02, -3.9862e-02, 1.6440e-02,
-8.4264e-02, 1.0192e-01, -6.4256e-02,
2.2950e-02, -6.6511e-02, -6.3814e-02,
4.3744e-02, -1.0557e-01, -1.2045e-02,
1.6330e-01, 6.6130e-01, 1.5497e-01,
1.7103e-01, 1.5073e-01, 1.7400e-01,
9.0985e-04, 1.0917e-02, -1.3322e-02,
-6.4273e-02, -6.2178e-02, -7.7223e-02,
-1.0332e-01, -2.1072e-01, -2.2843e-03,
3.2717e-02, -6.3754e-02, 5.0359e-02,
-5.2566e-02, 6.2090e-02, -1.5614e-02,
1.4570e-02, -1.0243e-01, 1.3091e-01,
-2.9988e-02, -7.5897e-02, -9.4541e-04,
-2.7999e-01, -4.7415e-03, 5.6419e-02,
7.0565e-02, -4.9273e-01, -1.2936e-01,
5.5685e-02, -5.8924e-03, -3.1967e-02,
8.8602e-02, 2.9337e-01, 1.3753e-01,
1.0063e-02, 1.6348e-02, 1.0063e-01,
3.6230e-02, 1.7968e-02, -1.1624e-01,
-2.2488e-02, 1.3474e-01, -1.1419e-01,
2.8576e-02, -7.4794e-02, -7.7261e-02,
5.8874e-02, -2.9448e-03, 6.0207e-02,
1.4642e-01, 1.2321e-01, -2.4936e-01,
2.2609e-02, -2.8171e-01, 1.1510e-01,
2.6056e-02, -2.7532e-02, -4.7505e-02,
-2.8762e-02, -1.2610e-02, -8.3766e-02,
-5.0992e-02, -5.7269e-03, -7.0981e-02,
-9.6191e-02, -9.2384e-02, -5.3328e-02,
2.3989e-01, 3.9819e-01, 1.8451e-01,
3.6888e-02, 1.1023e-01, 4.4804e-03,
-4.4140e-03, -4.8275e-03, 2.0018e-02,
-2.4346e-02, -6.5546e-02, -4.6065e-03,
2.2298e-01, 2.8810e-01, 1.4071e-02,
-1.7315e-01, -5.7961e-02, -9.9136e-02,
3.6456e-02, -1.5518e-02, 6.4490e-02,
4.6983e-02, 5.2743e-02, 3.0802e-01,
6.7940e-02, 5.8777e-03, 3.1155e-01,
9.9510e-02, 2.7974e-02, -6.6716e-02,
3.7042e-01, 2.0813e-01, -3.1581e-02,
7.9064e-02, -1.3699e-01, -4.4722e-02,
-8.4753e-03, 8.0676e-02, 1.5771e-01,
-1.1467e-01, 5.6269e-02, 1.1369e-01,
-1.4727e-02, 3.7263e-02, -2.0554e-01,
8.3383e-02, 4.5848e-02, -1.1732e-02,
4.5494e-02, -2.1406e-01, 6.0591e-02,
4.6503e-02, -1.0362e-01, 3.8794e-02,
-4.6633e-01, 1.4504e-01, 1.4999e-01,
2.9642e-01, -4.8807e-01, -1.6012e-01,
1.6708e-01, 9.5313e-02, -7.5981e-02,
-4.2655e-02, 9.2470e-02, -7.7242e-02,
-2.1021e-01, 1.2423e-01, 1.4967e-02,
-5.4129e-02, 7.4355e-02, -4.7068e-02,
-1.6048e-01, 9.8742e-02, 4.4282e-02,
-6.0187e-02, 1.9495e-01, 8.3291e-02,
-7.5190e-02, -6.8429e-02, 3.7391e-02,
5.1413e-04, 1.5098e-01, -1.1549e-01,
1.6875e-01, 1.8040e-01, -1.3162e-01,
7.7101e-02, 2.0816e-01, 7.6289e-02,
-1.7528e-02, 1.4408e-02, 3.7500e-02,
3.8647e-02, 1.6850e-01, 1.7535e-02,
-2.8205e-02, 1.0273e-02, 1.6688e-01,
4.3676e-02, 6.9895e-02, 8.1063e-03,
-2.6117e-01, -1.0920e-01, 5.2209e-02,
-5.2749e-02, -1.7062e-02, -9.6808e-02,
2.7324e-02, 9.1342e-02, -5.0968e-02,
1.0689e-01, 5.0565e-01, 4.6004e-01,
-6.6862e-03, 3.4162e-03, 3.3559e-01,
3.5084e-02, 1.9123e-02, 1.0073e-02,
1.6995e-01, 3.4099e-01, -4.0847e-01,
-5.5317e-03, 4.0230e-02, -2.0305e-01,
-8.9786e-02, 1.9667e-01, 3.8111e-02,
3.0607e-02, -1.9084e-02, -6.5114e-02,
8.5394e-02, -1.3992e-01, 1.4988e-02,
-1.5926e-02, -9.1200e-03, -7.2328e-02,
1.3548e-01, 7.1040e-01, -9.4208e-02,
2.5411e-03, -7.2159e-02, 1.0848e-01,
-8.9029e-02, -8.6339e-02, -2.7546e-02,
6.0378e-02, 2.8401e-01, -6.6550e-02,
-3.0486e-02, 5.0307e-02, -1.1084e-02,
2.9732e-02, 9.9960e-02, -7.7408e-02,
3.4940e-01, -5.6048e-01, 2.9053e-02,
-2.6991e-02, 4.9637e-02, -3.9322e-02,
-1.0418e-02, 1.0931e-01, -6.1609e-02,
3.6057e-02, 9.3866e-02, -1.0339e-01,
-1.8572e-02, -2.0889e-02, -7.4531e-02,
-7.3236e-02, -4.5908e-02, 2.2705e-02,
-1.5148e-02, 2.1735e-01, 2.2477e-02,
-3.4153e-02, -2.6939e-02, -5.0167e-03,
6.6774e-02, 2.0168e-01, -7.5083e-02,
5.6608e-02, 2.2799e-01, -3.7473e-01,
-7.2336e-02, 4.4329e-02, -3.6747e-02,
3.5355e-02, 1.8671e-01, -4.0167e-02,
1.2871e-01, 3.5050e-01, 1.8090e-01,
-6.2429e-02, 6.2184e-02, 6.8804e-02,
-8.0164e-02, -2.4387e-02, -5.0309e-03,
1.0089e-01, -3.0008e-02, 1.7251e-02,
-9.4662e-03, -1.4760e-02, 7.3434e-03,
7.3290e-02, 2.2546e-02, -2.9015e-02,
7.9944e-02, -2.6972e-01, 7.1349e-02,
-1.7026e-02, 1.1461e-01, -4.1288e-02,
-5.3732e-02, -2.4618e-01, -1.2890e-02,
8.6133e-02, 1.9503e-01, 8.2202e-02,
-1.0060e-03, -4.5931e-04, -1.8789e-02,
-4.0843e-02, -7.8149e-03, -6.1464e-02,
-7.9364e-02, -5.9647e-02, -5.4059e-03,
1.9553e-01, -2.4079e-01, -7.9538e-03,
5.3620e-02, 1.4198e-01, 6.5651e-03,
2.3512e-02, -2.6609e-02, -4.6435e-02,
1.2499e-02, 5.1079e-02, -2.2713e-02,
-7.1554e-02, 1.0608e-01, 5.8972e-02,
1.8638e-01, -2.1053e-01, -6.4009e-02,
1.0851e-01, 7.2187e-02, 8.9722e-02,
-4.5365e-04, 1.0826e-01, -6.4141e-02,
-2.3874e-02, -4.6307e-02, -2.7813e-02,
1.8385e-02, 9.4687e-02, 6.8374e-02,
9.4526e-02, 1.4432e-02, 1.5937e-01,
1.1292e-01, -3.4274e-01, -1.0813e-01,
-7.4636e-03, 3.7101e-02, 3.7226e-02,
3.7079e-02, -3.9169e-02, -3.7752e-02,
-7.9021e-02, 8.5978e-02, 1.0958e-02,
-5.8576e-02, 5.5931e-02, 4.8301e-02,
-1.3402e-01, -3.3809e-01, -4.4369e-02,
1.4262e-01, 6.5254e-02, -3.3366e-01,
1.2416e-02, -9.0492e-02, -5.8205e-02,
-1.4886e-01, 4.0598e-02, -1.4219e-01,
2.0223e-03, -2.8673e-01, -3.3622e-01,
1.9191e-02, -2.2104e-02, 1.9048e-02,
6.0021e-02, 2.2520e-01, -5.3972e-02,
1.6226e-01, -2.1918e-01, -5.2117e-02,
-6.2363e-03, 2.0266e-01, -7.3323e-03,
1.1137e-01, -1.9300e-02, -5.4983e-02,
-1.8338e-01, 6.2511e-01, -1.7909e-01,
1.7003e-01, 1.7902e-01, 5.4462e-02,
5.6847e-02, -7.4696e-02, -1.1354e-02,
1.0544e-01, -1.4918e-01, 4.8208e-02,
-5.6262e-02, -2.3303e-01, -2.9916e-02,
-3.3261e-02, 1.3287e-01, 1.9831e-02,
-1.3907e-01, -1.6180e-01, -7.2323e-03,
-5.1689e-02, 6.3121e-02, -1.4480e-01,
1.1143e-01, 4.9625e-02, -5.4369e-02,
-3.9247e-01, 2.3412e-01, -3.6726e-02,
-1.1468e-02, 3.4045e-02, 6.6454e-02,
-5.0103e-02, 6.1740e-02, 4.2922e-03,
1.7669e-01, -8.1250e-03, 6.3694e-03,
-6.7723e-02, 7.4576e-02, 1.0113e-02,
1.1264e-01, -4.4691e-02, -5.3575e-02,
3.4691e-02, -1.2201e-02, -8.4221e-02,
2.3677e-01, 3.9073e-01, 2.4710e-02,
-8.4580e-02, -1.0747e-01, -6.5695e-02,
1.5386e-01, 1.4041e-01, 6.9961e-03,
2.6138e-02, 2.3149e-02, -1.8820e-02,
-3.3541e-02, 3.2089e-02, -1.8916e-02,
1.0564e-01, -7.5319e-02, -5.4282e-02,
-6.9388e-03, -2.0873e-02, 5.6100e-02,
2.3524e-02, -6.4296e-02, 5.8950e-02,
-3.1415e-03, -4.1203e-02, 1.0781e-01,
1.7848e-02, -2.9535e-02, -1.6412e-02,
-4.6649e-02, 8.1277e-02, -5.9918e-02,
8.1522e-02, -9.2037e-02, 8.1039e-03,
-6.5541e-02, 5.1811e-02, -1.4380e-03,
5.0419e-02, 9.3091e-03, -2.8054e-02,
-3.0979e-02, -2.5366e-02, 3.5265e-02,
-3.7730e-02, 5.7574e-02, 3.4683e-02,
4.8819e-03, -2.9519e-02, 3.7740e-02,
6.4546e-02, -3.7272e-01, -8.5393e-02,
-3.0223e-02, -7.7899e-02, 2.7365e-03,
2.2282e-02, -3.3440e-02, 1.9048e-02,
2.3275e-02, -2.1153e-02, -2.0385e-02,
-4.6245e-02, 2.2443e-02, -3.0206e-02,
-2.5302e-02, -1.1418e-02, 4.8228e-02,
5.8367e-02, -4.3062e-02, 2.2814e-02,
-4.6279e-02, 5.0052e-02, 2.2961e-02,
-5.4984e-02, 1.4773e-01, -2.5546e-02,
3.3025e-02, -1.0138e-01, 6.3886e-02,
1.2403e-02, 1.6215e-02, 1.0783e-02
}
,
{
2.5042e-02, -5.3266e-02, 3.8484e-02,
3.7189e-03, 1.0493e-01, 1.4459e-01,
-3.7442e-02, -1.5744e-01, 1.9957e-01,
-1.9203e-02, 1.6256e-02, 4.2906e-03,
-3.1637e-02, 5.0287e-01, -6.9504e-02,
1.4677e-03, -8.9984e-02, -9.0376e-02,
4.0578e-02, 2.4004e-02, 3.4044e-03,
7.5916e-02, -1.3564e-01, -9.0296e-02,
3.4156e-02, 7.2494e-02, -2.0037e-02,
-6.4614e-02, -1.7301e-03, -3.3444e-02,
-2.7950e-01, 7.1351e-01, 4.2825e-02,
2.4797e-02, 5.4162e-04, -8.9676e-02,
3.8002e-02, -2.7692e-02, -1.7757e-02,
1.9356e-01, 1.9598e-02, -1.0862e-01,
2.5734e-02, 1.1703e-02, -7.3912e-02,
-6.0213e-04, 1.6024e-01, -6.4591e-03,
3.1779e-02, -3.1049e-01, 1.2684e-02,
-1.0098e-01, -1.8839e-01, 5.1387e-02,
5.2004e-02, 3.1489e-01, 5.9716e-01,
-7.2238e-02, 3.4332e-01, -2.0655e-01,
1.1013e-03, -5.0328e-02, -4.6118e-02,
9.4442e-04, 2.7964e-02, 1.7672e-02,
-8.6022e-02, -3.8280e-02, 2.8017e-04,
3.3824e-02, -6.7883e-02, 1.0529e-02,
-6.5982e-02, 1.1385e-01, 3.0091e-03,
1.2330e-01, 6.1876e-01, 5.7145e-02,
-4.3835e-02, -6.8186e-01, -1.0917e-01,
3.2006e-02, -2.0627e-03, -6.9043e-02,
7.2219e-02, -3.2393e-01, -2.6657e-02,
1.3523e-02, 1.8099e-01, 4.9168e-02,
7.1367e-02, 9.8283e-02, 1.0425e-01,
2.2286e-01, -5.9374e-01, 1.0014e-01,
6.5700e-02, 1.3618e-02, -7.4045e-02,
1.0481e-01, 3.0734e-02, 1.0431e-02,
-2.1314e-01, -7.2817e-02, 1.2036e-01,
-5.4180e-02, 1.0500e-01, 2.7821e-02,
-5.0657e-02, 8.7702e-02, 7.0234e-02,
9.0349e-02, 1.4905e-01, 1.1612e-01,
5.9924e-02, 2.4928e-01, 1.7078e-01,
-5.9110e-02, -7.4252e-02, 9.8241e-03,
-1.2006e-01, 1.3879e-01, -1.4322e-02,
-7.5463e-02, 1.4407e-02, -6.9202e-03,
7.0279e-02, 1.7065e-01, -2.5150e-01,
-2.6289e-02, 3.8421e-01, -2.2051e-01,
-2.8918e-02, 4.0074e-02, -7.1296e-02,
1.0357e-01, -1.8885e-01, 2.3780e-02,
-1.8884e-01, -4.3326e-01, -1.1465e-01,
3.3497e-02, -1.3462e-01, -3.4127e-02,
-1.2731e-02, 5.4326e-02, -2.6581e-02,
5.1753e-02, 6.8200e-03, 4.3246e-03,
-6.9963e-02, -1.5618e-01, 2.5192e-01,
2.2890e-02, 6.1421e-02, 5.2832e-02,
-9.8369e-02, -1.1452e-01, 1.7420e-01,
2.0392e-01, -1.1322e-01, 9.8462e-02,
-3.3547e-02, -2.8993e-01, 7.0080e-02,
8.2478e-02, -1.9881e-01, 1.2849e-01,
-2.7802e-01, -1.5621e-01, 6.2712e-02,
1.3028e-02, 1.4716e-01, 2.0434e-02,
-4.4071e-01, 3.8359e-01, -1.6655e-03,
-2.0297e-01, 1.5631e-01, 7.7086e-02,
9.6714e-03, -5.5842e-03, 7.9155e-03,
1.4525e-01, -3.2228e-01, 1.1454e-01,
1.4527e-01, -3.0399e-02, -6.7043e-02,
9.4233e-03, -1.1296e-02, -1.0927e-01,
7.9300e-02, 5.5286e-02, -1.1558e-01,
3.8173e-01, -5.4351e-02, -1.7890e-01,
5.4882e-02, 1.5119e-01, 1.8363e-01,
-8.8223e-02, -9.0083e-02, 4.8221e-01,
4.0890e-02, 5.6429e-02, -2.8538e-01,
1.2102e-02, -1.8177e-02, -3.1643e-03,
-6.9064e-02, 3.1853e-04, -7.0113e-02,
9.7308e-02, 1.0691e-01, -6.5919e-02,
-1.4536e-40, -1.7049e-40, -2.6781e-40,
4.5792e-40, 1.4489e-40, 1.3645e-40,
-5.8774e-40, -2.2505e-40, -4.7571e-40,
3.3670e-40, 1.5398e-40, -3.3819e-40,
2.6303e-40, -1.9434e-40, -5.5555e-40,
-4.3830e-40, -2.8750e-40, -3.0788e-41,
5.6364e-40, 3.1307e-40, -2.3064e-41,
2.8909e-40, -5.8115e-40, 2.9852e-41,
-1.9273e-40, -7.5503e-41, -6.0335e-40,
5.8073e-40, 2.9252e-40, -1.3038e-40,
5.2260e-40, 3.8172e-40, -2.0389e-40,
-2.1905e-41, 1.8473e-40, -2.9226e-40,
2.9957e-41, 2.6068e-40, 6.1324e-40,
-4.3013e-41, 5.1421e-40, -4.1157e-40,
2.1416e-41, -1.6614e-40, -3.0843e-42,
-4.3402e-40, 2.8507e-40, 1.1560e-40,
3.8826e-40, -3.0797e-40, -6.0685e-40,
5.4170e-40, -6.1858e-40, 9.3049e-41,
-1.9491e-40, -1.9211e-40, -6.2723e-40,
3.9906e-40, 1.2356e-40, 3.8682e-40,
2.8630e-40, 6.2303e-40, 5.3034e-40,
-4.1904e-40, 4.8916e-40, -3.6125e-40,
-5.5393e-40, -2.4980e-40, -6.1877e-40,
2.7289e-40, -1.8348e-40, -5.6663e-40,
2.5152e-02, -3.2878e-02, 2.1626e-02,
1.9879e-01, 2.9080e-02, -3.0331e-03,
-2.3380e-01, -2.3578e-02, 1.1871e-01,
-3.1824e-02, -5.5095e-02, 3.1338e-02,
-3.2199e-02, -4.3820e-01, 4.1391e-02,
-4.1207e-02, 3.7475e-01, -1.8548e-01,
-1.4460e-02, -8.7834e-02, -3.2343e-02,
2.4023e-01, 7.1916e-01, -1.8559e-01,
-6.7635e-03, -9.4409e-02, -1.7890e-02,
-5.8334e-02, 1.8886e-01, 6.1547e-02,
-2.6152e-01, 6.6722e-01, -1.2486e-01,
-4.8128e-02, 1.0510e-01, -4.2619e-02,
3.0101e-03, 9.6380e-02, 6.6140e-02,
1.0201e-01, -2.3240e-01, -1.8356e-01,
4.0019e-02, 2.2985e-01, -1.2980e-01,
-1.1400e-01, -1.9221e-01, -3.4158e-02,
2.2871e-02, -6.8684e-01, -1.0856e-02,
2.6311e-02, 2.5422e-02, -1.5190e-02,
3.2182e-02, -5.6346e-02, 3.2655e-02,
-1.6912e-02, 8.4264e-02, -7.9521e-02,
1.2788e-03, -7.1110e-02, 8.6585e-02,
-4.2829e-02, 1.0778e-01, -6.8129e-02,
5.8156e-03, -2.3998e-01, 1.9052e-01,
-4.1855e-02, 1.0140e-01, -1.7139e-02,
5.2301e-40, -2.9923e-40, 3.8688e-41,
3.1575e-40, 1.1504e-40, 5.5655e-40,
-3.4499e-40, 2.3050e-40, -6.3766e-41,
1.3282e-40, 4.5849e-40, 3.5308e-40,
-2.6657e-41, 5.9829e-40, 3.2791e-40,
-2.8348e-40, 2.5810e-40, 5.5791e-40,
4.2613e-40, 3.2607e-40, -2.0789e-40,
-3.9054e-40, -2.5608e-40, -2.7638e-40,
4.5027e-40, 2.7065e-40, -4.5593e-40,
1.6336e-40, -2.0391e-40, -5.9017e-41,
-7.9899e-41, -2.9870e-40, 5.6390e-40,
-2.5560e-41, -1.9786e-40, 9.4700e-41,
-7.4049e-41, -2.3902e-40, -2.8497e-40,
-1.8912e-40, -1.5589e-40, 5.5463e-40,
-2.1782e-40, -1.9532e-40, -2.3785e-40,
2.7539e-40, 4.0214e-40, 2.0732e-40,
7.0120e-41, -4.4200e-40, 7.3787e-41,
2.6452e-40, 1.1970e-40, 2.8298e-40,
5.2721e-40, 1.9304e-40, -3.8489e-40,
-3.9759e-40, 2.6184e-40, 1.2594e-40,
1.5831e-40, 3.7179e-40, -3.4915e-40,
-1.7681e-40, -6.9657e-41, -4.0746e-40,
8.0894e-41, 1.6950e-40, -1.0574e-40,
-1.0590e-40, 2.8466e-41, -2.7558e-40,
-5.4027e-40, 4.4355e-41, -3.2144e-40,
-4.8838e-41, -3.8595e-40, 2.5064e-40,
4.0365e-40, -1.0195e-40, 4.8356e-40,
4.4499e-40, -4.4871e-40, -2.4561e-40,
4.1687e-40, 5.2239e-40, -5.7603e-41,
-1.5211e-40, -3.5768e-40, 3.6385e-40,
1.6089e-40, 4.1624e-40, 4.5114e-40,
1.6438e-40, -3.6331e-40, 6.4961e-41,
5.0899e-40, 6.1036e-40, 2.4828e-40,
5.8681e-40, -5.7259e-40, -1.5371e-40,
5.2654e-40, 4.7412e-40, -2.0265e-40,
-4.8621e-41, 4.9497e-40, 3.0176e-40,
4.2235e-40, 4.5381e-40, 4.6501e-40,
-1.6124e-40, -1.9449e-40, 5.1497e-40,
-1.2891e-40, -1.6549e-40, 4.8348e-40,
-2.0735e-40, 1.3423e-41, -4.4109e-40,
-5.4218e-40, -1.1537e-40, -1.1664e-40,
5.6006e-40, 3.4109e-40, -3.1434e-40,
3.4969e-40, -5.3459e-40, 3.9245e-41,
2.4028e-40, 5.7774e-40, -6.2973e-40,
1.8802e-40, -4.6258e-41, -5.0716e-40,
3.4962e-40, -6.2313e-41, -2.7290e-40,
-5.2709e-40, -3.2225e-40, 2.4245e-40,
-3.6300e-40, -2.0794e-40, 4.0541e-40,
-3.5157e-02, 6.8337e-02, 1.6149e-02,
-5.8650e-03, 6.0605e-01, 3.1738e-02,
9.3306e-02, 2.1499e-01, 1.3609e-01,
6.4043e-02, -1.0253e-02, -6.2813e-04,
4.6828e-02, -3.9619e-01, -9.2633e-03,
-8.1752e-02, 9.9083e-02, 4.4296e-03,
7.1594e-02, 3.9860e-02, 8.1088e-02,
1.7750e-01, -1.2381e-01, 1.4476e-01,
2.3416e-02, 1.2819e-01, 1.0816e-02,
5.5296e-02, 5.5199e-02, -2.1253e-02,
1.7214e-01, 2.0542e-01, -3.7859e-03,
1.2831e-01, 3.2087e-02, -5.1851e-02,
-2.3686e-02, 1.2271e-01, -1.6009e-02,
-2.0176e-01, 7.4757e-01, -3.4526e-02,
-4.7055e-02, -3.7099e-01, -1.9216e-01,
-8.8030e-02, -2.5853e-02, -1.7087e-02,
-2.0533e-01, 1.5214e-01, -1.8639e-03,
-1.1236e-01, -2.4612e-01, 6.3094e-02,
2.3829e-02, -5.0078e-03, 5.3854e-02,
-9.6934e-03, 3.7047e-02, 4.7325e-01,
5.6975e-03, -8.6108e-02, 6.5569e-02,
-3.9768e-03, 2.0580e-02, -4.1931e-02,
6.9577e-02, -1.0416e-01, -2.5037e-03,
-1.9198e-02, 6.2027e-02, -1.0833e-02
}
,
{
-5.3430e-40, 2.5717e-41, 5.7504e-40,
7.1679e-41, 6.2076e-40, -8.4201e-41,
-4.2111e-40, 3.4851e-40, 1.3009e-40,
3.3016e-40, -7.6473e-41, -1.8392e-40,
2.2773e-41, 1.2087e-40, 1.1565e-40,
6.5190e-41, 2.0075e-40, 2.5796e-40,
5.0575e-40, -2.6261e-40, -2.5486e-40,
-3.9886e-40, -6.0644e-40, 2.9264e-40,
8.9627e-41, -3.0550e-40, -2.3456e-40,
-4.8855e-40, -4.8867e-40, -5.0492e-40,
-1.0706e-40, 5.3827e-40, -1.6413e-40,
1.4714e-40, -3.4024e-40, -4.4881e-40,
3.2361e-40, 2.0858e-40, 3.8836e-40,
2.0949e-40, 5.9633e-40, -1.7878e-41,
-4.1980e-40, -4.4383e-40, 2.7859e-40,
7.0317e-42, -8.9973e-41, 5.8700e-41,
1.8411e-40, -3.6097e-42, 2.7362e-40,
5.4341e-40, 6.0305e-40, 5.9004e-40,
5.2692e-40, -6.3449e-41, 1.2075e-40,
7.5297e-41, 8.9267e-41, 4.9139e-40,
-1.4609e-40, 3.1821e-41, 2.3288e-40,
3.1748e-41, -3.8052e-40, -2.4322e-40,
-5.7959e-40, 6.1966e-40, 3.4964e-40,
-5.6776e-40, -6.8327e-41, -3.3777e-41,
-5.9108e-02, 3.5468e-02, -2.8772e-02,
6.8602e-01, 1.4232e-01, 1.1954e-02,
-3.8234e-02, 7.1837e-02, -1.8832e-02,
4.7972e-02, 1.1623e-02, -2.1687e-03,
-4.9744e-01, 2.7751e-01, 1.7862e-02,
7.4286e-02, 3.1309e-03, 1.1030e-03,
-6.1084e-01, -8.5679e-03, 9.4956e-03,
-4.5246e-01, -1.2126e-01, -3.7368e-02,
2.5624e-02, 1.2087e-02, -1.5431e-02,
6.0313e-40, 1.8404e-40, -7.2006e-41,
6.0697e-40, -9.1199e-41, 5.8965e-40,
5.4830e-40, 1.3014e-40, 1.5585e-41,
-3.6027e-02, -6.3004e-03, 1.5237e-02,
6.0743e-01, 9.2523e-02, -4.7370e-03,
3.4407e-02, -8.3823e-02, 1.6898e-02,
5.7527e-40, -5.0621e-40, -2.9035e-42,
3.8199e-40, -2.2913e-40, -5.0895e-40,
4.0079e-40, 5.1744e-40, -3.3006e-40,
6.1448e-40, 1.2347e-40, -3.1673e-40,
7.3214e-41, 5.2143e-40, -2.6071e-40,
1.6109e-40, -2.0298e-40, 9.5817e-41,
6.9876e-02, -2.9290e-02, 3.2294e-03,
-4.2632e-01, 1.5789e-01, 3.6809e-02,
2.1220e-02, 1.6531e-04, 6.8502e-03,
-6.5221e-02, 8.8059e-02, 5.7934e-03,
-1.7280e-01, 1.5303e-01, 1.7663e-01,
-1.2908e-01, -1.1749e-01, 5.7887e-02,
1.0685e-01, 2.2763e-01, 3.3796e-02,
1.7629e-01, 3.8882e-01, 6.3540e-02,
6.4707e-02, 1.0046e-01, -8.1911e-02,
-3.9718e-03, 4.6416e-02, 4.7357e-02,
7.3694e-02, -1.6444e-01, 2.4784e-02,
-3.0808e-03, 2.7399e-02, -2.9216e-04,
2.4428e-40, -3.0160e-40, 2.3184e-40,
-4.9114e-40, 5.6685e-40, -3.6020e-40,
2.2618e-40, -2.8145e-40, 2.1149e-40,
2.3559e-02, -8.6949e-02, -3.8350e-02,
-2.9547e-01, 7.0187e-01, -8.3979e-02,
-2.8576e-02, -1.6538e-01, -5.2465e-02,
-1.6016e-40, -1.4760e-40, -2.1977e-40,
4.3180e-40, 4.1724e-40, -1.2969e-40,
-1.3023e-40, -1.0095e-40, -1.5965e-40,
-4.0721e-40, -4.1747e-40, -4.3706e-40,
-4.2838e-40, -4.5507e-40, -4.6023e-40,
-3.7435e-40, -3.9889e-40, -4.2249e-40,
-1.2429e-01, -3.5062e-01, -1.1418e-01,
-4.0787e-02, 6.1690e-01, -1.0085e-01,
1.6098e-02, 8.5100e-02, -1.1621e-02,
3.0709e-40, -4.4880e-40, -2.7530e-41,
-1.2649e-40, -5.3936e-40, 5.0995e-41,
4.4003e-40, -2.1211e-40, -6.6422e-43,
-1.8989e-40, -3.6631e-40, 4.1392e-40,
-3.9057e-40, -5.5599e-40, 6.9979e-41,
3.8983e-40, 5.6737e-41, 2.3997e-40,
-9.4862e-41, 2.4256e-40, -3.7040e-40,
1.6374e-40, 3.5439e-42, -1.0385e-40,
3.6145e-40, -2.4342e-41, -3.0115e-40,
-6.0009e-40, -5.2386e-41, -1.2504e-40,
2.9237e-40, -1.2290e-40, -1.1502e-40,
-3.5887e-40, -6.1810e-40, -1.6289e-41,
2.5438e-41, 5.1229e-40, -2.4915e-40,
1.3516e-40, 3.3553e-40, 8.5831e-41,
-8.5122e-41, 3.7625e-41, 2.5507e-40,
-1.5828e-40, 2.1991e-40, -1.5628e-40,
-5.3110e-40, 5.1395e-40, -5.8162e-40,
-3.1571e-40, -5.5139e-40, 1.2299e-40,
4.8855e-40, -9.3940e-41, -6.2534e-40,
-3.3275e-40, -2.4982e-40, -1.2956e-40,
-6.0047e-40, -1.8712e-41, -7.3274e-42,
-2.8519e-40, 3.5541e-40, 2.4485e-40,
-8.1435e-41, -2.7091e-40, 7.1206e-41,
-5.9519e-41, -2.5552e-40, -3.6189e-40,
7.7038e-02, -1.6317e-02, -2.4118e-02,
-4.3086e-02, -2.1512e-01, 1.2288e-01,
1.8237e-01, -1.5438e-01, -1.1346e-01,
-4.6141e-02, -4.0750e-02, -5.6414e-04,
-1.5640e-01, -3.4506e-01, -1.4441e-02,
-2.0278e-01, -3.1403e-01, -6.2542e-02,
-1.9622e-02, 1.6348e-02, 6.9859e-03,
-9.3142e-02, 1.0368e-02, -5.6585e-02,
8.4213e-02, 1.0776e-01, -1.0315e-01,
8.7873e-41, -5.3947e-40, 1.1714e-40,
7.5534e-41, -1.1871e-40, -5.4012e-40,
3.8269e-41, -1.4913e-40, -3.1802e-40,
-3.4707e-02, 1.2518e-02, 9.4679e-03,
1.2254e-01, 1.9394e-01, 2.6530e-02,
2.2413e-01, -1.6298e-01, -6.1446e-02,
-1.1042e-42, -2.7255e-40, -5.5067e-40,
3.8272e-40, 4.9956e-40, -3.2074e-41,
2.8351e-40, 4.2501e-40, 3.9389e-41,
6.1941e-40, -4.8790e-40, -3.4137e-40,
2.2577e-40, -5.7183e-40, -8.6861e-41,
5.7021e-40, -3.2349e-40, 1.9655e-40,
9.1180e-02, 5.6665e-02, -6.5437e-04,
1.1759e-01, 2.7517e-01, 1.9143e-01,
9.7905e-02, 6.6707e-02, 8.6535e-02,
8.8717e-03, 3.0913e-02, 6.6909e-03,
-8.1791e-02, -4.7883e-01, 7.4920e-02,
4.5843e-01, -1.0410e-01, 1.6655e-01,
-4.7094e-03, 3.4769e-02, -1.3291e-02,
-8.5570e-03, -4.0038e-01, 1.8418e-01,
-1.4696e-01, 3.2279e-01, 2.5712e-02,
-2.6207e-01, -4.6150e-02, -6.4099e-02,
-3.2623e-01, -1.8984e-01, -5.7891e-02,
-2.2088e-01, -4.2042e-02, -2.5307e-02,
1.0260e-40, 5.0443e-40, 7.5150e-41,
1.4402e-40, -5.1952e-40, -5.3810e-40,
6.2240e-40, 1.8661e-40, -8.2983e-41,
7.1850e-02, 4.8770e-02, -1.5081e-02,
4.8072e-01, 2.5477e-01, 3.8197e-02,
2.6011e-01, 2.4610e-01, -3.6167e-02,
3.8901e-40, 1.6760e-41, 2.8471e-40,
3.1983e-40, 1.2460e-40, -4.3961e-40,
3.9187e-40, 2.7818e-40, -9.1501e-41,
-2.3320e-40, -1.9998e-40, -2.8132e-40,
-2.9552e-40, -3.9643e-40, -5.1375e-40,
-1.6686e-40, -5.3138e-40, -2.6988e-40,
2.5623e-02, 2.6942e-02, 2.4342e-02,
-9.9084e-02, 5.2974e-01, -6.7983e-02,
-2.2454e-01, 1.1507e-01, 2.0364e-02,
3.4852e-01, -3.1091e-01, 8.1154e-02,
-3.2205e-01, 1.7103e-01, 2.4162e-01,
-2.6892e-03, 2.4142e-02, 5.5540e-02,
-4.5753e-02, -5.0097e-01, 1.7503e-01,
1.4058e-01, 1.1311e-01, 1.5945e-01,
-5.3975e-02, 5.2326e-02, -6.2382e-02,
9.4114e-02, -5.6812e-01, -1.2081e-01,
-8.5809e-02, -9.8661e-03, -2.3064e-02,
-1.6453e-03, -1.8328e-02, 2.4282e-03,
1.5943e-40, 4.6894e-40, -6.2730e-40,
3.8054e-40, -3.7914e-41, -1.4429e-40,
1.6925e-40, 5.1566e-41, -1.7909e-40,
-3.7920e-02, 2.4698e-01, 5.0019e-02,
-1.4246e-02, 2.8739e-01, -5.4704e-02,
7.9436e-02, -2.7838e-02, -3.4191e-02,
-3.3565e-40, 2.1368e-40, 6.7346e-42,
5.6681e-40, -5.5776e-40, -2.7705e-40,
-2.2966e-40, 1.1692e-40, -2.5187e-40,
4.4806e-40, -4.8424e-40, -9.1436e-41,
-4.3250e-40, -2.0721e-40, -2.0050e-40,
-5.1061e-40, 2.6405e-40, -3.0913e-40,
-1.2078e-01, 3.1948e-01, 1.0082e-02,
-1.0781e-02, 8.0720e-02, -4.6330e-02,
-1.8084e-02, -2.2846e-02, -5.5861e-03,
-3.2400e-02, -1.7329e-01, -2.7995e-02,
-5.3680e-02, 4.1310e-01, -9.4691e-02,
7.6938e-02, -4.9596e-02, 1.9649e-01,
3.2594e-02, 1.1544e-01, -1.8501e-02,
7.0248e-02, -6.9838e-02, -5.4278e-02,
-2.9317e-02, -1.4890e-01, 7.8661e-02,
3.7685e-02, 5.9594e-02, 8.9527e-02,
2.2957e-01, -2.9681e-01, -1.6329e-01,
-1.3206e-01, -4.3808e-02, 3.8854e-02,
1.7529e-40, -3.8429e-41, 1.4443e-40,
-4.0829e-40, -2.5643e-40, -5.4821e-40,
1.6827e-40, -1.1628e-40, 2.2441e-40,
5.2451e-02, 1.0179e-01, 4.8487e-02,
-2.1020e-01, -4.4345e-01, -8.7642e-02,
7.0958e-02, 1.9934e-01, -2.1090e-02,
-3.0795e-41, 2.7921e-40, 2.8491e-40,
-2.1154e-40, 9.8876e-41, -8.8824e-41,
2.6552e-40, 2.5767e-40, -3.8369e-40,
6.1348e-40, -3.4170e-40, -1.7109e-40,
-3.3080e-40, 5.4199e-41, -1.7512e-40,
1.8363e-40, -4.4080e-40, -2.5508e-40,
-4.0716e-02, -2.8531e-01, 3.9981e-02,
2.2278e-02, 5.6661e-01, -8.3890e-02,
-7.7331e-02, -9.3843e-02, 1.5584e-02
}
,
{
-3.6751e-40, -5.4562e-41, 6.1860e-40,
8.9003e-41, 5.5262e-40, 3.9537e-40,
-2.1258e-42, -3.1069e-40, -7.6225e-41,
-1.2220e-02, -8.6886e-02, 1.0714e-02,
1.1656e-02, -7.3635e-02, 5.9427e-02,
4.8518e-03, 1.3543e-01, 1.4668e-02,
-1.7505e-02, -2.0691e-02, -1.4507e-02,
2.6157e-02, 7.4109e-02, 1.2822e-02,
-1.9737e-02, -4.9281e-02, 8.5962e-03,
5.6236e-40, 2.4616e-40, 1.6384e-40,
-3.9469e-40, -1.7094e-40, 1.9285e-40,
-1.3634e-40, -1.5785e-40, 6.4184e-41,
-1.2752e-02, 2.3150e-02, -5.3355e-03,
-5.9667e-02, -3.9580e-01, -7.0033e-02,
-2.2612e-02, 1.9176e-02, 1.0588e-02,
8.0027e-04, 3.2242e-01, -2.2566e-02,
8.7850e-03, -2.4025e-01, 4.6123e-02,
-1.9038e-02, -8.5750e-03, -4.8153e-03,
-1.3049e-03, -5.7771e-03, 9.6437e-03,
3.2477e-02, 2.4482e-01, 4.0580e-02,
1.3194e-02, -4.6602e-01, -6.6163e-02,
-1.0647e-01, 7.3328e-02, 2.5871e-02,
-7.0883e-02, -9.2725e-02, -1.5185e-02,
1.1804e-02, 1.7784e-03, -4.4099e-03,
-4.9226e-40, -1.3081e-40, -3.5969e-40,
4.3539e-40, -2.9631e-40, 2.3531e-41,
5.6191e-40, 6.1545e-41, -1.1112e-40,
-1.1880e-02, -3.1884e-02, -2.0850e-02,
-6.8633e-03, 1.6422e-01, 1.0281e+00,
3.5887e-03, 2.1180e-01, -1.0094e-01,
-1.5103e-02, -4.9074e-02, -1.7702e-02,
7.2119e-02, 3.3199e-02, -9.7082e-04,
5.5383e-02, 1.0343e-01, 2.5156e-02,
2.9049e-40, -1.6397e-40, -8.8848e-41,
-6.2827e-40, 8.1281e-41, 5.2909e-40,
-4.1132e-40, 1.5751e-40, 1.5400e-40,
-7.3765e-02, -4.9723e-02, 4.9357e-02,
-2.4207e-02, -1.0291e-01, -1.4001e-03,
-1.2751e-02, 4.2805e-03, 1.8934e-03,
2.6862e-02, 1.1634e-01, 4.5666e-02,
-4.7351e-03, -4.1593e-01, 3.6082e-02,
1.1446e-02, -5.2026e-03, 1.8672e-02,
-7.0960e-04, -6.7877e-03, 9.6674e-03,
-4.9952e-03, 8.8664e-02, -2.7707e-02,
8.5309e-02, 5.5513e-02, -7.6230e-02,
3.6354e-02, 9.7794e-02, 1.1687e-02,
2.6847e-02, 3.2565e-01, -8.7710e-03,
-2.0372e-02, -1.9090e-02, -3.2566e-03,
-5.5592e-40, 7.4408e-41, 3.5576e-40,
2.7758e-40, 4.5458e-41, -6.2347e-40,
9.9739e-41, -1.6078e-40, -5.2900e-40,
1.1500e-02, -3.0675e-01, -3.0079e-02,
1.5080e-02, -2.4292e-01, 1.2736e-01,
-1.9513e-02, -1.9376e-02, -8.5960e-02,
-1.0241e-01, -2.1312e-02, -3.1999e-02,
-6.3598e-02, 1.5187e-01, 1.2279e-01,
1.5695e-03, 1.1376e-01, 5.2648e-03,
2.6415e-40, 3.0508e-40, 3.6407e-41,
-1.4403e-40, 2.8942e-40, -1.0089e-40,
2.2362e-41, 1.9843e-40, -1.5509e-40,
1.3269e-01, -3.1031e-01, -4.4091e-02,
4.6385e-03, 2.1411e-02, 5.7141e-02,
2.0724e-02, -3.5406e-02, 2.5717e-03,
-5.5922e-02, 7.1404e-01, -2.9852e-02,
1.3041e-02, 3.9373e-02, -2.4515e-01,
4.4278e-03, 2.1557e-02, -8.4940e-03,
1.3677e-02, -3.5183e-02, 1.2391e-02,
-9.2405e-02, 2.9650e-01, 6.9695e-02,
-3.3125e-02, 3.4700e-01, 1.4552e-01,
2.7357e-02, 5.2133e-01, -5.7571e-02,
2.7580e-02, 1.0381e-01, 1.3678e-02,
4.9260e-03, -4.4419e-02, 7.0651e-04,
2.9472e-40, -5.2892e-40, -3.6567e-40,
4.9403e-40, -6.2132e-40, -6.2920e-40,
-1.5156e-40, -3.6134e-40, 5.2432e-40,
-5.0427e-03, -2.8247e-03, -5.3734e-02,
-1.5918e-02, 1.8325e-01, -1.7834e-01,
-5.1774e-03, 8.0009e-02, 5.6296e-03,
3.1480e-02, 2.0665e-02, 2.7806e-04,
7.3085e-02, 7.7660e-01, 1.1979e-01,
1.9979e-02, 1.6629e-01, 2.3216e-02,
-5.9701e-40, 9.5583e-41, 1.8231e-40,
-3.3216e-40, -4.1253e-40, -3.3326e-40,
1.7131e-40, 2.9588e-40, -2.2520e-40,
-1.3337e-01, -4.2777e-01, -1.3569e-01,
2.9915e-02, -2.7016e-01, -3.7454e-03,
-1.3574e-02, -3.6298e-02, -1.6571e-02,
4.2530e-02, -4.2299e-02, 1.4320e-01,
1.4371e-02, -1.1289e-01, -3.8829e-02,
5.1689e-03, 1.5804e-02, 1.6125e-03,
-3.4601e-03, -7.2087e-03, -5.5514e-04,
4.4568e-02, 1.3621e-01, -4.3811e-02,
1.1350e-02, -2.8417e-01, 3.1553e-02,
-7.8854e-02, -2.0316e-01, 7.7746e-03,
-1.1437e-02, 2.1557e-01, -1.9479e-02,
-1.3511e-02, -2.0339e-02, -1.0276e-02,
-8.8977e-41, 5.9533e-40, -3.1413e-40,
-3.1892e-40, 5.5204e-40, -5.0634e-40,
-2.4932e-41, 4.3474e-41, 6.2961e-40,
4.7864e-03, 5.7125e-02, -1.5468e-02,
-3.9614e-03, -2.9042e-02, 2.8347e-01,
-1.0133e-02, 8.2745e-02, -1.0450e-01,
5.9537e-03, 1.4050e-02, 1.9802e-04,
2.4964e-02, 1.3077e-01, -4.7314e-02,
6.2744e-03, -1.9068e-01, 5.2593e-02,
-2.0550e-40, -2.4231e-40, 3.3927e-40,
-3.9609e-41, 2.2262e-40, 1.8866e-40,
2.0788e-40, -1.8012e-40, -1.9375e-40,
-4.7530e-03, -1.2315e-01, 8.2373e-03,
-9.2412e-02, 1.7156e-01, 1.1176e-02,
-1.4081e-02, 1.4694e-02, -1.9475e-02,
-1.5269e-02, -3.8430e-02, -7.4717e-02,
3.3361e-02, -1.1956e-01, 4.2304e-01,
-2.9924e-03, -3.3035e-02, -3.6560e-02,
-1.2386e-02, 6.3762e-03, -3.7047e-02,
1.3839e-02, -3.6358e-02, 4.3609e-02,
-8.3692e-03, 4.5794e-01, -3.0761e-01,
2.2287e-02, 2.5360e-02, -6.1253e-03,
-1.8992e-02, -4.0078e-01, 7.3821e-02,
5.6517e-03, 4.2348e-02, -2.5642e-02,
5.5659e-40, -6.1219e-40, 4.1493e-40,
5.7719e-42, -3.7181e-40, -3.3260e-40,
-4.8241e-41, 5.2207e-40, -1.2199e-40,
-1.2074e-02, 1.7647e-01, 1.1882e-02,
6.4764e-03, -2.3742e-01, -1.8033e-01,
2.5866e-02, 6.5985e-02, 3.7191e-02,
5.1047e-02, -3.0457e-02, 1.2531e-02,
-1.3252e-01, 1.2593e-01, -6.3717e-02,
4.0794e-02, -1.4786e-02, 1.7139e-02,
2.4343e-40, -1.7451e-40, 2.0169e-40,
-5.5166e-40, 2.4201e-40, -2.5701e-40,
2.9947e-40, 2.9321e-40, -1.6015e-40,
-3.6598e-02, -1.8520e-03, -1.6999e-01,
-8.6806e-02, -7.7266e-02, -9.6042e-02,
-2.1342e-02, 2.5793e-02, -7.2541e-03,
3.0667e-02, -2.6287e-01, 3.0592e-02,
-4.5559e-02, -1.4716e-01, 2.0932e-01,
-5.8472e-03, -1.0023e-02, 1.2134e-02,
-1.3284e-02, 2.0538e-02, -5.4476e-04,
5.8096e-02, -1.4790e-02, -2.0158e-02,
-3.9654e-02, -2.2069e-01, -1.5089e-01,
-1.8966e-01, -1.6834e-01, 9.8934e-02,
8.2326e-02, 7.5585e-02, -1.7188e-02,
-1.4985e-02, 2.1823e-02, -7.7015e-03,
1.8353e-40, 4.8298e-40, -2.0568e-40,
-3.7196e-40, -5.7237e-40, 1.0648e-40,
9.4960e-41, 3.0411e-40, 1.3294e-40,
-1.4884e-02, 4.9767e-02, -3.0288e-02,
8.9874e-03, -1.0290e-01, 3.1344e-01,
5.9735e-03, -2.0813e-01, -6.6145e-03,
1.6592e-02, 3.0529e-05, -1.0180e-02,
-4.8683e-02, 1.4025e-01, 2.9237e-02,
-2.3334e-02, -9.6638e-02, -1.0268e-02,
-4.9497e-41, -5.6377e-40, -2.0142e-40,
2.1230e-40, 1.6067e-40, 3.4830e-40,
-4.9031e-40, -3.0290e-40, -2.9060e-40,
3.4053e-02, -8.9560e-02, -4.4479e-02,
4.2128e-02, 6.9253e-02, -7.1096e-03,
4.2358e-02, -1.7215e-02, 9.0389e-03,
1.8129e-02, -1.4785e-01, 1.1267e-01,
-7.1637e-02, 5.5595e-01, -1.0569e-02,
1.8481e-02, -4.7556e-02, -1.1185e-02,
-1.1766e-02, -8.5959e-03, -3.0046e-02,
-2.1081e-03, 1.1518e-01, -8.4419e-02,
-7.5829e-02, 1.8199e-01, -9.7726e-03,
3.6473e-02, 1.8761e-01, 4.9495e-03,
-6.9640e-02, -2.8775e-01, 3.6149e-02,
9.6345e-04, 1.3967e-02, -6.0015e-03,
2.9861e-40, 3.9190e-40, 5.3741e-40,
3.8059e-40, 4.7113e-40, 5.9498e-40,
-5.0640e-40, -4.1610e-40, 6.2009e-40,
-2.3464e-03, -7.3888e-02, 3.4701e-02,
-5.2257e-04, 3.8444e-02, -5.3735e-01,
-1.7970e-03, 9.0298e-02, 5.3151e-02,
-2.6033e-02, 1.2973e-02, 4.9147e-03,
2.3005e-02, 1.7045e-01, 2.4715e-02,
2.7981e-02, -8.4662e-02, -9.4778e-03,
5.3019e-40, -2.1800e-40, 1.5281e-40,
-1.0282e-40, 1.8040e-41, 1.3929e-40,
-5.9679e-40, -5.2958e-40, 1.4429e-40,
3.4325e-02, -1.7240e-01, -4.9645e-02,
-2.4341e-02, 5.2652e-02, -1.1188e-02,
-3.6336e-03, 4.2148e-04, 3.3086e-03,
5.5059e-03, 1.7744e-01, -2.8681e-02,
-3.4868e-03, -1.4569e-01, 1.6508e-02,
4.6766e-03, -1.7963e-02, -2.6397e-03,
4.3618e-03, -4.2793e-03, -4.7820e-04,
-4.2795e-02, 2.0070e-01, 3.8402e-02,
5.0586e-02, 2.1910e-01, -3.4381e-02,
5.7625e-02, 4.2314e-01, -1.9732e-02,
3.4811e-02, -2.3033e-01, 1.1477e-02,
-7.3744e-03, 1.9112e-02, 4.2251e-03
}
};
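// Per-layer bias terms for the HDNL0 model: 8 layers with 8 output
// channels each (shape taken from the [8][8] declaration below).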
static __device__ __constant__ const float HDNL0biasL[8][8] =
{
{
0.0272, -0.5743, -0.0333, -0.0334, 0.0082, -0.0263, -0.0048, -0.0167
}
,
{
-0.0239, -0.0385, 0.0026, 0.0288, -0.0225, 0.0082, -0.0191, -0.0185
}
,
{
-5.8305e-03, -8.6574e-02, 4.2228e-02, -4.3500e-02, -8.1892e-04, 3.3171e-03, -1.1582e-02, -4.1205e-40
}
,
{
-0.0053, 0.0053, -0.0114, -0.0127, -0.0039, -0.0426, 0.0053, -0.0017
}
,
{
-0.0046, -0.0104, -0.0087, -0.0040, 0.1077, 0.0347, -0.0165, 0.7296
}
,
{
8.7612e-02, 5.9126e-01, 4.6709e-03, -1.1559e-39, 2.3381e-02, -1.2136e-40, -5.6040e-39, 3.7100e-02
}
,
{
-3.3246e-39, -1.4536e-02, -6.3362e-02, 8.5347e-41, 7.9956e-02, 3.0679e-04, -1.0257e-02, -1.2037e-02
}
,
{
-0.0006, 0.0117, 0.0083, 0.0686, -0.0046, 0.0015, -0.0076, 0.0079
}
};
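// Final-layer weights for HDNL0. The [4 * 8] shape suggests a 1x1
// convolution mapping the 8 feature channels onto 4 outputs, presumably
// the 2x2 sub-pixel block of the upscaled result; the exact ordering
// (output-major vs. channel-major) is not documented here.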
static __device__ __constant__ const float HDNL0kernelsL10[4 * 8] =
{
0.4908, -0.0457,
-0.1716, -0.2115,
-0.0015, -0.3152,
0.3045, 0.0330,
-0.2981, 0.0912,
0.0122, 0.2281,
0.3331, 0.2853,
0.2210, 0.2611,
0.2364, 0.0792,
0.2885, -0.7122,
-0.3715, 0.1404,
-0.0260, 0.2144,
0.2378, 0.1570,
-0.5734, 0.2077,
-0.0851, 0.2771,
0.0415, -0.1858
};
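// Minimal illustrative sketch (an assumption, not part of the exported
// weights): one way the final layer could consume an 8-weight slice of
// HDNL0kernelsL10, assuming an output-major [4][8] layout. feat[] holds
// the 8 feature-map samples at one pixel; outIdx selects one of the 4
// sub-pixel outputs.
static __device__ __forceinline__ float HDNL0LastLayerDot(const float feat[8], int outIdx)
{
    float sum = 0.0f;
    for (int c = 0; c < 8; ++c)
        sum += feat[c] * HDNL0kernelsL10[outIdx * 8 + c]; // layout assumed, not verified
    return sum;
}
// First-layer kernels for the HDNL1 model: 9 * 8 weights, i.e. a 3x3
// spatial kernel for each of 8 output channels over what appears to be
// a single input channel.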
static __device__ __constant__ const float HDNL1kernelsL1[9 * 8] =
{
-6.6326e-02, -2.2316e-01, 4.2471e-02,
1.7064e-02, -6.8305e-01, -1.5978e-01,
6.7568e-01, 3.2212e-01, 8.3561e-02,
-4.6649e-01, -6.8789e-02, 5.3455e-01,
-5.0941e-01, 7.0657e-02, 4.5647e-01,
-2.3657e-02, 3.5302e-02, -1.8316e-02,
-2.0316e-01, 4.7021e-02, -2.2313e-01,
5.3465e-02, 7.0750e-01, 9.1366e-02,
-2.8566e-01, -2.0521e-02, -7.1786e-02,
4.8186e-02, -9.3429e-02, 2.4493e-03,
3.4654e-01, 7.2625e-02, 1.6615e-01,
3.2101e-01, 3.2923e-01, -9.8548e-02,
1.1916e-02, 2.0413e-01, -1.8920e-02,
6.0858e-02, 8.3548e-01, 1.4060e-01,
-9.1827e-01, -2.4551e-01, -4.6118e-02,
-5.2737e-02, 4.3151e-01, 1.7027e-01,
2.6647e-01, 5.5240e-01, 3.4745e-03,
5.3495e-02, -4.7059e-02, -2.6593e-02,
1.5691e-01, 4.7332e-01, 2.6651e-03,
1.7997e-02, 4.1367e-01, 1.3239e-02,
4.6932e-02, 1.0278e-01, 1.0699e-02,
-3.4319e-02, -7.6373e-01, -9.7022e-02,
-1.4160e-01, 2.9567e-01, 6.6220e-01,
7.3508e-05, 1.2683e-01, -6.3442e-02
};
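// Bias terms for the HDNL1 first layer, one per output channel.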
static __device__ __constant__ const float HDNL1biasL1[8] =
{
-0.0264, -0.0229, -0.3021, -0.2579, -0.0327, -0.0053, -0.7777, 0.0232
};
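// Hidden-layer kernels for HDNL1: 8 layers of 9 * 8 * 8 weights each,
// i.e. a 3x3 kernel for every channel pair of an 8-in/8-out convolution
// (which dimension is input vs. output is inferred, not documented here).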
static __device__ __constant__ const float HDNL1kernelsL[8][9 * 8 * 8] =
{
{
-7.8588e-41, -5.0770e-40, -2.3334e-40,
5.7174e-40, 6.9060e-41, 2.2264e-40,
-4.1631e-40, 4.5667e-40, -1.8115e-40,
-3.1000e-40, 3.1019e-40, 5.5423e-40,
-5.8518e-40, 2.1290e-40, -5.4579e-40,
-3.7753e-40, 3.6029e-40, -1.7875e-40,
4.2296e-40, 6.5672e-41, 1.4976e-40,
-3.1479e-40, -3.2881e-40, -5.9818e-40,
3.2053e-40, 3.0821e-40, 5.1321e-40,
-2.6557e-17, -3.8205e-17, -3.7077e-17,
-2.5168e-17, -3.4817e-17, -3.4186e-17,
-1.8056e-17, -2.3105e-17, -2.2581e-17,
5.9355e-40, 2.4052e-40, -1.0027e-40,
2.2060e-40, 3.4864e-40, -5.7403e-40,
4.6936e-40, -3.3951e-40, -4.7715e-40,
-9.7917e-11, -1.0331e-10, -9.6141e-11,
-1.0581e-10, -1.1173e-10, -1.0317e-10,
-1.0192e-10, -1.0681e-10, -9.8738e-11,
-1.0402e-29, -2.3233e-29, -1.7882e-29,
-1.4804e-29, -3.7821e-29, -3.0750e-29,
-1.0448e-29, -2.6740e-29, -2.1676e-29,
4.2124e-40, 2.5024e-40, 4.5312e-40,
-2.4880e-40, 2.9838e-41, -2.7215e-41,
-2.6347e-40, 1.5950e-40, 9.3734e-41,
-1.4936e-01, -1.0438e-01, 2.9827e-02,
1.4751e-02, -1.6854e-01, -8.8101e-02,
4.9228e-02, -3.0744e-02, -1.1512e-01,
-3.4996e-02, -2.5024e-02, -1.8880e-02,
3.0008e-02, 4.8689e-02, -1.3415e-01,
-9.1698e-03, -1.1019e-02, -5.0655e-02,
-6.6579e-02, -2.6447e-02, 1.9791e-02,
-4.1727e-02, 3.6433e-02, 3.1516e-02,
-5.7619e-02, 2.3401e-02, 3.0785e-02,
-3.3610e-02, 1.2263e-01, 2.4351e-02,
1.7148e-02, 1.7144e-01, 4.0305e-02,
8.7902e-03, -7.0077e-02, -1.0688e-01,
4.7460e-02, -1.4093e-03, -1.5911e-02,
-2.2978e-02, 9.9025e-02, 1.2867e-02,
3.4704e-02, 1.4672e-01, 7.9188e-02,
-4.4222e-02, -3.9480e-02, -1.9193e-01,
-3.1897e-02, 1.0776e-01, -5.2742e-02,
8.0377e-02, 2.5764e-01, -9.7330e-02,
-1.1593e-01, -5.3753e-02, -2.8918e-02,
6.7939e-02, 2.3963e-01, 2.0856e-01,
2.7964e-02, 2.7781e-01, 2.1859e-01,
-1.5196e-02, 9.6704e-03, -8.0136e-02,
8.9441e-02, 1.0314e-01, -2.0204e-02,
-3.3970e-02, -1.4562e-02, 3.4723e-02,
2.3357e-40, -1.4361e-40, 2.0498e-40,
-5.2355e-40, -6.0151e-40, -2.9264e-40,
1.9715e-41, 5.9793e-41, -1.3675e-40,
5.3771e-40, 6.5637e-41, -3.8471e-40,
-3.0820e-40, -1.7004e-40, -1.9371e-40,
-5.1159e-40, 7.3244e-41, 3.5861e-41,
2.8441e-40, 4.5248e-41, 1.9771e-40,
-2.4681e-40, 3.6054e-40, 3.3496e-40,
-6.5048e-42, -1.6001e-40, 4.8243e-41,
-1.0165e-08, -9.9140e-09, -9.6054e-09,
-1.0511e-08, -1.0256e-08, -9.9066e-09,
-1.0521e-08, -1.0320e-08, -9.9896e-09,
2.6042e-40, 4.2016e-40, 5.3537e-40,
1.4594e-40, 1.1344e-40, 3.5144e-40,
-2.5736e-37, -1.3591e-39, 2.1029e-40,
-3.1420e-07, -3.0309e-07, -2.9630e-07,
-3.1196e-07, -2.9967e-07, -2.9249e-07,
-3.1296e-07, -3.0086e-07, -2.9332e-07,
-6.1256e-12, -5.9283e-12, -5.6508e-12,
-6.5297e-12, -6.4118e-12, -6.0667e-12,
-6.8382e-12, -6.8547e-12, -6.5225e-12,
-5.0327e-26, -1.0795e-25, -1.8952e-25,
-2.4220e-26, -5.9067e-26, -1.1323e-25,
-2.1499e-27, -5.5342e-27, -1.0333e-26,
4.5039e-03, -1.3303e-02, 1.6183e-01,
6.5951e-02, -7.1353e-02, 1.7254e-01,
-1.8671e-03, 1.0593e-01, -3.6872e-02,
4.9102e-02, -2.4075e-03, 4.8194e-02,
-7.0892e-02, -1.8948e-01, -1.6586e-01,
-2.8102e-02, 2.0870e-02, 5.9228e-02,
1.2673e-02, 3.3908e-02, 4.8282e-02,
4.4369e-02, 5.6304e-02, 1.2225e-02,
4.1855e-02, 1.1990e-01, 6.3799e-02,
-7.3884e-02, 1.4153e-02, 9.5825e-02,
4.2850e-02, -3.5337e-02, 1.3615e-01,
-2.0900e-01, -2.2835e-02, -8.6987e-02,
-6.7793e-02, 1.3547e-01, -9.9666e-02,
3.5498e-02, 5.3725e-02, 1.1501e-01,
-1.2238e-01, 3.5354e-02, 7.4216e-02,
-3.5288e-02, 7.0111e-03, 2.4820e-02,
-1.0649e-02, 1.6715e-01, 1.2825e-01,
3.1145e-02, 1.2097e-01, -1.2073e-02,
-7.0603e-02, 5.5574e-02, -5.0025e-02,
-8.2885e-02, 1.0957e-01, 1.3311e-01,
2.9147e-02, -1.1849e-02, 8.9953e-02,
-3.2247e-02, -1.0747e-02, 9.1431e-03,
1.2114e-01, -5.9780e-02, 5.4821e-02,
-5.2592e-02, -6.9082e-02, -7.5981e-02,
-7.8533e-02, 1.3658e-01, 1.0923e-01,
-3.2530e-02, -2.1342e-01, -1.2200e-01,
-1.9196e-02, 1.0450e-01, -8.9044e-02,
-2.0110e-02, 6.1439e-02, -2.7405e-02,
6.0823e-02, -6.4268e-03, -9.1778e-03,
6.4877e-02, -6.1227e-02, -5.4466e-02,
9.6375e-02, 1.7519e-01, 5.0725e-03,
1.9159e-01, 3.9725e-01, 1.2851e-01,
-6.9197e-02, 4.9372e-02, -3.4221e-02,
1.1583e-01, 1.3389e-01, 2.9135e-01,
1.0290e-02, 1.1214e-01, 1.7560e-01,
-1.8048e-02, 8.4782e-02, 4.9925e-02,
-3.8447e-02, -1.3156e-01, -1.1072e-01,
1.8256e-01, 2.2831e-01, -1.6508e-01,
4.6781e-02, 1.4913e-01, -8.6956e-02,
5.1365e-04, 6.7873e-02, -3.4787e-03,
1.7689e-01, 1.8414e-01, 2.2286e-01,
1.2571e-01, 1.7687e-01, 1.5949e-01,
5.9904e-02, 1.6259e-01, 1.4313e-01,
2.2234e-01, 4.0943e-01, 3.1469e-01,
1.9799e-01, 4.3052e-01, 3.0510e-01,
1.2259e-01, -1.0778e-02, 6.2284e-03,
1.4508e-02, -6.9073e-02, 5.0998e-02,
5.2962e-02, -1.5291e-01, -1.0491e-02,
-8.6903e-02, -1.0430e-01, 3.0130e-02,
4.1691e-02, -1.2675e-01, -5.5169e-01,
8.9644e-02, 3.6910e-02, -1.5459e-01,
5.3656e-03, 6.7936e-02, 1.0793e-01,
-2.7424e-02, -1.7652e-01, -3.5776e-01,
2.4593e-02, -5.6237e-01, -5.9038e-01,
-9.4807e-02, -7.5681e-02, -3.6990e-02,
8.7385e-03, -5.7989e-02, -4.9573e-02,
-7.7422e-02, -1.1899e-01, -7.4023e-02,
9.1539e-03, -1.1760e-01, 4.6825e-02,
1.9901e-02, -3.9718e-02, 1.2997e-02,
4.2209e-02, -5.2119e-02, -1.2255e-01,
2.4262e-02, 5.3676e-02, -2.4767e-01,
-4.2933e-02, -2.2473e-01, -4.0310e-01,
-3.5160e-02, 1.9858e-01, -1.5943e-01,
1.3208e-01, -1.0493e-01, -6.7076e-02,
-2.5244e-01, 1.1175e-02, 2.5568e-01,
-3.3867e-01, 3.1953e-02, 5.9426e-01,
4.0551e-02, 4.4914e-03, -1.9348e-02,
-6.7386e-02, -1.5543e-01, -3.0883e-02,
8.9177e-02, -4.6432e-02, 6.8227e-02,
8.7784e-02, 3.6127e-02, -2.0375e-02,
4.5461e-02, -4.9071e-02, 9.9435e-02,
-2.5700e-01, -2.7706e-01, 6.2776e-02,
-6.9571e-02, -5.7888e-03, 9.3852e-02,
2.8490e-02, -2.7854e-01, 1.4209e-01,
1.5373e-02, -4.3503e-02, 9.6895e-02,
1.1682e-02, 1.5608e-01, 1.5844e-01,
5.8027e-02, 2.6632e-02, -8.5479e-03,
1.2836e-01, 2.0714e-01, 1.0228e-01,
1.4647e-02, 5.7609e-02, -1.6728e-02,
2.1212e-01, 3.2673e-01, 4.5670e-02,
-6.0844e-02, -1.1768e-01, -1.1233e-01,
5.0123e-04, 6.3947e-02, -1.8356e-01,
1.4091e-01, -2.1568e-02, 8.5933e-02,
-3.9406e-02, 8.2921e-02, -1.0601e-01,
4.1284e-02, -7.3138e-02, 1.7264e-01,
2.5883e-02, 5.2945e-01, 2.4510e-01,
2.7291e-03, 4.0173e-02, 7.8221e-03,
-3.5795e-02, -4.8631e-03, -2.2715e-01,
1.2330e-01, 7.1739e-01, -4.1725e-01,
7.5106e-02, 2.5267e-02, -2.8655e-01,
-7.8731e-02, -7.5747e-03, -5.5601e-02,
7.9764e-02, 1.0524e-01, 8.6742e-03,
2.1791e-02, 3.7304e-02, -1.1534e-01,
-1.2011e-01, -7.5160e-02, 1.3737e-02,
-2.9470e-01, 2.6613e-01, -2.3740e-02,
1.2957e-01, 1.4752e-01, -9.3655e-02,
2.9828e-02, 2.0664e-01, 1.9731e-02,
-8.0378e-02, -3.9481e-01, -1.5395e-01,
-5.7944e-02, -8.6343e-02, -5.4324e-02,
7.1664e-02, 1.5294e-01, -1.2112e-02,
2.1023e-02, 1.1945e-01, -7.2998e-02,
-1.1693e-02, -1.8818e-01, -9.8693e-02,
-6.7017e-02, 6.9767e-02, -5.0268e-02,
-9.1106e-03, 2.4267e-01, 6.0277e-02,
3.5269e-02, 7.7376e-02, 1.6642e-02,
-5.2600e-02, -1.8864e-01, -1.1195e-01,
3.2119e-01, -9.7913e-02, 1.4734e-01,
8.6988e-02, -5.3563e-03, -2.6136e-03,
-9.1528e-03, 2.8186e-01, -1.5933e-01,
4.8499e-02, 4.5189e-01, -1.6399e-01,
5.8164e-02, 6.3251e-02, -2.8738e-02,
2.0424e-01, -7.2819e-02, 2.1903e-02,
-3.5630e-01, 1.3171e-01, -7.6749e-02,
3.8848e-02, 1.7902e-01, -1.1902e-01,
-4.4221e-02, 1.5032e-02, 2.9078e-02,
-1.9738e-01, -1.4878e-02, 1.3315e-02,
1.3956e-02, 1.2856e-01, 7.0688e-02,
2.0933e-01, 1.7286e-01, 6.7601e-02,
5.5136e-01, 4.6866e-01, 1.8402e-01,
2.2362e-01, 2.4124e-01, 1.3167e-01
}
,
{
-5.2308e-12, -5.4024e-12, -5.0039e-12,
-5.4553e-12, -5.6928e-12, -5.2812e-12,
-5.0230e-12, -5.2150e-12, -4.9133e-12,
5.7994e-02, 1.0051e-01, -1.0618e-01,
6.8090e-02, 1.2789e-01, 1.1380e-01,
-1.5882e-01, 8.2323e-03, -9.1424e-02,
2.0132e-07, 2.0907e-07, 2.1344e-07,
2.1179e-07, 2.2018e-07, 2.2381e-07,
2.1095e-07, 2.1920e-07, 2.2150e-07,
2.9336e-02, 5.4427e-02, -1.2082e-01,
5.8399e-02, 2.2261e-01, 1.1165e-01,
-9.6098e-02, 8.3175e-02, -6.5909e-02,
1.2007e-01, 1.9776e-01, 7.7464e-02,
6.7018e-02, 3.6536e-01, 1.3796e-01,
6.0724e-02, 4.6161e-02, 2.3740e-01,
-2.1117e-02, -2.0200e-02, 9.3703e-02,
-4.6932e-02, -1.5910e-01, 8.8094e-02,
-5.6641e-02, -1.7146e-01, -1.0502e-01,
-2.5624e-01, 1.6049e-01, -3.3267e-02,
-2.3248e-01, 5.4036e-01, 1.0027e-01,
-2.1680e-01, -7.0096e-03, -1.0692e-01,
-4.8357e-02, 2.5107e-01, 4.8323e-02,
9.7245e-02, 5.5015e-01, -3.4641e-01,
1.2458e-02, -1.3626e-01, -4.1992e-01,
-2.1359e-40, -1.4250e-40, -4.7123e-40,
-5.9433e-41, 1.9903e-41, -1.7701e-40,
-5.9941e-40, -5.8562e-40, -5.0226e-40,
-2.6581e-40, 1.3006e-40, -1.4201e-40,
5.4264e-40, 2.3848e-40, 5.6412e-40,
-2.6378e-41, -5.7132e-40, -4.1343e-40,
-3.2848e-22, -3.6697e-22, -3.4147e-22,
-3.5780e-22, -3.9435e-22, -3.5989e-22,
-3.1212e-22, -3.4305e-22, -3.0670e-22,
-1.1749e-08, -1.1602e-08, -1.1494e-08,
-1.2125e-08, -1.1918e-08, -1.1718e-08,
-1.1779e-08, -1.1623e-08, -1.1559e-08,
-5.0237e-07, -4.9179e-07, -4.6744e-07,
-5.1967e-07, -5.0826e-07, -4.8421e-07,
-5.0226e-07, -4.9668e-07, -4.8019e-07,
5.6433e-41, -3.0514e-40, -5.4526e-40,
1.1125e-41, 2.9485e-40, 5.5282e-40,
3.0229e-40, 1.5915e-40, 5.3759e-40,
-6.1144e-27, -9.2380e-26, -2.4302e-25,
-9.3834e-25, -1.0289e-23, -1.9513e-23,
-4.3746e-24, -4.4359e-23, -7.0505e-23,
-8.1604e-36, -3.2928e-37, -2.2994e-40,
-3.9543e-37, -9.9513e-39, 7.4616e-41,
-4.0044e-39, 4.4392e-40, 4.8856e-40,
-3.3447e-40, -3.9935e-40, 2.4649e-40,
2.0207e-40, -3.0245e-40, -7.1986e-41,
6.2938e-40, -3.6922e-40, 1.5296e-40,
-6.4982e-41, 5.0849e-41, 5.7873e-40,
1.4327e-40, -4.2163e-40, 1.3807e-40,
2.8569e-40, 1.9139e-40, 3.2985e-40,
-5.4410e-40, 2.3070e-40, 2.1690e-40,
-1.5964e-40, -2.2781e-40, 5.6766e-40,
2.2533e-42, -2.5532e-40, -5.5822e-40,
5.7249e-40, 5.3555e-40, -4.9107e-41,
1.7538e-40, -1.2312e-40, 5.0077e-40,
6.1500e-40, 1.9980e-40, 6.2953e-40,
-7.5314e-23, -9.4299e-23, -7.1342e-23,
-8.5139e-23, -1.1237e-22, -9.0478e-23,
-6.2038e-23, -8.5180e-23, -7.3015e-23,
5.0613e-40, 1.5224e-40, -1.8977e-40,
2.4108e-41, -5.1771e-40, 6.2317e-40,
1.0465e-40, 2.8816e-41, 6.2500e-40,
3.5727e-40, 4.2717e-40, -3.5900e-40,
-4.4831e-40, 3.4260e-40, -4.8293e-40,
-2.4133e-40, 3.1140e-40, -2.0777e-40,
-2.2906e-41, 3.5923e-40, -4.4443e-40,
-4.6615e-40, -2.1123e-40, 4.5700e-40,
-4.6360e-40, -3.6052e-40, -3.4319e-40,
-3.6575e-40, -3.5707e-40, -3.0530e-41,
4.2531e-40, -1.2255e-40, -3.9607e-40,
3.5903e-40, -5.4630e-40, -3.1460e-40,
2.8820e-40, 4.9460e-40, 6.1461e-40,
8.9118e-41, -4.6579e-40, -2.4172e-40,
-5.5474e-40, -8.1848e-41, -1.6910e-40,
-1.6272e-25, -1.8802e-25, -1.7229e-25,
-1.7850e-25, -2.0338e-25, -1.8235e-25,
-1.4715e-25, -1.6733e-25, -1.4681e-25,
-5.5471e-09, -5.6862e-09, -5.7043e-09,
-5.8727e-09, -5.9823e-09, -5.8983e-09,
-5.8040e-09, -5.8670e-09, -5.7388e-09,
-9.7253e-07, -9.7248e-07, -9.4623e-07,
-1.0149e-06, -1.0042e-06, -9.6709e-07,
-1.0139e-06, -9.9930e-07, -9.5295e-07,
-4.5042e-40, 2.6725e-40, 2.3181e-40,
-4.6274e-41, -1.1799e-40, 5.0685e-40,
-1.0765e-40, 3.3322e-40, -6.1905e-40,
-1.3653e-34, -3.4690e-33, -1.1578e-32,
-1.4444e-31, -2.1995e-30, -4.8668e-30,
-1.2965e-30, -2.0189e-29, -3.3962e-29,
-2.5057e-40, 7.2876e-41, 4.5731e-41,
-1.6525e-40, 5.0987e-40, -5.4683e-40,
8.1836e-41, 6.2722e-40, -3.1057e-40,
4.0987e-40, 3.5941e-40, 5.1680e-40,
5.5563e-40, 3.1011e-40, 4.7068e-40,
1.0426e-40, -1.0803e-40, 4.4867e-40,
-4.9675e-03, 1.5412e-01, -4.1930e-03,
-6.1089e-02, 2.0405e-01, 1.9587e-01,
3.8772e-02, 1.6894e-01, -2.6163e-02,
1.0839e-30, 1.8608e-30, 1.1386e-30,
1.4863e-29, 1.9422e-29, 1.1639e-29,
1.7504e-29, 2.2177e-29, 1.3629e-29,
6.4484e-02, 6.6296e-02, 2.2838e-01,
-1.0213e-01, 7.5883e-02, -1.7531e-01,
-1.4869e-01, 1.0736e-01, 1.4129e-01,
-2.8235e-02, -2.9232e-02, -9.3912e-02,
5.1317e-02, 9.0256e-02, -2.4669e-02,
-3.2465e-02, 5.8099e-02, 9.8402e-02,
-2.3135e-01, -1.3786e-01, 2.8581e-01,
-3.2410e-01, -2.6623e-01, 6.1583e-02,
1.8696e-01, 4.7251e-02, -2.3520e-01,
2.5630e-02, -1.2358e-01, -1.5735e-01,
-1.2198e-01, 5.1970e-01, 1.9976e-01,
-1.2515e-01, 9.8768e-02, 5.8917e-02,
-3.8569e-02, -9.2729e-02, -1.8982e-01,
1.1378e-01, 5.7195e-01, -1.8265e-01,
-3.5724e-02, -2.1379e-01, -2.2129e-01,
-5.1198e-40, -3.4709e-40, 6.2940e-40,
-2.2134e-41, -3.6133e-40, -2.7075e-40,
-5.9664e-40, -2.3937e-40, 3.0876e-40,
9.1814e-41, 9.5898e-41, -3.1892e-40,
3.1093e-40, 2.7935e-40, 1.7966e-40,
-2.3967e-40, 4.0806e-40, 6.2012e-40,
5.3771e-41, 6.1000e-40, -4.6695e-40,
5.9474e-41, -4.9675e-40, 5.7403e-41,
4.7091e-40, -5.0751e-41, 3.9864e-41,
-9.7756e-41, 2.7978e-40, -5.0791e-40,
-3.4321e-40, -7.0774e-41, -5.2651e-40,
2.8034e-40, -3.3452e-40, 1.9535e-40,
-6.2300e-40, -1.8372e-40, -1.9038e-40,
-5.6564e-40, -6.1257e-40, -1.0338e-40,
-1.7191e-41, -1.2843e-41, 5.0707e-40,
-4.4587e-40, 2.7128e-40, -1.4155e-40,
-5.7475e-40, -3.4612e-40, -4.7424e-40,
1.7235e-40, -6.0028e-40, -1.6342e-40,
-5.1072e-40, -2.4721e-40, -2.8477e-41,
2.6598e-40, -4.4078e-40, 4.1763e-40,
-3.3947e-40, -5.5626e-40, 4.9713e-40,
2.1733e-40, -2.9024e-40, -4.5514e-42,
-3.4873e-40, -1.0737e-40, -1.4297e-40,
2.8514e-40, 2.6283e-40, 2.2827e-40,
3.8908e-40, -4.2140e-40, 6.1433e-40,
-4.7825e-40, -3.0140e-40, -5.9563e-40,
1.5280e-40, 2.6156e-40, 5.0361e-40,
1.9497e-01, 2.3140e-01, -3.5244e-02,
1.6876e-01, -1.7646e-02, -2.0413e-01,
9.8052e-02, -6.7906e-02, -3.9834e-02,
-5.9252e-15, -6.7431e-15, -8.1865e-15,
-5.7350e-15, -6.6893e-15, -8.9833e-15,
-8.4106e-15, -1.0631e-14, -1.5948e-14,
8.9389e-02, 6.6460e-02, 6.8477e-02,
6.1099e-03, -8.7536e-02, 1.1792e-01,
-1.0079e-01, 1.5293e-01, 4.3945e-02,
1.0168e-01, 1.0281e-01, -7.9173e-02,
2.0855e-01, 1.7537e-01, -7.1000e-02,
-1.4157e-01, -3.8478e-02, -2.7478e-01,
2.2156e-01, -6.4262e-02, -7.2841e-02,
-3.2334e-01, 6.5591e-02, 1.1163e-01,
7.2151e-02, -1.6943e-01, 5.9049e-02,
-1.4813e-01, -2.0904e-01, -8.8010e-02,
-2.7215e-01, 5.7668e-01, 1.7618e-02,
-7.1365e-02, 1.2976e-01, -1.0169e-01,
-8.9229e-02, 3.3971e-02, 1.8295e-01,
1.7204e-01, 3.8082e-01, 3.7415e-02,
5.9309e-02, -4.9550e-04, 5.1555e-01,
-5.1006e-18, -5.6038e-18, -5.8724e-18,
-5.8910e-18, -5.8379e-18, -5.6311e-18,
-5.2596e-18, -5.1835e-18, -4.6300e-18,
6.4067e-02, 1.8889e-02, -1.0634e-01,
1.7316e-04, 1.9935e-01, -1.1854e-02,
-9.3669e-02, -1.1924e-01, -1.8981e-02,
1.7465e-08, 1.7340e-08, 1.7565e-08,
1.8234e-08, 1.8008e-08, 1.8017e-08,
1.9226e-08, 1.8956e-08, 1.8651e-08,
-1.7294e-01, -1.2200e-01, -4.9577e-02,
-3.5087e-02, -1.2526e-01, 9.3445e-03,
-7.4374e-02, -1.1350e-01, 2.7510e-03,
8.5153e-02, 4.2080e-02, -5.0111e-02,
1.2845e-01, 1.9630e-01, 1.0542e-01,
-1.0095e-01, 6.2631e-02, 8.8734e-02,
3.4836e-01, 5.4389e-01, -2.2360e-01,
5.1721e-01, 5.7094e-01, -6.7491e-02,
-3.5972e-02, 1.0590e-01, -2.2984e-01,
-1.5483e-01, -5.1271e-03, 4.9780e-02,
-1.3184e-01, 2.8028e-01, -1.1427e-02,
-3.4093e-02, -6.7622e-02, -1.2359e-02,
1.3184e-02, 1.2125e-01, -1.2502e-02,
9.2730e-02, -6.5974e-02, -1.6519e-01,
1.9546e-01, -1.5188e-01, -8.1752e-02
}
,
{
-3.4905e-04, -3.5739e-04, -3.2920e-04,
-3.8506e-04, -3.9121e-04, -3.5635e-04,
-3.7303e-04, -3.7698e-04, -3.4190e-04,
2.8622e-41, -1.2033e-41, 1.2609e-40,
-4.9379e-40, -5.1047e-40, 5.5085e-41,
-4.7002e-40, -5.0136e-40, -4.5629e-40,
-5.1095e-40, 1.8741e-40, 1.8435e-40,
4.1851e-40, -8.9558e-41, -9.6681e-41,
-1.8244e-40, 2.7992e-40, 1.8116e-40,
2.8655e-40, -3.0193e-40, 2.2293e-40,
1.6805e-40, 3.3049e-40, 6.9542e-41,
-3.3329e-40, 4.2212e-40, -1.3453e-40,
-8.4502e-15, -1.1099e-14, -9.4174e-15,
-9.8778e-15, -1.1768e-14, -9.4875e-15,
-6.7805e-15, -7.4561e-15, -5.8023e-15,
6.0452e-40, 6.9262e-41, 2.9300e-40,
-6.1511e-40, -4.1269e-40, 4.4012e-40,
1.3340e-42, -2.9020e-40, -4.5529e-40,
-1.2289e-22, -1.3972e-21, -5.5694e-21,
-1.7854e-21, -1.7743e-20, -5.6749e-20,
-6.8510e-21, -6.2353e-20, -1.6203e-19,
-5.0003e-07, -5.1950e-07, -4.7654e-07,
-5.5510e-07, -5.7995e-07, -5.2753e-07,
-5.3262e-07, -5.5802e-07, -5.0971e-07,
-1.4922e-02, -1.1926e-01, -1.9067e-02,
-2.6298e-03, 2.1756e-01, 3.0148e-02,
1.4372e-01, 3.5066e-02, -1.0184e-02,
-4.1698e-12, -4.8798e-12, -6.4033e-12,
-2.3169e-12, -2.7879e-12, -3.7276e-12,
-1.6177e-12, -2.0021e-12, -2.6440e-12,
-5.9514e-40, -4.4339e-40, -3.0315e-40,
3.5756e-40, 2.5390e-40, -1.2253e-40,
2.1417e-40, 4.0569e-40, 5.3962e-40,
-5.5825e-13, -6.8528e-13, -9.3486e-13,
-2.9163e-13, -3.6959e-13, -5.1183e-13,
-1.8703e-13, -2.4740e-13, -3.4019e-13,
-2.7137e-01, -4.5025e-01, 2.6405e-02,
-7.9580e-02, 5.0698e-01, -7.8794e-02,
-3.7540e-02, -7.1115e-03, -3.9741e-01,
-5.9910e-40, -5.5101e-40, 3.1274e-41,
-6.9384e-41, -4.9294e-40, -1.0818e-40,
-3.5484e-40, -4.7965e-41, -5.2508e-41,
4.1917e-01, -1.6207e-02, -6.8506e-02,
-2.7060e-02, 5.6162e-01, 1.6696e-01,
-1.7677e-03, 1.8842e-01, -6.0493e-02,
-3.0696e-01, -1.7293e-01, -8.7143e-02,
-1.6740e-01, 1.8861e-02, -1.7112e-01,
8.6594e-02, 3.0025e-01, -7.6141e-02,
1.1317e-02, 1.0678e-01, -5.1283e-02,
-1.2872e-01, 4.2580e-01, 4.9678e-02,
-2.8372e-01, -1.3479e-01, -7.3813e-02,
-1.7038e-15, -1.1156e-15, -7.3385e-16,
-2.6350e-15, -1.6234e-15, -1.0598e-15,
-7.7860e-15, -4.6981e-15, -3.0030e-15,
-3.0246e-40, -4.1596e-40, 2.9013e-40,
8.5195e-41, -2.2396e-40, -2.0322e-40,
-5.6200e-40, 2.4820e-40, 3.1309e-40,
-3.1822e-17, -1.6585e-17, -8.8616e-18,
-5.9907e-17, -2.9812e-17, -1.6126e-17,
-2.4410e-16, -1.2541e-16, -6.7867e-17,
1.5795e-01, -1.4429e-01, -6.0501e-02,
5.9113e-02, 3.4391e-01, 1.4165e-01,
5.2564e-02, -1.8209e-01, -6.8176e-02,
-7.7363e-41, 5.9969e-40, 5.9290e-40,
-7.4888e-41, -7.0945e-41, 5.3120e-40,
1.3612e-40, -4.6718e-40, -1.0677e-40,
-1.1498e-01, -1.2925e-02, 2.6735e-02,
-8.1469e-02, 2.9678e-01, 1.8971e-01,
2.0149e-02, 2.4207e-03, -1.2549e-01,
-6.6799e-02, -3.5900e-02, -5.6111e-02,
9.5181e-02, 2.1216e-02, 2.0477e-01,
8.5923e-03, 6.8615e-03, 3.8252e-02,
4.5098e-03, 2.1321e-01, 3.4612e-03,
3.5662e-01, 4.7532e-02, 2.5319e-01,
4.1275e-02, 1.7951e-01, 3.2239e-02,
-2.6628e-21, -7.7165e-22, -4.9086e-22,
-1.4320e-21, -2.7134e-22, -1.2712e-22,
-1.9648e-21, -3.4172e-22, -1.3895e-22,
-2.2836e-40, 3.2091e-40, -4.4396e-40,
2.9048e-40, 6.0866e-40, 3.7804e-40,
-3.0676e-40, -2.4897e-40, 4.9891e-40,
-1.8955e-28, -3.4994e-29, -1.2914e-29,
-4.7737e-29, -3.5212e-30, -6.4003e-31,
-8.2908e-29, -3.1692e-30, -3.6909e-31,
-9.3327e-02, 1.5314e-01, 1.0676e-01,
2.5979e-01, -6.6826e-01, 2.3727e-01,
1.4855e-01, 1.9205e-01, 8.8246e-02,
-5.5197e-40, 5.3162e-41, -5.2933e-40,
1.0846e-41, -5.8128e-40, -3.1273e-40,
-2.8408e-40, 1.6989e-40, 4.8221e-41,
7.8403e-02, 1.6407e-01, 7.9932e-02,
3.2253e-01, -2.6036e-01, -8.9727e-02,
-7.5145e-02, 1.5536e-02, -8.2710e-02,
-2.1608e-01, -4.4619e-01, -4.4470e-02,
-3.9430e-01, -8.2373e-01, -7.0646e-01,
-6.9004e-03, -4.9697e-01, -1.4212e-01,
-1.8932e-06, -1.8356e-06, -1.6373e-06,
-1.9427e-06, -1.9113e-06, -1.7028e-06,
-1.8843e-06, -1.8616e-06, -1.6818e-06,
-4.7452e-29, -4.4894e-29, -2.5364e-29,
-5.6268e-29, -5.4363e-29, -3.0876e-29,
-4.3808e-29, -4.2767e-29, -2.4573e-29,
3.8855e-40, 3.5152e-40, -4.8707e-40,
4.3606e-41, -1.7886e-40, 5.1970e-40,
6.2864e-40, 5.9972e-40, 2.2197e-40,
-2.1903e-37, -1.9174e-37, -7.0785e-38,
-2.7149e-37, -2.4810e-37, -9.5619e-38,
-1.8463e-37, -1.7136e-37, -6.7163e-38,
-2.9062e-30, -3.1324e-30, -1.0876e-30,
-2.7434e-30, -3.7036e-30, -1.2821e-30,
-6.8828e-31, -9.8708e-31, -3.7930e-31,
-6.3329e-41, -3.8604e-41, -2.8272e-40,
-3.3350e-40, -1.5210e-40, -4.2620e-41,
-1.7669e-41, 5.2291e-40, -3.3205e-40,
-3.0738e-25, -8.2305e-24, -2.1451e-23,
-1.4470e-24, -4.5131e-23, -1.2177e-22,
-4.2841e-24, -1.3077e-22, -3.5946e-22,
-8.5637e-08, -8.4715e-08, -7.7597e-08,
-8.7326e-08, -8.7480e-08, -8.0290e-08,
-8.4525e-08, -8.4963e-08, -7.8582e-08,
-5.8581e-27, -8.8483e-27, -8.1150e-27,
-7.4336e-27, -1.2036e-26, -1.1909e-26,
-6.6006e-27, -1.0685e-26, -1.0809e-26,
-5.6355e-40, -2.3469e-40, -3.5885e-40,
-2.0755e-40, 2.0377e-40, 3.2259e-40,
-5.3947e-40, 4.2747e-41, 4.8967e-41,
4.5073e-41, 5.0069e-40, 2.6114e-40,
-4.8225e-40, -4.8317e-40, -5.4316e-40,
-5.4335e-40, -5.2994e-40, 2.6295e-40,
-1.1702e-40, -2.3137e-41, -4.5405e-40,
-4.6797e-40, 6.5582e-41, 1.8111e-40,
6.1477e-40, -1.6827e-40, -2.0288e-40,
-2.4220e-41, 4.7774e-40, 5.1050e-40,
4.9844e-40, 5.6437e-41, 4.7749e-40,
-6.8037e-41, -5.5944e-41, -5.2248e-40,
-2.9382e-40, 2.3800e-41, 1.5850e-40,
-4.5290e-40, -5.2260e-41, 2.3726e-40,
-1.9232e-40, -2.3502e-40, -2.9736e-40,
-2.8081e-40, -5.2929e-40, -4.0786e-40,
-3.0303e-41, 3.1336e-40, -5.8450e-40,
-1.5091e-40, -2.7371e-40, -4.5927e-40,
-4.0985e-38, -6.9102e-38, -5.4450e-38,
-6.2744e-38, -1.1526e-37, -9.9374e-38,
-4.8587e-38, -9.1819e-38, -8.0593e-38,
-2.9266e-29, -4.5005e-29, -3.9891e-29,
-3.8505e-29, -6.3370e-29, -6.0017e-29,
-3.2761e-29, -5.4145e-29, -5.1812e-29,
3.3692e-40, 1.0044e-40, -6.6821e-41,
9.2910e-41, 6.2137e-40, -3.5625e-40,
1.8601e-40, 3.1653e-40, -1.1506e-40,
1.2093e-40, -5.7191e-40, 5.6828e-40,
-2.3177e-40, -2.1648e-40, 5.3642e-40,
4.8826e-40, 5.2760e-40, -4.9059e-40,
-2.0721e-40, 2.0122e-40, -5.9485e-40,
3.8843e-40, -6.0861e-41, -4.0542e-40,
-3.4308e-40, -4.2822e-40, -3.9605e-40,
-5.7429e-40, 4.9242e-40, -5.9141e-40,
4.6267e-40, -2.4953e-40, -2.9300e-40,
5.3466e-40, -5.2403e-40, 3.5178e-40,
-1.8309e-40, 2.9157e-40, -7.7367e-41,
-5.8922e-40, 3.2359e-40, -6.1293e-40,
6.1138e-40, 2.2121e-40, -5.0657e-42,
4.7910e-40, -1.4080e-40, 1.9220e-40,
-3.5670e-40, 3.4204e-40, -5.0215e-40,
1.1877e-41, 2.3114e-40, -4.7794e-40,
-3.6520e-40, 4.3222e-40, -5.2866e-40,
-6.0703e-40, -4.0896e-40, -1.2521e-40,
-4.1981e-40, 5.4404e-41, 3.3337e-40,
1.3733e-01, 1.8485e-01, 7.6179e-02,
8.1719e-02, 3.3343e-01, 2.9857e-02,
-4.2753e-03, 2.0957e-01, 1.8582e-02,
2.9948e-07, 3.3403e-07, 3.7619e-07,
3.4854e-07, 3.8224e-07, 4.1507e-07,
3.7511e-07, 4.0398e-07, 4.3743e-07,
-1.7150e-41, -2.4088e-41, -1.5593e-40,
6.3817e-41, 4.8004e-41, -1.1053e-40,
-2.5225e-40, -2.7111e-40, -4.2970e-40,
1.0496e-06, 1.0916e-06, 1.1376e-06,
1.1364e-06, 1.1756e-06, 1.2051e-06,
1.1762e-06, 1.2105e-06, 1.2358e-06,
1.0037e-02, 1.4957e-01, -4.9010e-02,
2.6877e-02, 1.9067e-01, -1.9339e-03,
-2.2081e-02, -1.5137e-01, -1.6088e-01,
1.6880e-41, -2.0352e-41, -4.1857e-42,
2.0926e-40, -2.1394e-41, -5.4341e-40,
4.6824e-40, 6.2682e-40, 4.9865e-40,
-3.2967e-01, -2.5981e-01, -1.3016e-01,
-2.6507e-01, 3.2282e-01, 4.3204e-01,
-7.0936e-02, 1.9800e-01, 9.4916e-02,
-1.0122e-02, 7.4127e-02, -7.1554e-02,
7.7869e-02, 1.5734e-01, 1.3287e-01,
-9.5431e-02, 1.0984e-01, -7.6759e-02
}
,
{
-5.5262e-40, 3.7699e-40, -1.4920e-40,
4.0064e-40, -2.0632e-40, -4.4801e-41,
-3.6749e-40, 5.9043e-40, -1.5942e-40,
-5.9219e-42, -4.1286e-40, -1.6920e-40,
-2.5927e-40, -4.5458e-41, 2.0990e-40,
-4.6860e-40, 5.0483e-40, 2.8004e-40,
-4.0641e-40, 6.0770e-40, -3.8297e-42,
5.7537e-40, 5.7772e-40, -1.0048e-40,
1.5945e-40, 3.9582e-40, -2.6190e-40,
-5.1046e-40, -5.5028e-40, 5.8786e-40,
-3.5033e-40, -1.2031e-40, -3.4156e-40,
3.0058e-40, 4.3043e-40, 5.9825e-40,
4.9197e-40, 2.5974e-40, -4.3461e-41,
-4.1935e-40, -1.6383e-41, -1.4680e-40,
-5.3501e-40, -2.6348e-40, 3.0631e-40,
-5.2019e-40, -4.4123e-40, 2.3984e-40,
-4.4682e-41, -4.6000e-40, -5.0418e-40,
-4.1263e-40, 4.5391e-40, 2.8844e-40,
5.2179e-40, -1.3188e-40, 5.1600e-40,
-2.2913e-40, -3.1127e-40, 5.4478e-40,
2.3395e-41, 5.4758e-40, 2.0998e-40,
-1.9914e-10, -2.0700e-10, -1.9815e-10,
-2.1098e-10, -2.1989e-10, -2.1131e-10,
-2.0797e-10, -2.1693e-10, -2.0860e-10,
-2.1061e-40, -2.1208e-40, -3.3698e-40,
3.2370e-40, 2.9276e-40, -3.6860e-40,
3.4752e-40, -2.0660e-40, -3.8183e-40,
-8.0136e-02, 1.3809e-02, 1.6846e-03,
3.7960e-02, 8.7557e-02, -3.5498e-01,
9.8165e-03, 9.8384e-02, 1.2395e-01,
-2.8751e-02, 9.9172e-02, 5.5841e-02,
-4.0383e-02, 1.0856e-01, -5.4339e-01,
1.3245e-02, -4.7642e-02, -1.0427e-01,
-7.4696e-03, 5.0806e-02, -1.7179e-01,
5.0303e-02, -4.0322e-01, 7.4760e-01,
-9.2342e-02, 1.1958e-01, -1.8871e-01,
3.7044e-40, -4.6951e-40, -1.9873e-40,
5.3289e-41, 2.7689e-40, -4.6994e-41,
-3.1404e-40, -5.9106e-40, 6.0436e-40,
-6.0294e-40, -3.6565e-40, -1.1884e-40,
5.5933e-40, -9.5741e-41, 4.4736e-40,
4.3267e-40, -4.9583e-40, 3.4437e-40,
-1.7432e-40, 1.4518e-40, 2.1033e-40,
-3.4667e-40, 1.7222e-40, -2.5651e-40,
-5.2517e-40, 2.8983e-41, -1.3832e-40,
-1.4153e-01, 9.4023e-02, -9.8526e-02,
2.0678e-01, 4.0842e-01, -1.1853e-01,
-1.4108e-01, -1.1005e-01, -8.1274e-02,
3.4336e-41, 1.5625e-40, 2.7213e-40,
-5.3447e-40, -3.7330e-40, -3.3637e-40,
-4.3563e-40, -3.7094e-40, 1.2820e-41,
-8.1700e-02, -1.8215e-01, -1.6011e-01,
-1.4203e-01, 5.3791e-02, -3.7663e-02,
-1.1705e-01, -1.2604e-01, -8.4890e-03,
-6.1578e-02, -3.3907e-01, 2.2344e-03,
1.5060e-01, -1.9199e-01, -5.5274e-02,
6.2300e-02, 9.1084e-02, 1.3788e-02,
4.9025e-02, 3.3738e-01, -1.8104e-01,
-2.5051e-01, 8.2363e-02, 2.0325e-01,
5.6988e-02, -1.5118e-01, 6.8897e-02,
-4.6233e-40, 1.2244e-40, -3.9802e-40,
5.8530e-40, -2.4162e-40, 4.6793e-40,
-4.8362e-40, 3.3071e-40, 1.7094e-40,
3.5249e-40, -4.8579e-40, 1.9374e-40,
6.2372e-42, 5.8402e-41, 3.2851e-40,
6.1488e-40, 1.8086e-40, -5.2451e-40,
-3.0723e-40, -5.6704e-40, -5.9899e-40,
-3.5975e-40, -1.3818e-40, -2.7285e-40,
2.4468e-40, 8.3606e-41, 1.8818e-40,
-2.3749e-01, -2.7008e-01, -1.5222e-03,
1.4806e-01, 9.0783e-02, 2.7170e-02,
1.8706e-01, 1.8162e-01, -1.1799e-01,
-1.9852e-40, -4.8879e-40, -3.1971e-40,
-1.0245e-40, 9.1421e-41, 5.3018e-40,
2.2240e-40, -1.4666e-40, -4.4259e-40,
1.1835e-01, -2.7624e-01, 1.1446e-01,
1.3574e-01, 4.3109e-01, 1.3227e-01,
3.2554e-02, 1.7139e-01, -1.1988e-01,
3.5376e-02, 8.9191e-02, 6.7643e-02,
-8.2716e-02, 2.4178e-01, 6.0818e-02,
-6.7722e-02, -3.3712e-02, 3.0664e-02,
-6.6948e-02, 2.2886e-01, 1.8143e-01,
1.8636e-01, -2.4800e-01, 1.7185e-01,
-6.5479e-03, 1.8828e-01, -7.4464e-02,
-2.8281e-30, -5.8969e-31, -2.3180e-31,
-1.6163e-30, -3.8426e-31, -1.6788e-31,
-1.9412e-30, -4.1995e-31, -1.7651e-31,
-2.0525e-40, 4.6680e-40, 5.9108e-41,
1.0336e-40, -5.7226e-41, -6.1906e-40,
-1.8693e-40, 5.5777e-40, 6.0898e-40,
-3.4735e-41, -3.2674e-40, -2.3864e-41,
-3.3596e-40, 3.3107e-40, 1.0843e-40,
5.1103e-40, 6.0598e-40, -3.6267e-40,
-4.5583e-03, -1.0635e-01, -7.4962e-02,
-1.2741e-01, 2.7234e-01, 1.0508e-01,
-2.1207e-01, 9.6720e-02, 3.4641e-02,
1.1304e-12, 1.1614e-12, 9.7086e-13,
1.3361e-12, 1.3697e-12, 1.1286e-12,
1.2620e-12, 1.2938e-12, 1.0680e-12,
-8.4197e-02, 6.3834e-02, 2.3157e-02,
-2.1280e-02, 2.9074e-01, 8.5883e-02,
-1.3695e-01, -1.6047e-01, -4.5834e-02,
-1.3848e-01, -6.6090e-02, -7.7201e-02,
-5.1963e-02, 6.0643e-02, -4.9932e-02,
1.1779e-01, 1.7521e-01, 3.0366e-02,
4.7601e-03, 4.3941e-02, -3.5985e-02,
1.7692e-02, -2.3705e-01, 2.1062e-01,
7.7174e-02, -7.6616e-02, 2.0102e-02,
-3.6353e-06, -3.5534e-06, -3.2461e-06,
-3.6813e-06, -3.6196e-06, -3.3222e-06,
-3.5581e-06, -3.5179e-06, -3.2504e-06,
-7.3892e-11, -7.2930e-11, -6.8104e-11,
-7.9244e-11, -7.7770e-11, -7.2319e-11,
-7.7297e-11, -7.5673e-11, -7.0195e-11,
-1.5180e-10, -1.5027e-10, -1.4244e-10,
-1.6013e-10, -1.5761e-10, -1.4940e-10,
-1.5682e-10, -1.5395e-10, -1.4553e-10,
-9.1167e-02, 1.2374e-01, -3.8304e-02,
2.2641e-01, 2.4855e-01, -4.3174e-02,
1.4364e-01, 1.8438e-01, 1.1617e-02,
6.1925e-40, 3.3333e-40, 1.8962e-40,
3.2481e-40, -1.7566e-40, -3.0456e-40,
2.7654e-40, 3.8422e-41, 4.9191e-40,
7.5657e-02, -1.0697e-03, 3.0319e-02,
-4.7642e-02, -9.4454e-02, -2.6543e-02,
-5.3129e-02, -1.9667e-01, -1.0851e-01,
-8.5909e-03, 1.2177e-01, 2.6434e-01,
2.4468e-02, 5.0484e-02, 3.4698e-01,
-1.4764e-03, 3.7374e-02, 1.2658e-01,
2.0602e-02, -2.4624e-02, 1.3741e-01,
1.8641e-02, 4.0484e-01, 3.2976e-01,
-4.4809e-01, -3.2104e-03, 1.6290e-03,
8.1306e-41, 2.0311e-40, 2.9683e-40,
-5.7636e-40, 4.4291e-40, 4.3356e-40,
-7.1797e-41, 4.5366e-40, 3.9953e-40,
-4.5418e-40, 4.1805e-40, -3.2458e-41,
-9.4881e-41, -8.6365e-41, -1.9294e-40,
7.1954e-41, -9.8565e-41, -5.5540e-40,
-5.3769e-40, 1.4094e-40, -1.5355e-40,
8.8038e-41, -3.6848e-40, -1.2237e-40,
-2.8267e-41, -1.7583e-40, -5.9647e-40,
1.0929e-01, 2.9895e-02, -1.4923e-01,
-1.1234e-01, -1.0514e-01, -1.3280e-02,
2.2255e-01, 6.4152e-03, -1.6309e-02,
-1.5899e-40, -7.2549e-41, -2.6734e-40,
-3.3842e-40, 3.3255e-40, 4.2694e-40,
5.2940e-40, 3.2455e-40, -3.7081e-40,
6.3639e-02, -3.3720e-02, -2.3453e-02,
1.9477e-01, 5.2267e-02, 1.8565e-02,
1.6048e-01, 2.7636e-01, 1.5930e-02,
1.7673e-03, 6.3646e-02, -1.5127e-02,
-3.7787e-02, -1.4037e-01, -3.6231e-02,
-1.5636e-02, -7.8742e-02, -2.4137e-02,
-5.0748e-02, 6.5641e-02, -2.5353e-03,
8.4955e-02, 7.4231e-01, 1.3795e-01,
-1.4552e-01, 2.0869e-01, 4.0739e-02,
-2.0015e-41, 5.2988e-40, 2.7578e-40,
4.1051e-40, 1.2834e-40, -3.4898e-40,
-1.1975e-40, 4.2374e-40, -3.0404e-41,
-6.3014e-40, 4.6330e-40, -4.4141e-41,
2.5442e-41, 5.7456e-40, 2.3848e-40,
-1.0788e-40, -5.0563e-40, -5.3638e-41,
3.5728e-40, 1.9752e-40, 6.1004e-40,
2.8189e-41, -6.2151e-40, 1.1807e-41,
6.5305e-41, 5.2028e-40, 1.3692e-40,
6.4391e-02, -1.3079e-01, -3.7980e-02,
-3.2362e-01, -3.7239e-01, -8.0182e-02,
-2.6787e-01, -3.1240e-01, -1.2798e-02,
-1.2072e-40, 5.3996e-40, -3.4352e-40,
-8.0996e-41, -3.0208e-40, 3.1848e-40,
-5.6407e-40, 2.4674e-41, -2.1055e-40,
-9.2897e-02, 1.8040e-01, -4.3269e-01,
-7.6669e-02, 4.3554e-01, -4.4870e-02,
-2.3249e-02, -1.1805e-01, 1.0507e-01,
-5.2540e-02, -3.6856e-01, 1.1246e-01,
-2.3632e-02, 1.3165e-01, -1.5380e-02,
-1.1467e-02, -5.3754e-02, -4.1619e-02,
-1.5635e-01, 3.8584e-01, -1.4434e-01,
1.7523e-01, 3.7253e-02, 4.9784e-01,
5.8484e-02, -8.4711e-02, -7.7498e-02,
-1.6956e-40, 5.4293e-41, -2.5140e-40,
-3.1995e-40, -4.8337e-40, 2.5539e-40,
-1.1449e-40, 1.9503e-40, -1.7368e-40,
5.4753e-40, 5.9720e-40, -4.7821e-40,
3.8830e-40, -3.1984e-40, -2.7163e-40,
-5.3411e-40, 7.2638e-41, 4.3186e-40,
4.6654e-40, -5.9540e-40, -2.8155e-40,
-1.4801e-40, -1.6945e-40, 1.9723e-40,
5.8380e-40, -6.1587e-40, 3.3667e-40,
-2.9327e-02, -4.2746e-02, -1.5018e-01,
8.6354e-02, 2.8140e-01, 1.2970e-02,
-2.0755e-01, 6.7548e-02, -3.6049e-02
}
,
{
9.5728e-41, 5.3991e-40, -1.3764e-40,
-2.0389e-40, 2.4254e-40, 3.3492e-40,
6.5289e-41, -3.0842e-40, 5.5850e-40,
7.7599e-02, 2.5043e-02, -1.4099e-02,
-3.3184e-02, 5.6863e-01, -2.7001e-02,
-5.2659e-02, 5.4713e-02, 2.3991e-03,
2.2010e-02, -3.9120e-02, -1.1558e-01,
9.1633e-02, 1.3070e-01, 1.2489e-01,
-4.4040e-02, -1.6324e-02, -4.9631e-02,
-7.3548e-02, -2.0492e-01, 1.4043e-01,
-6.0411e-02, 5.7710e-02, -3.6840e-02,
1.3173e-02, 2.3215e-03, 1.1820e-02,
2.5772e-02, -1.3436e-01, -5.9285e-02,
-9.3983e-02, 1.1545e-01, 1.1602e-01,
-1.8505e-02, 6.1498e-02, -1.3097e-02,
9.8690e-03, -2.1338e-02, -1.2175e-01,
1.7936e-02, -2.7811e-02, 6.7037e-02,
-5.1401e-03, 7.6421e-02, -1.0794e-01,
4.6409e-02, 3.4701e-01, 2.6587e-02,
8.4175e-02, 5.2712e-01, 6.8999e-02,
-8.0756e-02, 1.9648e-01, -8.4639e-02,
1.2818e-01, 4.0660e-02, 7.6715e-02,
8.7991e-02, 4.6556e-01, -4.0025e-02,
2.1251e-03, -8.3784e-03, 5.9859e-02,
1.9835e-40, -3.4675e-40, -7.9692e-41,
-1.4304e-40, 2.3927e-40, -5.9796e-40,
3.8209e-40, -6.3260e-41, -9.2501e-41,
3.2007e-01, 1.5800e-01, -1.9594e-02,
-4.5315e-02, 1.0536e-01, -8.0692e-02,
2.1185e-01, -3.1418e-01, -1.5257e-01,
8.6294e-02, -1.3398e-01, -1.0694e-01,
8.6084e-02, -1.2393e-03, 1.7549e-02,
-1.5504e-01, -1.3112e-01, -3.5905e-02,
-3.8190e-01, 3.8393e-01, 1.6587e-02,
1.5002e-01, 1.9586e-01, -2.6260e-01,
-4.0159e-02, -8.2891e-02, -1.7761e-01,
-1.8611e-01, -1.1241e-02, -4.2538e-02,
-5.7898e-02, 2.4583e-01, 4.1590e-02,
2.4890e-02, 7.9409e-03, -2.7418e-02,
6.6194e-03, -4.2441e-02, -1.1167e-01,
-1.3236e-01, -7.9642e-02, -6.0623e-02,
-4.7198e-03, 5.6904e-02, 1.2651e-01,
1.2925e-01, -5.9162e-02, -9.1949e-04,
1.8668e-02, -2.6361e-02, -7.1042e-03,
-4.3178e-02, 2.6050e-04, 4.4799e-02,
7.9674e-02, 2.7656e-02, 7.1211e-03,
1.1463e-01, 1.0765e-01, 7.6066e-02,
-8.0780e-02, -5.4875e-02, 1.5209e-02,
-3.7365e-13, -3.7819e-13, -3.5929e-13,
-4.0298e-13, -4.0881e-13, -3.9033e-13,
-3.9409e-13, -3.9950e-13, -3.8277e-13,
-1.7847e-02, -1.7537e-02, -3.7313e-03,
2.6531e-02, 7.5951e-02, -4.0134e-03,
1.7387e-02, 6.0044e-02, -9.0211e-02,
2.7091e-02, 8.8333e-02, 1.0619e-01,
5.0470e-02, 1.2406e-02, 1.5503e-01,
-1.5936e-02, -2.2422e-01, -2.4640e-02,
-8.2430e-03, -1.4097e-02, -6.2474e-02,
8.0534e-02, 1.8603e-01, -3.1725e-02,
-3.1621e-03, 2.0362e-03, -1.4002e-01,
-7.3799e-03, 1.5881e-01, 6.7195e-02,
4.5946e-02, 2.4358e-01, 1.4677e-01,
-7.4788e-02, 6.7297e-02, 9.0735e-02,
-8.4553e-03, -1.1877e-02, 4.4209e-02,
-1.4281e-02, -6.8849e-02, -4.1386e-03,
3.2286e-02, 4.7128e-02, -1.2988e-02,
-2.2990e-02, -8.9265e-02, 6.4050e-02,
-2.3354e-02, 1.3846e-01, -1.6256e-01,
-6.5661e-02, -2.8983e-02, -4.3497e-02,
1.0597e-02, -2.3534e-02, -2.6068e-02,
-7.8812e-02, 1.9502e-01, 6.8938e-03,
3.2025e-02, 2.3353e-02, 4.9225e-02,
-5.0273e-40, 1.2403e-41, 5.8127e-40,
3.2777e-40, -3.5740e-40, 4.9781e-40,
-2.4198e-40, -4.6311e-40, 1.3330e-40,
-3.0803e-01, 1.7804e-01, 1.0604e-01,
4.1405e-01, 1.9740e-01, -5.3067e-02,
2.3738e-01, -1.6828e-01, 1.5338e-01,
6.6857e-03, 1.8623e-01, -1.2126e-01,
-1.6323e-01, -1.2719e-02, -1.7743e-01,
-1.3612e-01, -3.4442e-02, -1.0552e-01,
-1.4560e-01, 1.8771e-01, 8.4508e-02,
5.8732e-02, -2.2378e-01, 1.2673e-01,
3.0455e-03, 3.8438e-02, -6.2235e-02,
1.9951e-02, 2.6963e-01, -1.8594e-01,
-8.6550e-02, -1.3097e-01, -3.5032e-02,
2.0423e-02, -9.0499e-02, 1.7130e-01,
-1.8592e-01, 6.6808e-02, -1.5768e-01,
-6.4402e-02, -1.2265e-01, 6.8487e-02,
1.9899e-02, 9.3376e-02, 7.8577e-02,
-1.3384e-01, -7.6429e-02, 1.7142e-02,
-1.2385e-01, -1.1821e-01, -1.2716e-03,
5.3770e-02, 1.4973e-01, 1.4762e-01,
-4.7688e-02, -1.1733e-01, -1.5032e-01,
-2.0699e-01, -9.4949e-02, -2.6374e-02,
4.4489e-02, 1.8376e-02, -7.6844e-02,
1.8831e-40, -2.6056e-40, -4.7602e-40,
-3.4079e-40, 1.5054e-40, 1.2387e-40,
2.3040e-40, 1.4644e-40, 5.6365e-40,
-2.0809e-02, 5.3674e-03, 1.7057e-03,
2.4160e-01, 4.1348e-01, 3.5215e-02,
8.2154e-02, 2.0431e-01, 1.0366e-01,
-1.5149e-02, 1.0521e-01, -4.1706e-02,
-5.0651e-02, 2.3615e-02, -9.3860e-02,
-1.0823e-01, -6.3645e-02, -1.1573e-01,
-2.4116e-02, 1.3546e-02, -1.0298e-03,
1.2102e-02, 2.2630e-02, 1.1375e-01,
1.3966e-02, 1.0754e-01, 1.6621e-01,
1.6213e-02, 2.0816e-01, 8.9441e-02,
-7.5452e-02, 3.4580e-03, -3.3317e-01,
5.0917e-02, 1.3898e-01, -1.0723e-01,
6.0473e-03, 8.9741e-02, -6.8206e-02,
-7.1770e-02, -3.5661e-01, -2.8935e-01,
-1.6324e-02, 2.5728e-02, -1.1281e-02,
-1.3390e-01, -9.3090e-02, 4.3366e-02,
4.8620e-02, 1.4917e-01, 1.6295e-01,
2.4123e-03, -7.6347e-02, -8.0226e-02,
6.0740e-03, 3.7065e-02, 4.5518e-04,
-1.3793e-01, 2.3848e-01, -1.1199e-01,
1.0422e-01, 1.1214e-01, 3.3457e-02,
-3.2827e-40, 5.9135e-40, 3.3773e-40,
-5.8903e-40, -5.9439e-41, 1.9973e-40,
-3.6141e-40, -4.7563e-40, -1.0222e-40,
7.3457e-02, -8.2031e-02, -2.9504e-02,
-5.3420e-02, 4.9697e-02, 7.6779e-03,
2.1180e-02, 1.1069e-02, -1.1940e-02,
1.7302e-02, 9.9063e-02, 4.8847e-02,
4.9513e-02, 2.4240e-01, 2.7174e-01,
2.7487e-01, 1.9410e-01, 3.1165e-01,
-6.7532e-03, -1.1608e-01, -5.0876e-02,
1.2107e-01, 3.1073e-01, 7.1681e-02,
-1.1411e-01, -1.7902e-01, 7.8898e-02,
-2.0117e-02, 3.6394e-01, 1.4546e-01,
-8.0861e-03, -4.3956e-02, -1.3473e-01,
5.1519e-02, -3.1122e-01, -4.6847e-02,
5.0405e-02, -1.0611e-02, -1.0557e-01,
-4.4346e-02, -1.4505e-01, 5.3977e-02,
-2.6288e-01, 1.8247e-02, -1.1606e-01,
1.0706e-01, -9.3675e-02, 1.1757e-01,
-5.0440e-02, -1.1784e-01, -4.0599e-02,
1.9618e-01, 9.9370e-02, 8.2258e-02,
2.6762e-02, -5.0740e-02, -1.8302e-02,
5.3340e-02, 6.5710e-02, 6.1552e-03,
-7.2158e-02, -3.5563e-02, 8.2140e-02,
3.1534e-40, 3.6427e-40, 3.0437e-40,
4.2856e-41, -4.7870e-40, 5.6317e-40,
-2.4673e-40, -6.9736e-41, 8.1050e-41,
1.4544e-01, 8.2490e-02, -9.2349e-03,
2.6124e-01, 2.7494e-01, -5.4946e-02,
1.8233e-01, 1.2428e-01, -6.7498e-03,
9.7639e-02, -6.2085e-03, 4.8154e-02,
2.7379e-02, -1.8443e-01, 4.0402e-02,
1.8893e-03, -5.2282e-03, 6.7548e-03,
-1.6559e-01, 9.7901e-02, -1.1869e-01,
-2.1287e-01, 4.1023e-01, -9.7379e-02,
-1.3767e-03, -1.6343e-01, -9.5059e-02,
-1.3547e-01, 2.0094e-01, 1.0102e-01,
-2.1311e-01, -1.5088e-01, 1.8175e-01,
4.6946e-02, -1.3963e-01, 1.0220e-01,
1.7536e-01, -2.4758e-01, -1.1481e-02,
6.1596e-02, -4.0352e-01, -1.4348e-01,
3.1690e-02, 1.7240e-01, 7.0780e-02,
9.9953e-02, -1.4154e-01, -8.3038e-02,
1.4527e-01, -2.1430e-01, -7.5840e-02,
1.6146e-01, 3.7508e-02, 5.3833e-02,
1.6723e-01, 1.7113e-01, -4.8512e-02,
2.1319e-01, 4.7031e-01, 1.1570e-01,
2.0330e-01, 2.4636e-01, 6.9924e-02,
-2.1165e-40, -1.9259e-40, -5.0990e-41,
-7.1298e-42, -4.2590e-41, 3.1709e-40,
4.1065e-40, -4.2585e-41, 3.4243e-40,
-1.0338e-40, 4.6039e-40, -3.3818e-40,
-3.9589e-41, 5.9574e-40, -5.8014e-41,
1.4505e-41, -3.5326e-40, -3.9806e-40,
4.2423e-40, -1.7055e-40, -4.9666e-40,
2.2853e-40, -2.4684e-40, -1.3794e-40,
-5.6764e-40, -1.7905e-40, -5.8915e-40,
-1.4755e-27, -2.0405e-28, -4.8677e-30,
-7.1151e-28, -9.7603e-29, -3.5264e-30,
-2.7455e-29, -5.7734e-30, -2.8633e-31,
-5.9960e-06, -5.9595e-06, -5.8686e-06,
-6.0381e-06, -6.0191e-06, -5.9605e-06,
-5.9849e-06, -5.9981e-06, -5.9654e-06,
-4.8277e-22, -7.0529e-22, -8.7179e-22,
-4.6334e-22, -6.3505e-22, -8.8438e-22,
-3.3883e-22, -4.2421e-22, -5.9002e-22,
-2.9574e-40, 4.0860e-40, -1.5966e-40,
-6.7527e-41, 7.6661e-41, -5.9491e-40,
3.0843e-40, 8.1079e-41, -2.5140e-40,
-3.7315e-40, 9.4787e-41, 4.6794e-40,
1.9383e-40, 5.0336e-41, 3.0561e-40,
-5.4286e-40, 5.5999e-40, -4.6977e-40
}
,
{
-1.7778e-01, 5.2351e-03, 1.6035e-02,
-9.7482e-02, -1.1056e-02, -5.0999e-02,
1.7460e-01, -4.0005e-02, -5.0911e-02,
-9.3843e-02, 1.2640e-01, -1.5016e-02,
-5.2880e-01, 1.9469e-01, -9.0037e-02,
-8.9136e-02, 9.8632e-02, -1.5009e-01,
-1.8080e-01, 1.1396e-01, -2.6178e-02,
-1.6689e-02, 1.4132e-01, -6.7769e-03,
-2.1120e-02, 6.8616e-02, -7.8209e-02,
4.8237e-02, -2.5303e-02, 1.7882e-02,
-4.2852e-02, -1.5071e-02, -3.3818e-02,
1.3635e-01, 4.5330e-01, 2.1489e-01,
2.7362e-02, -7.4152e-02, 2.3185e-03,
1.8771e-01, -2.0827e-02, -7.5581e-02,
1.4675e-01, -6.5552e-02, 4.2292e-02,
1.3990e-01, -4.1598e-01, 2.1609e-03,
1.5997e-01, 1.1375e-01, -1.8272e-02,
1.9045e-02, -4.2702e-02, -2.5602e-02,
1.6432e-01, -1.2783e-01, -1.8285e-03,
2.9414e-01, 1.7401e-01, -2.6321e-01,
-1.0125e-01, 1.3565e-01, 1.5894e-02,
-3.7351e-40, 6.3010e-40, -1.2071e-40,
-4.6380e-40, 1.8442e-40, -3.5994e-40,
-2.1459e-40, -4.3455e-40, -6.1978e-41,
-2.3638e-40, -4.6965e-40, -3.4232e-40,
-1.6517e-40, 4.7178e-40, -1.6757e-40,
6.7890e-41, -4.3000e-40, 1.8323e-40,
4.5416e-40, -2.9010e-40, -1.5200e-40,
-3.5533e-40, -8.7351e-41, 6.5595e-42,
5.1625e-40, -6.0418e-40, -2.7846e-40,
-2.1861e-10, -2.2422e-10, -2.1298e-10,
-2.2653e-10, -2.3500e-10, -2.2512e-10,
-2.1802e-10, -2.2681e-10, -2.1608e-10,
-3.2862e-40, 3.4241e-40, -1.3264e-40,
2.8762e-40, 1.3843e-40, 3.0949e-40,
-3.7702e-40, 2.6194e-40, 2.1451e-40,
-3.2283e-40, -5.5487e-40, 5.8744e-40,
1.6124e-40, 3.3512e-40, 3.1454e-40,
-3.5417e-40, -5.7692e-40, 5.5184e-40,
3.5641e-40, -4.3187e-40, -3.5314e-40,
4.9246e-40, 5.9593e-40, 8.3132e-41,
-2.3841e-40, -5.6196e-40, -3.2230e-41,
4.3824e-40, -3.8344e-40, -9.9086e-42,
-2.9323e-40, 2.1916e-40, 4.4739e-40,
5.6837e-41, 5.1796e-41, -2.4338e-40,
-2.2853e-40, -3.8920e-40, 6.1587e-40,
-2.9474e-41, 4.6214e-40, -3.6292e-40,
-1.4928e-40, -3.6708e-41, 5.2020e-40,
-1.2983e-12, -2.6539e-12, -1.9817e-12,
-6.5613e-12, -1.0255e-11, -6.6919e-12,
-8.3217e-12, -1.7832e-11, -1.1086e-11,
-4.9138e-40, -9.0061e-42, 4.6251e-40,
-2.9970e-41, -2.5468e-40, -3.5660e-40,
2.5450e-40, -9.5634e-38, -3.2369e-32,
-1.0233e-06, -8.2108e-07, -1.1668e-06,
-5.9592e-07, -3.9529e-07, -5.7435e-07,
-6.0253e-07, -3.8785e-07, -4.9365e-07,
-8.9372e-37, -2.1590e-36, -2.1060e-40,
-1.5666e-35, -1.1466e-38, -2.3366e-40,
-5.4077e-38, 5.0487e-40, -3.3736e-40,
-1.5357e-13, -8.4607e-14, -1.9206e-16,
-5.5373e-13, -3.0787e-13, -1.0513e-15,
-1.0468e-13, -8.6069e-14, -2.2453e-16,
-4.7501e-14, -1.3426e-13, -1.1133e-13,
-1.3801e-14, -2.4024e-14, -3.5120e-14,
-1.9817e-17, -1.3229e-17, -3.2854e-17,
-1.4365e-18, -4.1143e-15, -9.2614e-14,
-1.1174e-19, -1.6235e-15, -1.5600e-13,
-1.2643e-21, -3.9578e-17, -1.2038e-14,
-2.9789e-40, -4.6452e-40, 1.5649e-40,
-1.8445e-40, -5.2942e-40, 2.5130e-40,
6.2269e-40, 3.9166e-41, -2.4197e-40,
9.0835e-02, -5.2035e-03, -2.5980e-02,
-1.0090e-01, -7.4167e-02, 1.3364e-01,
1.0302e-01, -1.5250e-01, 1.2417e-01,
4.7205e-02, -2.3839e-01, -1.4983e-02,
5.6824e-02, -1.8259e-02, 9.6426e-02,
5.9740e-03, -1.4198e-01, -2.1076e-01,
-1.5837e-01, 6.4749e-02, -2.1417e-01,
-3.4048e-02, 4.9638e-01, 2.0984e-03,
-1.4335e-01, 4.8295e-02, -9.2209e-02,
1.9450e-01, -1.3603e-01, 1.2008e-01,
1.6803e-01, 5.6805e-02, 1.1518e-01,
5.9320e-02, -3.8200e-02, -1.1340e-01,
-8.6877e-02, 1.1533e-01, -4.9870e-02,
-7.2811e-03, 2.5730e-01, -1.8536e-01,
-6.4965e-02, 1.0364e-01, 1.3706e-02,
4.6974e-02, -1.0049e-01, -1.7460e-01,
-1.7910e-01, 3.0771e-01, -2.5757e-01,
-2.2846e-02, -3.7491e-03, -5.2171e-03,
-4.7762e-02, -4.7776e-02, 5.1125e-01,
-2.0210e-01, 6.4815e-02, -6.1606e-02,
7.3686e-04, -1.6226e-01, -3.0327e-02,
5.6501e-40, 5.2828e-40, -5.9773e-40,
-4.3530e-40, -1.1658e-40, 4.9705e-41,
4.8101e-40, 5.0236e-40, 2.0476e-40,
-1.1412e-01, 1.3391e-01, -1.2279e-01,
1.4370e-01, 3.7617e-01, 7.1407e-02,
6.9661e-02, 3.1963e-01, -1.7089e-02,
-4.7530e-02, 6.5411e-02, -2.4915e-02,
3.3429e-02, -1.3899e-01, -3.3875e-02,
-1.9261e-02, -1.3162e-01, 1.1415e-01,
2.0599e-02, -3.8667e-02, -7.2190e-02,
-2.1112e-01, -1.6525e-01, -2.3430e-02,
-1.2287e-02, -2.6637e-01, 1.0859e-03,
-2.8564e-02, 4.8846e-02, 4.2412e-02,
1.4632e-01, 1.5974e-02, -1.0699e-01,
5.5661e-02, -2.0952e-01, 2.4151e-02,
-2.3510e-02, -5.0570e-02, 1.0799e-01,
1.7495e-01, -1.5788e-03, -1.6447e-02,
7.7642e-02, -9.3888e-02, 1.3891e-03,
2.2658e-02, 1.4058e-01, 1.0639e-01,
-5.5626e-02, -3.0794e-01, -5.7160e-02,
1.0874e-01, -8.3907e-02, 4.2106e-02,
1.7688e-02, 1.8090e-01, -2.1718e-03,
-1.0659e-02, -2.1302e-01, 1.0056e-01,
-6.0693e-02, -2.3624e-02, 6.3688e-03,
-2.7320e-40, -1.3336e-40, 2.4202e-41,
-7.1225e-41, 1.2848e-40, 1.5426e-40,
-4.2798e-40, 6.5075e-41, 6.2629e-40,
1.6905e-01, -1.7379e-01, -2.1360e-02,
-2.9396e-01, 1.1782e-01, 7.9111e-02,
-6.4767e-03, -1.9949e-01, 5.4243e-02,
-3.2753e-02, -1.5810e-01, 5.2257e-02,
-1.8133e-02, 2.0548e-01, -2.8071e-01,
-5.3725e-02, 8.4067e-02, -7.4639e-02,
8.9137e-02, -2.3078e-01, -1.9626e-01,
3.1276e-01, 1.5332e-01, -1.9590e-01,
-1.8318e-02, 6.8460e-02, 9.1476e-03,
8.2398e-02, 8.5883e-03, 7.6830e-02,
-1.4580e-01, 4.6253e-01, -3.1900e-01,
-1.1051e-01, 6.3807e-02, -2.5130e-02,
-1.2029e-01, -3.8982e-03, 2.1654e-02,
-3.2017e-01, 2.0265e-01, -1.7311e-01,
-1.3229e-02, 1.3805e-01, -6.2689e-02,
-3.6619e-02, -1.9366e-01, 2.7177e-01,
5.5937e-02, 7.9713e-02, -2.3872e-01,
-3.9690e-02, 2.2914e-02, -1.7779e-02,
1.1110e-01, 1.6618e-01, 3.6139e-01,
7.9777e-02, 4.3655e-01, 3.0597e-01,
-5.5125e-02, 6.1229e-02, 1.2414e-01,
2.1644e-40, 7.2343e-41, 5.5580e-40,
-4.3927e-40, 5.0561e-40, -1.5560e-41,
-3.2783e-40, -8.8219e-41, 5.4415e-40,
-6.7176e-02, -3.4930e-02, -2.7087e-02,
1.0489e-01, 2.1178e-01, -1.6752e-01,
-1.2627e-01, -2.4207e-01, -7.4667e-02,
-3.1470e-02, -1.3365e-02, 8.7742e-02,
-2.2809e-02, -4.7991e-01, 2.4740e-02,
6.4418e-02, 3.4818e-02, -2.9275e-01,
-2.8830e-01, -7.0458e-02, 7.8922e-02,
-1.4436e-01, 4.1068e-02, 6.2896e-02,
4.1061e-03, 2.1844e-01, 9.0488e-02,
-1.1085e-01, 8.3761e-02, 3.2634e-02,
3.2470e-01, -2.7760e-01, 4.1235e-02,
8.6625e-02, 2.6816e-01, -1.3560e-01,
3.8789e-01, 3.2406e-01, 1.0631e-01,
7.5131e-02, -2.0206e-01, 1.3027e-01,
4.0382e-02, 2.4350e-01, -3.6042e-03,
-1.0063e-01, 1.9418e-01, -7.7039e-02,
9.4531e-03, 7.1605e-02, 1.4004e-01,
-2.0591e-02, 4.5944e-02, -2.6721e-03,
-3.4665e-03, 2.2560e-01, -8.2930e-02,
-1.5507e-01, 2.7206e-01, -2.8665e-02,
-3.4909e-03, 1.7696e-02, -8.5492e-02,
2.1541e-40, -3.3029e-40, 1.7678e-40,
-3.9857e-40, -1.1965e-40, -8.6754e-41,
-4.0721e-40, 2.2073e-41, 4.2728e-40,
-1.0496e-02, 5.4120e-02, -1.6498e-02,
-5.9387e-02, 2.3757e-01, -8.0381e-02,
2.3739e-02, -1.3715e-01, -3.0906e-02,
-8.5760e-03, 2.4518e-02, -6.9090e-02,
2.1623e-02, 8.9641e-02, 9.9031e-02,
-1.0052e-02, 4.6506e-02, -1.5756e-01,
8.5003e-02, -3.6434e-03, 1.3816e-02,
9.0532e-02, 2.3661e-01, 1.8077e-01,
2.8120e-02, 4.3753e-02, 2.2981e-02,
3.5830e-02, 5.7995e-02, -5.6879e-03,
3.7708e-02, -2.6373e-01, 2.0886e-01,
-4.0632e-02, 1.6891e-01, -6.8996e-02,
-1.1972e-01, -4.3628e-02, 2.0278e-02,
-1.4818e-01, 4.0844e-02, 1.5917e-01,
-4.5684e-02, 1.4075e-01, -2.0784e-02,
-1.1533e-03, -2.7897e-01, -8.8707e-02,
-1.7907e-02, 1.8400e-01, 1.1026e-01,
-2.3183e-03, 6.3875e-02, -4.2394e-03,
3.2021e-02, -8.8955e-02, -2.2298e-02,
8.1353e-02, 3.3079e-01, -2.0616e-01,
-3.5802e-02, 4.9804e-02, -9.2712e-02,
-1.5940e-07, -1.6158e-07, -1.5812e-07,
-1.6273e-07, -1.6555e-07, -1.6260e-07,
-1.5867e-07, -1.6192e-07, -1.5975e-07
}
,
{
-1.5080e-02, 1.1294e-01, 7.1187e-02,
1.1628e-02, -8.4938e-01, 8.5457e-02,
-3.9642e-02, -2.3879e-02, 1.0029e-02,
2.6648e-40, 9.1590e-41, 3.3285e-40,
-3.3445e-40, -2.5194e-40, -2.0946e-40,
3.6800e-40, -1.1584e-40, 6.2195e-40,
-1.3560e-41, -8.0151e-41, 4.4048e-40,
-4.1209e-40, 2.7411e-40, 3.2419e-40,
5.8333e-40, 1.1503e-40, -5.0783e-40,
-5.5301e-02, -2.4971e-02, 4.9251e-02,
-2.5589e-01, 1.6560e-01, -8.0956e-02,
4.0518e-01, 3.1320e-02, -1.4262e-01,
1.2250e-02, 5.1989e-02, 3.0706e-03,
-7.9534e-02, -1.9801e-01, -2.7791e-02,
2.1768e-01, 6.9978e-02, -4.2325e-02,
-1.9165e-02, -2.1179e-02, -2.1558e-02,
3.6816e-01, -5.2929e-02, 9.5790e-02,
2.8095e-01, -1.4731e-01, 3.4182e-02,
2.3702e-02, 4.0764e-02, 3.5767e-02,
-8.4586e-02, 1.9025e-01, -1.6794e-01,
-1.0273e-02, 3.2259e-01, -1.5841e-01,
2.6794e-01, 5.2084e-02, 1.2761e-02,
-1.1169e-01, -1.7808e-01, 1.1363e-01,
-1.3808e-01, -1.7764e-02, -1.7420e-02,
1.5840e-02, -2.3405e-01, 7.6361e-03,
-6.6082e-02, 7.9778e-02, -2.0423e-01,
-1.9594e-02, -6.3370e-02, 3.3351e-02,
-2.0396e-40, -3.0207e-40, -3.2364e-40,
2.3575e-40, 5.8301e-41, -3.7432e-40,
-3.6291e-40, 3.3441e-40, 1.4574e-40,
-4.3792e-40, -2.5814e-40, -3.4986e-41,
-3.4920e-40, -4.4757e-40, 3.2192e-40,
4.7222e-40, -7.3197e-41, -3.4635e-40,
5.1495e-02, 7.8843e-02, 4.2243e-02,
-2.1245e-01, 1.9568e-01, 7.9369e-03,
2.2795e-02, 2.2801e-02, 7.6895e-02,
3.0044e-01, -1.4041e-01, -2.3677e-02,
-1.1656e-01, -7.5113e-02, 1.0625e-02,
-1.2133e-02, 5.0658e-02, -7.2944e-02,
-3.3652e-02, -2.0452e-01, -4.1048e-02,
2.8531e-01, 1.2116e-01, -2.1526e-02,
-2.4564e-01, -4.1870e-02, -5.5819e-02,
-2.3157e-01, -2.5594e-02, 1.1154e-01,
2.1234e-01, 3.2762e-01, -2.9000e-01,
1.8591e-02, -5.9820e-02, -9.0807e-02,
-3.0027e-01, -1.8370e-01, 1.2086e-02,
2.1178e-02, 2.9559e-01, 1.2966e-01,
6.8542e-02, 7.7710e-03, -6.0304e-02,
3.3019e-03, -1.9135e-02, 9.3227e-03,
-9.9003e-03, -1.0101e-01, -3.3513e-01,
-8.4091e-03, -1.5918e-02, -3.4323e-02,
3.8770e-40, -2.8639e-40, 4.6953e-40,
4.2631e-40, 6.2568e-41, -5.3500e-40,
-2.1987e-40, 1.3435e-40, 4.4101e-40,
-3.9973e-40, 6.3046e-40, 1.6046e-40,
4.4338e-40, 1.6940e-41, 4.1598e-40,
2.6132e-40, -2.9888e-40, -7.5708e-41,
-1.5991e-02, 8.2749e-02, -6.3776e-02,
-3.2220e-03, 4.1443e-02, -8.1219e-02,
-1.1231e-01, 6.7586e-01, -1.7600e-01,
-4.0371e-02, -7.9044e-02, 1.2451e-01,
4.1907e-02, -8.8159e-02, -1.1229e-01,
-4.0654e-03, -4.4087e-03, 1.2942e-01,
9.3318e-03, -6.5085e-02, 1.0165e-02,
-2.8758e-02, -4.9997e-02, 4.6069e-02,
4.2107e-04, 2.1718e-01, 3.1080e-03,
-9.1277e-03, -2.8568e-02, 1.6202e-02,
-8.2490e-03, 1.2888e-01, -1.3159e-01,
1.6065e-02, 4.0143e-02, 2.7043e-01,
-3.4809e-02, -8.1302e-03, 6.0786e-02,
5.1845e-02, 4.6995e-01, -1.0392e-02,
2.3359e-02, -1.8364e-01, -3.7343e-01,
-8.2996e-02, 9.7724e-02, -6.1012e-02,
2.8225e-02, 8.8706e-02, 1.3443e-02,
3.7515e-03, 1.7772e-02, 6.5945e-03,
-7.3847e-12, -7.5629e-12, -6.9337e-12,
-7.6292e-12, -7.8624e-12, -7.2877e-12,
-7.0582e-12, -7.3197e-12, -6.8467e-12,
1.5445e-11, 2.0754e-11, 2.0524e-11,
2.1239e-11, 2.5909e-11, 2.5983e-11,
2.0986e-11, 2.5190e-11, 2.2478e-11,
-4.7164e-02, -2.4754e-02, -1.8256e-02,
1.0526e-01, -4.6010e-03, -2.2784e-02,
-5.2028e-02, -1.6408e-01, 7.9112e-03,
-8.1863e-02, 4.2772e-02, -9.9446e-04,
-5.5521e-02, -1.1264e-01, -4.5782e-02,
-1.1026e-01, 2.1443e-02, -4.5120e-02,
-1.4141e-02, -2.8116e-03, 2.6990e-02,
-2.0201e-01, 4.3214e-01, 2.9373e-02,
-2.1768e-01, -2.7230e-02, 5.5396e-03,
5.0196e-02, 1.5506e-01, -5.7328e-02,
4.8323e-02, 3.8243e-02, -1.3533e-01,
-9.8862e-03, -5.6971e-02, -7.1500e-02,
1.0272e-01, 7.4686e-02, 7.4732e-02,
8.3744e-02, 1.5834e-01, 2.9221e-02,
6.5641e-02, 7.7697e-02, 3.5746e-02,
-1.6614e-01, -2.3128e-01, 4.4691e-02,
6.3546e-02, -3.8105e-01, 3.4110e-02,
-3.5022e-02, -2.3782e-02, 2.8664e-02,
-3.8813e-41, -2.8626e-40, -9.0218e-41,
4.1216e-40, -4.4215e-40, 3.1198e-40,
5.6281e-40, 2.0477e-40, 2.7797e-40,
-4.4903e-40, -6.2574e-41, 4.9971e-40,
5.0135e-40, -3.1945e-40, -2.4694e-40,
2.6587e-40, -4.9583e-40, -4.9771e-40,
3.7139e-02, 5.2936e-04, -2.3658e-02,
-3.6199e-01, -5.1912e-02, -5.1969e-02,
2.5415e-01, 2.4109e-01, 9.8721e-03,
5.5061e-02, -4.7469e-02, 3.0045e-02,
2.1565e-03, -2.3866e-02, -2.3496e-02,
6.0892e-02, -4.6442e-04, -5.0200e-02,
5.4971e-02, -1.7234e-02, -3.2759e-03,
4.8225e-01, -1.1234e-01, 3.8257e-02,
5.2105e-02, -2.8473e-03, -1.0355e-02,
-9.5654e-03, -1.8751e-01, 1.7079e-02,
7.0133e-02, 7.6363e-01, -8.7388e-02,
-5.6536e-02, -1.9152e-01, -1.6043e-01,
2.0359e-01, 7.4214e-02, 3.1970e-02,
-1.8199e-01, -1.9386e-01, -2.5967e-03,
-3.4609e-02, 3.3870e-02, 5.8835e-02,
8.8220e-02, 9.9265e-02, 7.1240e-03,
-9.1395e-02, -3.1699e-01, -2.9120e-02,
-1.8436e-02, -2.1432e-02, -4.5465e-02,
-3.2013e-40, 3.2019e-40, 4.8747e-41,
2.6585e-40, 6.1463e-40, 1.4176e-40,
-1.5286e-40, 3.0543e-40, 7.2032e-41,
-6.0758e-40, -3.6200e-40, 1.2123e-40,
1.3627e-40, 3.2983e-40, 3.6171e-40,
-4.2148e-40, 1.1102e-40, 3.2714e-40,
-3.4763e-02, -3.1632e-02, 3.0044e-02,
-2.0935e-01, 1.3533e-01, -9.1607e-03,
-1.5931e-01, 1.0771e-01, -6.6518e-02,
2.4399e-02, 2.2923e-03, 5.1575e-02,
-1.4154e-01, -1.0013e-02, -7.5696e-02,
1.0849e-01, 1.2575e-01, -7.3161e-02,
-1.5217e-02, -2.7659e-02, -3.1401e-02,
3.4960e-01, 7.2390e-02, 2.0722e-02,
3.9440e-01, 9.1821e-04, 1.7842e-02,
-1.5670e-02, 5.3020e-02, 6.0536e-02,
-1.8853e-01, 2.7532e-01, -1.9681e-01,
8.3258e-02, 9.4285e-02, -1.2695e-01,
2.7593e-01, 1.1456e-01, 1.6048e-02,
-5.1675e-01, 1.4727e-01, 7.5170e-02,
-6.9143e-02, -9.2948e-02, 3.4687e-02,
1.4128e-02, -7.9962e-02, 8.0446e-02,
3.7011e-02, -1.3400e-01, -2.0725e-02,
-6.4981e-03, 7.0724e-02, 6.6167e-02,
-4.5940e-41, 2.5437e-40, -3.3111e-40,
5.9661e-40, 6.2521e-40, 5.6418e-40,
1.9187e-40, -5.8872e-40, 5.5747e-40,
-1.6402e-11, -2.2097e-11, -1.7224e-11,
-2.2755e-11, -2.9977e-11, -2.1231e-11,
-1.3688e-11, -1.7479e-11, -1.3081e-11,
6.4790e-03, -3.8464e-03, -1.0008e-02,
-2.6001e-02, -7.9483e-02, 3.3711e-02,
2.6659e-03, -3.2634e-02, 1.0767e-02,
4.9939e-03, 1.4064e-02, -3.4294e-02,
4.8529e-02, 6.3386e-01, -3.6805e-02,
-1.3703e-01, 2.5878e-02, -4.8617e-02,
3.2186e-02, 6.6382e-02, 1.9305e-02,
7.0196e-02, -1.6892e-01, -2.8980e-02,
9.7762e-02, 9.7998e-03, -5.1620e-03,
5.0753e-02, -4.5071e-03, -3.9836e-02,
-6.0381e-02, -9.2016e-02, 9.5433e-02,
-1.0045e-02, 8.7955e-03, 4.9429e-02,
-1.8363e-02, -1.1912e-01, 9.7347e-03,
-1.5657e-01, -2.1035e-01, -4.9737e-02,
-3.0025e-02, -6.4959e-02, -5.6107e-02,
3.2927e-40, 5.7263e-40, 6.2889e-40,
-6.0716e-39, 5.3050e-41, -1.7152e-40,
-3.2493e-38, -1.5841e-40, -1.9343e-40,
4.9763e-40, 5.5142e-40, -4.3462e-40,
-2.2649e-40, 1.4321e-40, -2.6779e-40,
2.3072e-41, 5.4080e-40, -6.4200e-41,
2.2827e-40, -5.4515e-41, -4.1768e-40,
3.9033e-40, 6.1988e-41, 5.9877e-40,
-4.3355e-41, -5.1088e-40, 5.9845e-40,
-4.8238e-40, -1.8586e-40, 4.8699e-40,
-9.7225e-41, 4.3387e-40, -4.3683e-40,
-7.9278e-41, -5.3614e-40, 2.1911e-40,
-3.3982e-40, -5.3335e-40, 3.8540e-40,
1.9051e-40, -2.0840e-40, 2.2868e-40,
-3.5020e-40, -3.4276e-40, 2.7395e-42,
3.9197e-40, 6.1843e-40, -1.5888e-40,
4.3516e-40, -6.1852e-40, -5.3692e-40,
-4.3268e-40, 3.5154e-40, 3.4477e-40,
-4.8414e-40, 2.2647e-40, -2.5591e-40,
4.6326e-40, -3.0462e-40, 4.7817e-40,
-4.9853e-40, -5.3425e-40, -2.9848e-40,
-1.3329e-07, -1.3784e-07, -1.3049e-07,
-1.3376e-07, -1.3905e-07, -1.3204e-07,
-1.2479e-07, -1.2994e-07, -1.2410e-07
}
,
{
-2.5964e-02, 2.9670e-02, 1.2100e-01,
-3.0371e-02, -1.5277e-02, -1.8589e-01,
-1.8650e-02, -1.2852e-01, -6.6297e-02,
9.7934e-04, -5.1835e-02, -1.0278e-03,
-1.2336e-02, 2.2130e-01, -1.2373e-01,
-2.3451e-02, 3.4217e-02, -1.0118e-02,
-3.0558e-01, -8.5390e-02, -1.4360e-02,
1.2473e-01, -1.7005e-02, -3.6816e-02,
-8.9125e-02, -6.1400e-02, -2.0623e-02,
1.3736e-02, 1.2441e-02, -4.3491e-02,
6.4806e-02, 3.7012e-01, 3.8064e-02,
-1.3731e-02, -2.4859e-01, -2.5450e-01,
-6.5111e-03, -1.4271e-01, -5.0481e-02,
5.3240e-02, -3.4843e-02, -2.2703e-02,
3.7414e-02, 1.0334e-01, -7.2237e-02,
1.4216e-02, 3.4231e-02, -2.0890e-02,
2.7879e-02, 1.3717e-01, 4.5864e-03,
3.0460e-03, -1.1734e-01, 4.4439e-02,
6.4825e-03, 1.6324e-02, 1.4928e-02,
-8.8420e-02, -1.0779e-01, -9.0653e-02,
3.1086e-02, -2.9067e-02, -8.8488e-02,
-1.6779e-40, -6.3646e-41, -6.2486e-40,
2.3154e-40, 2.8049e-40, 3.7718e-40,
-3.3950e-40, -3.1501e-40, 5.8709e-40,
2.1435e-02, -4.3732e-01, 1.5520e-02,
3.4080e-02, 1.9912e-01, -8.1413e-02,
-3.2816e-02, 5.7844e-02, 8.9258e-03,
-1.1662e-02, -1.1721e-02, 4.3033e-02,
5.2135e-02, -2.2503e-01, 2.3941e-01,
3.8400e-02, 1.8075e-01, -1.4776e-01,
2.6784e-01, 2.2817e-01, -3.0553e-03,
-6.7998e-02, -1.2050e-01, 1.4714e-02,
2.4045e-02, -1.4329e-02, -1.6705e-02,
-1.1421e-02, 4.2139e-02, 4.2944e-02,
1.8809e-02, -2.5221e-01, 9.7562e-02,
-4.1600e-02, 4.0069e-03, 7.5290e-02,
-2.0092e-02, 2.3537e-01, 2.4356e-02,
3.1957e-02, -4.8573e-02, 2.9379e-02,
6.4562e-03, -1.1527e-01, -9.1223e-02,
-2.3432e-02, 5.2881e-02, -7.3239e-02,
-3.7048e-02, -2.1481e-01, 5.9801e-05,
-4.2646e-02, -1.8366e-02, -1.0681e-01,
-1.3366e-01, -1.7123e-01, -3.5629e-02,
1.1216e-01, 1.1479e-01, 9.5297e-02,
2.4728e-02, -7.3135e-03, -3.4373e-02,
-2.3917e-40, -4.1869e-41, 3.7775e-41,
2.8931e-40, -9.4850e-41, 2.5694e-40,
3.3549e-40, -2.4334e-40, -5.5933e-41,
-2.0900e-02, 2.1203e-02, -4.7169e-02,
2.3632e-02, -7.1148e-01, 4.9722e-02,
-7.8963e-03, 5.0689e-02, 2.2619e-02,
-4.7364e-03, 3.2037e-02, 1.1004e-02,
-4.3001e-03, 2.5245e-01, 5.9112e-02,
2.8932e-02, -1.1267e-01, -2.3739e-01,
-6.5379e-02, 5.2462e-03, -1.6807e-02,
1.0960e-01, 1.7943e-01, -6.3043e-03,
9.3102e-02, 7.3103e-02, 2.5259e-02,
5.6835e-02, 4.0467e-02, 2.5447e-03,
9.4599e-02, 2.5222e-01, 6.9855e-02,
4.4758e-02, 1.8073e-01, 1.5075e-01,
2.0329e-02, -4.9412e-02, 2.0663e-02,
-7.1648e-03, 1.4986e-01, 2.1212e-01,
2.7657e-02, -6.8660e-02, 1.7321e-02,
1.0629e-02, -1.0722e-02, 2.8247e-02,
-1.1303e-02, 1.0076e-01, -4.0592e-01,
2.6744e-02, 7.3650e-02, 5.7966e-02,
2.8122e-02, -7.5961e-02, -9.4797e-03,
-1.3010e-01, -5.4184e-01, -1.3619e-01,
-1.8661e-03, -1.4357e-01, 7.9520e-03,
-1.3538e-09, -1.6580e-09, -1.7289e-09,
-1.2386e-09, -1.5132e-09, -1.5987e-09,
-1.1157e-09, -1.3420e-09, -1.4090e-09,
1.5441e-02, -1.8142e-01, -8.6802e-02,
-4.0983e-02, 2.4351e-01, -5.8181e-02,
-2.9568e-02, 3.9561e-03, 3.4181e-02,
-2.9210e-02, 2.5403e-02, 9.1331e-02,
2.3621e-02, 2.3954e-01, 5.2487e-02,
1.6509e-02, -6.2728e-02, 1.3448e-02,
1.2855e-01, 1.1892e-02, -1.3356e-02,
1.0810e-01, 1.6760e-01, -3.2040e-02,
6.2209e-02, 4.0682e-02, 3.9772e-02,
-6.1711e-03, 5.0588e-02, -1.0811e-01,
1.5744e-02, 1.6091e-01, -6.1739e-02,
-5.6717e-02, -1.0657e-02, -3.7943e-02,
-4.0595e-02, 8.0149e-02, 2.0216e-02,
3.8838e-02, -6.3586e-01, 2.3785e-01,
-1.0472e-02, 6.3899e-02, -8.2184e-02,
-1.9137e-02, 8.1163e-02, 6.7065e-02,
-2.2377e-03, 1.1860e-01, 3.4122e-02,
1.0501e-02, 2.9851e-02, 7.5841e-02,
5.8970e-02, -1.2188e-01, 7.7982e-02,
-2.6516e-02, -4.1289e-01, 2.1471e-02,
3.3957e-02, 3.5762e-02, -5.7857e-02,
-2.7357e-30, -3.4780e-30, -3.0306e-30,
-1.5188e-30, -1.9888e-30, -1.8755e-30,
-7.7431e-31, -9.7571e-31, -9.7402e-31,
-1.8497e-02, -2.4554e-02, 1.4428e-01,
1.4217e-02, -2.3647e-01, 8.4097e-02,
-1.0251e-02, -4.2137e-03, 6.0831e-03,
1.7742e-03, 2.1487e-02, 3.3147e-02,
-1.0971e-02, 3.0162e-01, 5.2391e-02,
1.8341e-02, -1.3390e-01, 9.4303e-02,
-1.5685e-01, 9.8434e-02, -1.2502e-03,
3.1370e-01, -2.8879e-02, 2.6313e-03,
1.7548e-02, 6.6741e-03, -1.7681e-03,
5.2062e-02, 6.6914e-02, 7.5256e-03,
2.4966e-02, 2.8081e-01, 2.9815e-02,
2.2375e-02, 1.4257e-03, -7.4702e-02,
1.5372e-02, 3.9587e-02, 4.6909e-02,
-2.2911e-02, -1.4568e-01, -3.8964e-01,
2.2850e-02, -4.2297e-02, 6.5736e-02,
-6.9905e-03, -6.3972e-02, -1.8430e-01,
4.4453e-03, 2.0687e-01, 3.0032e-01,
1.7243e-02, 9.8548e-03, -9.7476e-02,
-7.9682e-04, -2.1199e-01, -4.3461e-02,
-4.2929e-02, -2.8227e-01, 2.8997e-02,
-1.8741e-03, 1.1166e-02, 1.8381e-03,
-5.6725e-16, -1.0368e-15, -1.1480e-15,
-5.5537e-16, -9.9929e-16, -1.1499e-15,
-3.8787e-16, -6.4019e-16, -7.7595e-16,
4.4505e-02, 8.8803e-02, 1.1384e-02,
-3.9434e-02, 1.9319e-01, -1.2016e-02,
-4.6072e-02, 1.1769e-01, 7.4816e-03,
-3.7856e-02, -1.7147e-02, 1.5984e-01,
-2.6459e-02, 1.7469e-01, 1.2584e-01,
1.6387e-02, 1.7370e-01, -1.7350e-01,
-3.0008e-01, 2.1485e-01, -5.4302e-02,
5.7724e-02, 3.2168e-01, -2.5261e-02,
6.9277e-02, 7.5035e-02, 6.3485e-02,
-1.1688e-01, 2.6068e-02, -1.3490e-01,
-1.6085e-01, 1.9409e-01, 1.1434e-01,
-7.3819e-02, -7.7880e-02, 7.3699e-03,
-9.9972e-02, 1.3554e-01, 2.1656e-02,
-8.8303e-02, 5.4435e-01, -4.0582e-02,
-3.4805e-02, -1.5291e-01, -3.6917e-02,
-3.4377e-02, -3.3086e-02, -9.5097e-02,
-7.4538e-03, 2.2545e-01, -2.6380e-02,
1.4440e-02, 1.3205e-01, 1.6164e-01,
9.2164e-02, -8.4307e-02, 7.8922e-02,
1.2519e-01, -6.1809e-01, -1.0895e-01,
6.2744e-02, -4.4951e-02, -3.2548e-02,
-2.5422e-21, -6.3849e-21, -9.5560e-21,
-1.9248e-21, -4.7107e-21, -6.4244e-21,
-1.4638e-21, -3.1947e-21, -3.7663e-21,
-8.6113e-03, -7.0987e-02, 5.8265e-02,
-1.3148e-02, 5.6371e-01, 5.0580e-02,
1.1741e-02, -3.5614e-02, -6.1265e-02,
1.4758e-03, 3.3349e-02, -1.0867e-02,
-4.0234e-02, 1.9894e-01, 1.3972e-01,
-1.9167e-02, -4.1723e-02, -1.9982e-01,
-3.0756e-01, 2.6284e-02, -1.9058e-02,
-7.9349e-04, 1.2644e-01, 2.9567e-02,
-3.9274e-02, 1.1030e-02, -9.4885e-03,
1.3541e-02, 1.7044e-01, 8.9626e-02,
6.6814e-02, 2.6430e-01, 1.7409e-01,
-6.1034e-04, 1.7569e-02, 1.3090e-01,
-4.1941e-03, 8.9599e-02, -3.3684e-02,
-1.1310e-02, -4.3731e-01, 5.7177e-02,
-4.5718e-04, 1.0175e-01, 4.1211e-02,
2.9756e-02, -1.1601e-01, -7.3171e-02,
2.7939e-02, 2.1334e-01, -4.0210e-01,
-8.6847e-03, 8.1829e-02, 4.4225e-02,
-1.1411e-01, -1.7697e-01, -5.8087e-02,
7.9613e-02, -4.2814e-01, -1.0814e-01,
-3.0610e-02, 1.1342e-03, -2.2322e-03,
-1.1254e-10, -1.4207e-10, -1.5402e-10,
-9.9123e-11, -1.2394e-10, -1.3338e-10,
-8.8840e-11, -1.0857e-10, -1.1463e-10,
3.0283e-02, -5.6191e-02, -1.0447e-01,
-1.4578e-02, -2.8745e-01, 1.9089e-01,
-2.7251e-02, 9.8069e-02, -1.4580e-02,
-3.0276e-02, 1.4366e-02, 2.6363e-02,
-8.4962e-02, 7.8998e-02, -4.7717e-02,
-3.2004e-02, -2.1579e-02, 1.1247e-02,
1.3895e-01, -3.3900e-01, 7.7998e-03,
2.4769e-01, -1.8506e-01, -2.3116e-03,
3.1361e-02, -1.1718e-02, -1.8286e-02,
-1.3020e-01, 1.4334e-01, -5.5700e-02,
-3.5386e-02, 1.0992e-01, -8.0235e-02,
-5.8978e-03, 7.7039e-02, -7.4619e-02,
-8.1603e-02, 1.2982e-01, -7.3193e-02,
-6.1469e-02, 1.7131e-01, 4.0255e-01,
-6.4582e-03, -8.2741e-02, -2.2220e-02,
1.6876e-02, -3.2590e-02, 5.5645e-02,
2.5231e-02, 2.9984e-01, -3.6995e-02,
9.3322e-03, 2.0758e-01, -2.1986e-02,
-4.9568e-02, 2.1857e-03, 8.6127e-02,
8.6593e-02, -5.8134e-01, 3.4507e-01,
4.8855e-02, -1.0506e-01, 4.1584e-02,
2.5428e-40, -4.4558e-40, -2.2090e-40,
-2.9727e-40, -4.8454e-40, 3.0397e-40,
1.1696e-40, -3.3028e-40, -2.2959e-40
}
};
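// Per-layer bias table for the HDNL1 network: 8 convolution layers, each with
// 8 output channels (one bias per channel). The layer/channel ordering
// presumably matches the kernel table above.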
static __device__ __constant__ const float HDNL1biasL[8][8] =
{
{
-3.1869e-08, -3.8279e-01, -6.3693e-05, -5.9054e-02, 9.3774e-04, -2.9944e-02, -1.1156e-03, -7.5635e-02
}
,
{
-1.7701e-01, -1.3417e-06, -3.0706e-40, -1.9022e-06, -1.2965e-02, -6.6444e-40, 1.4699e-02, 2.6082e-02
}
,
{
-3.7577e-07, 4.4550e-03, -8.1266e-04, 3.2408e-01, -1.1321e-07, -1.8907e-23, -1.9770e-25, -3.2394e-02
}
,
{
-2.1525e-14, -1.4130e-02, -1.9410e-02, -1.8703e-02, -2.9177e-02, -4.0635e-02, 7.8097e-02, -1.1643e-01
}
,
{
-2.6309e-02, -2.2238e-02, 6.8700e-03, -1.7973e-02, -1.0893e-02, -1.1888e-02, -4.9598e-03, -6.3663e-06
}
,
{
-1.2406e-03, -2.4901e-12, -9.7265e-07, 6.3490e-03, 1.3495e-01, -3.8411e-03, -6.6630e-03, -7.3614e-03
}
,
{
-2.7729e-03, -4.8174e-03, -6.3012e-03, 2.0491e-01, -2.0110e-03, -3.0974e-03, 5.1407e-01, -3.5016e-08
}
,
{
0.0324, 0.0140, 0.6750, 0.2661, 0.3646, 0.3591, 0.5597, 0.0816
}
};
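// Final 1x1 convolution weights for HDNL1: 4 * 8 = 32 floats, presumably
// mapping the 8 feature maps to 4 output values (e.g. a 2x2 sub-pixel block
// for 2x upscaling); the exact output interpretation is not visible here.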
static __device__ __constant__ const float HDNL1kernelsL10[4 * 8] =
{
0.0882, 0.0422,
0.3775, 0.4754,
-0.3209, -0.4870,
-0.0384, 0.0530,
0.1034, 0.0173,
0.5011, 0.3900,
0.3621, -0.1645,
-0.1304, 0.0013,
0.2230, 0.3026,
0.1618, -0.4514,
-0.2097, 0.1894,
-0.0326, 0.1434,
0.2421, 0.3363,
-0.0938, 0.3156,
0.1137, -0.2165,
0.2273, -0.1284
};
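// First-layer kernels for the HDNL2 variant: 9 * 8 = 72 floats, read here as
// eight 3x3 kernels applied to a single-channel (luma) input.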
static __device__ __constant__ const float HDNL2kernelsL1[9 * 8] =
{
-2.0676e-02, 6.7641e-03, 2.8287e-01,
2.5576e-01, 1.9765e-01, -2.4700e-01,
3.5056e-01, 2.9306e-01, -2.2245e-01,
8.4706e-02, -2.9455e-01, -5.5831e-02,
-8.4635e-02, -9.6835e-02, 3.1208e-01,
1.7690e-01, 2.7624e-02, 5.1954e-02,
-5.3869e-01, 7.2934e-02, -1.7662e-03,
-3.1402e-02, 3.1700e-01, 1.4965e-01,
3.8569e-02, 5.5025e-03, -6.6555e-03,
-4.2049e-38, -4.1971e-38, -4.1488e-38,
-4.2855e-38, -4.2871e-38, -4.2363e-38,
-4.1861e-38, -4.1974e-38, -4.1677e-38,
1.8451e-01, -5.4584e-02, 1.4494e-01,
1.3433e-01, 1.0073e-01, 2.6371e-01,
6.1261e-02, 2.2116e-01, 2.0074e-01,
5.9669e-02, -3.9168e-02, 2.1674e-01,
-2.9132e-01, 3.0285e-03, 1.2625e-01,
-4.3415e-02, 1.8663e-01, -1.6554e-01,
1.0102e-01, 6.3466e-02, 1.5225e-01,
2.1692e-01, 1.9860e-01, -7.0456e-02,
-1.6406e-03, -2.7834e-01, -3.5449e-01,
-3.0140e-01, -4.2348e-01, -5.8263e-01,
2.3140e-01, -2.6843e-01, -1.1069e-01,
-9.1484e-02, 1.1486e-02, 5.6396e-02
};
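// First-layer biases for HDNL2, one per output feature map.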
static __device__ __constant__ const float HDNL2biasL1[8] =
{
-9.0964e-02, 2.1136e-01, -1.2011e-02, -4.5657e-38, -1.4443e-01, 1.8968e-01, -2.9027e-02, 1.6199e-01
};
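// A minimal sketch (not from the original source) of how the two HDNL2
// first-layer tables above would plausibly be consumed: each of the 8 feature
// maps is a 3x3 convolution over a row-major single-channel patch plus its
// per-map bias. The row-major patch layout and the ReLU activation are
// assumptions inferred from the table shapes, not facts stated in this file.
static __device__ __forceinline__ void HDNL2Layer1Sketch(const float patch[9], float feat[8])
{
    for (int m = 0; m < 8; m++)              // 8 output feature maps
    {
        float acc = HDNL2biasL1[m];          // per-map bias
        for (int i = 0; i < 9; i++)          // 3x3 window, row-major
            acc += HDNL2kernelsL1[9 * m + i] * patch[i];
        feat[m] = fmaxf(acc, 0.0f);          // assumed ReLU activation
    }
}
// Hidden-layer kernels for HDNL2: 8 layers, each with 8 output channels x
// 8 input channels x 3x3 taps (9 * 8 * 8 floats per layer).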
static __device__ __constant__ const float HDNL2kernelsL[8][9 * 8 * 8] =
{
{
4.4561e-02, 4.3527e-01, -8.9737e-02,
-4.9011e-03, 1.4879e-01, -8.2210e-02,
-1.7593e-02, 4.9294e-02, 1.8058e-01,
-3.3827e-02, -7.9055e-02, 2.6982e-01,
-5.2485e-02, -4.2046e-01, -5.6838e-02,
1.0919e-01, -7.3141e-02, 9.4797e-02,
6.2764e-02, 2.5475e-01, 1.3705e-01,
2.0997e-01, 7.3360e-01, 2.0801e-01,
-1.1500e-01, 3.1245e-01, 6.7457e-01,
-5.1481e-39, -5.1520e-39, -4.9367e-39,
-5.1383e-39, -5.1642e-39, -4.9479e-39,
-5.1323e-39, -5.1859e-39, -4.9547e-39,
1.3849e-01, 1.1564e-01, -1.8175e-01,
-5.5355e-03, -1.5117e-01, -2.4654e-01,
8.1590e-03, -1.1681e-01, 3.4700e-05,
-2.5950e-01, -1.4182e-01, 3.1814e-01,
1.7662e-01, 1.8420e-01, -1.5181e-01,
7.6233e-02, -7.8372e-02, -3.1968e-01,
-4.5770e-01, 4.1562e-02, 1.3721e-01,
-5.8444e-02, 3.3148e-02, -2.3370e-01,
1.5374e-01, -1.1162e-01, -7.4099e-03,
-1.5716e-01, -1.8356e-01, 2.1114e-02,
-3.2233e-01, 2.1064e-02, 2.7019e-01,
-1.3702e-01, 2.6969e-01, 2.1033e-01,
8.9027e-02, -7.9969e-02, 1.0096e-01,
6.6773e-02, 3.9558e-02, -7.4944e-02,
-5.9789e-02, 1.2265e-01, 3.3873e-02,
-9.7157e-03, 9.2906e-02, 6.0300e-02,
-2.2104e-03, 6.8198e-02, -1.2931e-01,
8.9288e-02, -1.2554e-01, -4.3270e-02,
1.0660e-01, 1.1609e-02, -1.2415e-01,
2.6372e-02, -3.6311e-02, 1.5625e-01,
-7.9595e-02, -3.3662e-01, -4.0760e-01,
-2.9566e-39, -2.8760e-39, -2.8816e-39,
-2.9566e-39, -2.8964e-39, -2.9115e-39,
-2.9566e-39, -2.9179e-39, -2.9130e-39,
7.9255e-02, 9.4548e-02, 8.8155e-02,
-2.8163e-02, 1.2428e-01, -6.4973e-03,
7.7875e-02, 7.4765e-02, -5.2405e-02,
-1.4886e-02, -7.1499e-02, -7.0719e-02,
9.7562e-02, 9.0948e-02, -5.6588e-02,
-1.2872e-02, -6.6390e-02, -6.4147e-02,
9.8262e-02, -2.4215e-01, -1.7051e-01,
1.8096e-01, 1.8106e-01, 1.3108e-01,
2.0649e-01, 1.2242e-01, 3.7225e-02,
-2.5125e-01, -1.0073e-01, 4.5330e-01,
1.8588e-01, -2.6809e-01, -1.5709e-01,
4.7668e-01, -2.4208e-01, -6.6012e-01,
1.3561e-01, 5.4109e-02, 6.1899e-02,
-1.9605e-02, 1.1349e-01, 3.5781e-02,
3.5513e-03, 3.1212e-02, -6.0399e-02,
5.9258e-02, -1.8175e-02, 7.3714e-02,
2.0052e-02, 4.3245e-02, -5.0879e-03,
-1.1082e-02, -1.0753e-01, -1.7896e-03,
2.9139e-02, 2.2747e-01, -6.4075e-02,
7.3097e-02, 1.5703e-01, -5.3815e-01,
1.0620e-01, -1.1386e-01, 1.7103e-01,
-3.8728e-39, -3.8299e-39, -3.8320e-39,
-3.9065e-39, -3.8445e-39, -3.8135e-39,
-3.8838e-39, -3.8114e-39, -3.8255e-39,
2.3253e-02, 6.9893e-02, 1.4774e-01,
9.6087e-02, 2.3102e-03, -3.4449e-02,
2.6819e-02, 1.0254e-01, -2.8200e-02,
3.9553e-02, 4.7191e-05, -5.5558e-02,
4.1641e-02, 5.8706e-02, -1.0337e-01,
1.1291e-01, 5.9622e-02, 7.0677e-02,
-2.5162e-01, 7.6659e-02, 1.7245e-01,
-5.8522e-02, 1.4365e-01, 2.1189e-01,
-2.8897e-02, -5.7365e-02, 1.4232e-01,
1.7854e-02, 1.7404e-03, -8.7356e-03,
-6.0777e-02, -6.2687e-02, -1.1500e-02,
-1.6468e-01, -2.5058e-01, -1.2798e-01,
2.3193e-02, 1.7209e-01, 1.6687e-01,
-3.4483e-02, -1.6846e-02, 2.5930e-02,
1.4410e-01, 4.2932e-02, -5.0149e-03,
4.7269e-02, 1.1276e-01, -9.2701e-03,
1.5323e-02, 1.3552e-02, 9.0256e-02,
-8.9393e-03, 7.0903e-02, -6.9379e-02,
1.8645e-01, 1.0543e-01, -1.5590e-01,
2.1056e-01, 1.1051e-01, -1.5514e-01,
-7.0484e-02, -1.5153e-01, -5.0873e-01,
3.2730e-39, 3.2358e-39, 3.1222e-39,
3.2642e-39, 3.2358e-39, 3.0921e-39,
3.2730e-39, 3.2358e-39, 3.0899e-39,
1.2225e-02, 1.2386e-01, 6.7712e-02,
3.1263e-02, 1.3617e-01, 1.5352e-01,
2.3405e-02, 8.5466e-02, 8.7303e-02,
-2.0372e-02, 8.3465e-02, -7.4233e-02,
1.2269e-01, 8.4046e-02, -3.6869e-02,
1.0242e-01, 7.3218e-02, -1.1496e-01,
-1.4539e-01, -2.3923e-01, -2.2818e-01,
-3.2368e-02, -7.4360e-02, 2.3493e-02,
1.7004e-01, 6.2924e-02, 8.9327e-02,
-1.1449e-01, -1.4973e-03, -7.0451e-03,
-9.3205e-02, -1.0312e-01, 4.6503e-02,
-2.2148e-01, -1.8111e-01, -1.1992e-01,
9.8140e-02, 9.9823e-02, -2.0282e-02,
-8.1973e-02, 1.4255e-01, -5.2392e-02,
8.0350e-03, -4.8299e-02, -7.7908e-02,
4.2383e-02, 3.0707e-02, 2.8560e-02,
1.0437e-01, 6.1290e-02, -9.7796e-02,
-1.7125e-02, -1.3572e-01, -1.5345e-01,
-1.3292e-01, 2.9477e-02, 6.8032e-02,
1.5741e-01, 4.0258e-01, 2.5838e-01,
1.3948e-01, 3.5713e-01, -3.9825e-01,
-1.9224e-39, -2.4076e-39, -2.4529e-39,
-1.9181e-39, -1.9894e-39, -4.0240e-39,
-1.9335e-39, -2.3920e-39, -4.0147e-39,
-2.1714e-02, -3.5299e-02, -7.5803e-03,
-2.4087e-02, 7.5265e-02, 7.6697e-02,
4.5309e-02, 8.9529e-02, 7.6510e-03,
1.0813e-02, 3.1294e-02, -2.5907e-02,
1.1962e-02, -6.8664e-03, -1.4084e-01,
7.7013e-02, -1.2305e-01, -6.7800e-02,
-9.7392e-02, 4.4082e-02, 1.4473e-01,
4.9436e-02, 2.8859e-01, 2.8252e-01,
-3.5828e-02, -7.5616e-02, 2.4875e-01,
-6.7684e-02, 1.1290e-01, 4.2827e-02,
-1.0860e-01, 1.2952e-01, 5.9784e-01,
-3.5402e-01, -3.9558e-02, -6.0775e-01,
-1.2854e-02, 1.5240e-01, 1.4115e-01,
-2.8134e-02, -1.2939e-02, -2.6203e-02,
1.1300e-01, 1.4481e-01, -5.1454e-02,
1.2688e-01, 2.8536e-02, 9.4877e-02,
9.6033e-02, -1.3901e-02, 6.0035e-02,
-1.1249e-01, 4.3971e-02, -1.0918e-01,
8.2500e-02, 2.1413e-01, 3.9015e-02,
1.8361e-01, 2.5271e-01, -2.2794e-01,
-8.1195e-02, -1.2269e-01, -2.6097e-01,
7.6827e-39, 7.7882e-39, 7.6893e-39,
7.7006e-39, 7.7857e-39, 7.7384e-39,
7.6985e-39, 7.7712e-39, 7.7399e-39,
1.4458e-02, 1.0801e-01, 1.5906e-01,
-1.4676e-02, 1.3699e-01, 9.2460e-02,
-3.6479e-02, 1.4529e-01, -2.8681e-02,
-3.3251e-02, -7.3096e-02, -1.4330e-01,
5.7009e-02, -3.1905e-02, -1.2035e-01,
1.1838e-01, 5.7011e-02, 2.0800e-02,
-1.1567e-02, -2.2125e-01, -9.3953e-02,
-7.5378e-02, -1.2069e-01, 1.3217e-01,
-7.7357e-02, -1.3171e-01, 1.2776e-01,
-1.1397e-01, -3.5183e-02, 2.2994e-02,
-6.5101e-02, -1.5019e-01, -2.7451e-02,
-2.4260e-01, -1.3543e-01, -1.9889e-02,
-1.9798e-39, -3.5282e-40, -1.9216e-39,
-1.9140e-39, -1.9370e-39, -1.9943e-39,
-1.8623e-39, -1.8665e-39, -1.9320e-39,
-4.8850e-39, -5.0283e-39, -4.9987e-39,
-5.0868e-39, -5.0814e-39, -5.0779e-39,
-5.2489e-39, -5.1086e-39, -5.1234e-39,
-2.9120e-39, -3.0278e-39, -2.9633e-39,
1.3186e-39, 6.0555e-39, 6.0419e-39,
-5.5922e-39, -8.5992e-40, -2.8529e-39,
-3.4668e-39, -3.5127e-39, -3.4668e-39,
-3.2831e-39, -3.4668e-39, -3.6734e-39,
-3.2142e-39, -3.2831e-39, -3.5816e-39,
1.3445e-39, 1.3621e-39, 1.3375e-39,
1.4539e-39, -2.2695e-40, 1.4522e-39,
1.3563e-39, 1.3339e-39, 1.3001e-39,
-4.4670e-39, -4.4026e-39, -4.3159e-39,
-4.5047e-39, -4.3505e-39, -2.7259e-39,
-4.5265e-39, -4.4721e-39, -4.4990e-39,
-1.9864e-39, -4.1379e-39, -3.7189e-39,
5.2465e-39, 2.5220e-39, 1.5639e-39,
-3.9760e-39, -5.7033e-39, -4.0978e-39,
-6.3745e-40, -4.7511e-39, 2.3456e-39,
-1.5164e-39, 5.0431e-39, 5.1197e-39,
8.7052e-40, 1.4947e-39, -1.1546e-39,
5.3140e-02, 1.0281e-01, 1.4767e-01,
-6.1530e-02, -9.4166e-02, 4.8671e-02,
5.6787e-03, -1.4551e-01, 1.5614e-02,
-3.4826e-02, -5.1148e-02, 9.7079e-02,
-1.3603e-02, -1.2249e-01, -1.9330e-02,
-6.8184e-02, -1.4344e-01, -9.4023e-03,
-7.4629e-02, 3.9634e-02, 1.3445e-01,
4.2153e-02, 7.1129e-01, 2.8703e-02,
7.8247e-02, 7.2210e-01, -6.6198e-01,
-6.1010e-39, -6.2892e-39, -6.4008e-39,
-6.0825e-39, -6.3221e-39, -6.3883e-39,
-1.4962e-39, -1.1702e-39, -1.2143e-39,
5.5512e-02, -2.1522e-02, 1.0866e-01,
-9.2812e-02, -3.5119e-02, 1.1396e-01,
-1.3922e-01, 6.7287e-02, -5.5626e-02,
-2.0492e-01, 8.1441e-02, -1.3513e-01,
4.7447e-02, 2.0081e-01, -3.1249e-01,
-1.8546e-02, 2.0680e-01, 7.3979e-02,
8.8928e-02, -4.3606e-01, -8.4823e-02,
-5.6133e-02, 3.5132e-01, 1.8633e-01,
-4.3855e-03, 5.4869e-02, 1.1658e-01,
1.7423e-01, -5.3107e-02, 2.2925e-02,
-1.7622e-01, 4.4453e-02, 2.8131e-02,
2.6863e-01, -2.9085e-01, -1.5098e-01
}
,
{
-2.4230e-40, 5.4425e-39, 3.4517e-39,
-1.9803e-39, -1.5207e-39, -3.5630e-39,
-4.9409e-39, -2.9280e-39, 7.7966e-40,
2.4867e-39, -2.1848e-39, 3.2524e-39,
-6.2860e-39, 4.0411e-39, -3.6956e-39,
-3.3384e-39, -1.0908e-39, 5.4261e-39,
-3.6691e-40, 9.4949e-40, -1.7279e-39,
-1.0644e-39, -2.1371e-39, -2.5125e-39,
2.9368e-39, -5.3820e-39, -3.9771e-40,
-1.4703e-39, -3.6960e-39, -4.4161e-39,
8.2800e-40, -4.9175e-39, 3.1868e-39,
5.5703e-39, -3.0263e-39, -1.6991e-39,
5.2691e-39, 4.8127e-39, 4.1346e-39,
-1.3013e-39, -1.7101e-39, -3.5467e-39,
1.1496e-39, 2.0938e-39, -4.2970e-39,
-5.5314e-39, 6.4852e-40, -5.0870e-39,
3.9377e-39, -4.1683e-39, -3.5404e-40,
-3.6188e-39, 5.4657e-39, 2.1279e-39,
3.4090e-40, 2.4425e-40, 9.3423e-41,
-2.3450e-39, 3.1518e-40, 4.3061e-40,
-2.6175e-39, -2.4696e-39, -2.3755e-39,
2.2764e-39, -4.4934e-39, 8.5722e-40,
5.1798e-39, 2.7072e-39, 5.3750e-39,
5.4335e-40, 3.8556e-39, -3.4799e-39,
-4.8963e-39, -1.1413e-39, -5.3918e-40,
6.1843e-39, -1.8521e-39, -1.3450e-39,
-2.0906e-39, -3.2544e-39, -2.8205e-39,
5.3550e-39, -3.0202e-39, -3.4181e-39,
-3.0043e-39, -3.2900e-39, -3.2915e-39,
6.1849e-39, -3.3421e-39, -3.3995e-39,
-4.8657e-39, -4.7034e-39, -4.7467e-39,
-4.6555e-39, -4.6045e-39, -4.6954e-39,
-4.8886e-39, -4.7333e-39, -4.7805e-39,
-2.0900e-39, -1.9429e-39, -2.0572e-39,
-2.0270e-39, -1.9074e-39, -1.9275e-39,
-2.1243e-39, -2.1134e-39, -2.1539e-39,
-4.4175e-39, -4.6412e-39, -4.6582e-39,
-4.6364e-39, -4.8757e-39, -4.6795e-39,
-4.4571e-39, -4.5038e-39, -4.4570e-39,
-3.2662e-39, -3.1163e-39, -3.2050e-39,
-3.2098e-39, -3.0887e-39, -3.1635e-39,
-3.3183e-39, -3.1411e-39, -3.2824e-39,
8.6839e-40, 5.7318e-39, 1.8373e-40,
4.6732e-39, -4.5549e-41, 1.2817e-39,
3.7642e-41, -6.2591e-39, -5.0492e-39,
5.0057e-39, 6.0612e-39, 2.0220e-39,
3.7436e-39, 4.8326e-39, 3.1353e-39,
3.5289e-39, 4.7177e-39, 6.2666e-39,
-1.4963e-01, -8.0360e-02, -7.9054e-02,
-1.3731e-01, 5.0766e-02, 6.9673e-02,
3.2213e-02, 3.3250e-02, 1.3170e-01,
-2.9718e-02, -2.6931e-02, 1.5768e-02,
5.9232e-02, 7.8471e-02, 9.9465e-02,
2.4872e-02, -4.4226e-02, 3.2357e-02,
-6.0139e-02, -2.2756e-02, -5.5412e-02,
4.5363e-02, 1.6393e-01, 3.7428e-02,
5.2497e-02, 9.5435e-02, 9.7155e-02,
8.2849e-02, 5.9711e-02, 1.4352e-01,
1.1756e-02, 1.5440e-02, 1.3039e-01,
4.3324e-03, 5.9119e-02, 1.1129e-01,
-3.9591e-03, 5.8617e-02, -1.3843e-02,
-2.9949e-02, 3.4877e-02, 5.0679e-03,
3.7278e-02, -2.5221e-02, 1.2191e-01,
1.5626e-01, 8.9797e-02, -1.5458e-02,
1.5607e-01, 1.4561e-02, 1.1720e-01,
-1.6112e-02, 7.7908e-02, -6.1322e-02,
3.8589e-39, 3.9262e-39, 3.8641e-39,
3.9450e-39, 3.8805e-39, 3.9383e-39,
3.8384e-39, 3.8027e-39, 3.7700e-39,
6.2294e-02, -5.6804e-03, -4.7293e-01,
1.3161e-01, 3.1187e-01, -1.8013e-01,
4.9908e-02, 9.8583e-02, 3.8863e-02,
-1.7400e-39, 3.5779e-39, 5.2800e-39,
-1.6845e-39, 4.7140e-39, 2.4244e-39,
-1.3654e-39, 2.4123e-40, -1.5360e-39,
-1.0409e-39, 1.8590e-39, -5.2161e-41,
-8.5110e-40, -1.7210e-39, -4.6624e-39,
5.0754e-40, -2.6248e-39, -5.4801e-39,
-4.9486e-39, 2.8984e-39, 4.9357e-39,
-1.4077e-39, 3.8778e-39, 5.8202e-39,
-4.1095e-39, 6.8891e-40, 5.6565e-39,
3.8021e-39, -5.4740e-41, 2.1795e-39,
-2.4185e-39, -5.8101e-39, 1.5651e-39,
-4.9775e-39, 6.0152e-39, -5.2337e-39,
-4.4350e-39, -3.8239e-39, 3.1624e-40,
-4.3665e-39, -3.0919e-39, -4.7675e-39,
-2.3335e-39, 1.8270e-39, -5.5077e-39,
5.5906e-39, 6.7732e-41, 3.7359e-39,
-5.1412e-40, -2.3239e-39, 5.1937e-39,
-4.4951e-39, -3.4928e-40, -5.0589e-39,
4.9149e-39, 1.1372e-39, 6.6368e-40,
-1.8870e-40, -5.9117e-40, -1.3973e-39,
-2.3555e-39, -1.0637e-39, 3.1692e-39,
-4.8054e-39, 4.8090e-40, 2.0873e-39,
3.8301e-39, -3.8642e-39, 4.8187e-39,
-1.6563e-39, 8.9890e-40, -3.5162e-39,
-2.3010e-01, -7.4445e-02, -1.0006e-01,
-2.4543e-01, -8.5750e-02, 1.4859e-01,
-1.3783e-01, 1.2709e-01, 2.5012e-01,
1.0310e-01, -2.3520e-02, -8.1277e-02,
-2.9267e-02, 1.0686e-01, 4.6287e-02,
-1.2342e-02, -1.7104e-02, 8.4357e-02,
-1.8492e-02, -2.0711e-02, -3.5242e-02,
7.6163e-02, 6.0853e-02, 9.4248e-02,
6.2008e-02, 1.1373e-02, 2.6609e-02,
-7.8135e-02, 1.0672e-01, -5.8380e-02,
7.1618e-02, 2.7966e-04, 1.1835e-01,
1.1306e-01, -7.8578e-03, 5.1743e-03,
-1.2123e-01, 4.9640e-02, 7.3827e-02,
-1.0377e-01, -3.7377e-02, -3.6536e-02,
5.7489e-02, -4.6279e-04, 9.0068e-02,
4.0784e-05, -3.3328e-02, 5.1191e-02,
9.6538e-02, 7.1779e-02, 1.2121e-01,
1.1598e-01, -5.9055e-02, 8.2671e-02,
-1.7292e-39, -1.7848e-39, -1.7308e-39,
-3.2817e-39, -1.7274e-39, -3.3601e-39,
-1.7252e-39, -3.4067e-39, -1.7783e-39,
-7.4053e-02, -4.2785e-01, -4.7597e-01,
4.6309e-01, 7.6018e-02, -3.5885e-01,
3.0428e-01, 8.7449e-02, 9.7880e-02,
-3.4191e-02, 1.1834e-01, -4.3273e-02,
-6.0782e-01, 9.2387e-01, -1.3972e-01,
3.0665e-01, 4.7445e-01, 4.8683e-02,
-1.8865e-02, 9.9509e-02, -4.9881e-02,
2.1640e-02, -2.0941e-01, -1.4779e-01,
1.7808e-01, -1.2572e-01, -9.6756e-02,
-1.0143e-01, 8.3153e-02, -1.0478e-01,
1.6201e-01, 2.0740e-01, -1.2653e-01,
8.1654e-02, -7.6224e-02, -8.9864e-02,
4.5383e-02, -3.6893e-02, -1.0096e-01,
2.0389e-01, 2.2557e-01, -1.9685e-01,
-9.5198e-02, 2.2877e-01, 2.1135e-02,
-1.0919e-01, -1.7563e-01, -3.5255e-01,
-1.3447e-01, 3.3709e-01, -1.9043e-01,
-2.1422e-01, -2.8848e-01, -5.3921e-02,
5.5351e-02, -5.0579e-02, -1.6168e-01,
2.5282e-01, 1.9715e-01, -2.4035e-01,
-3.0800e-02, 1.9329e-01, -1.0893e-01,
-3.4416e-39, -1.8080e-39, -1.6625e-39,
-1.6612e-39, -1.7397e-39, -1.5953e-39,
5.3047e-39, 5.4221e-39, -1.1665e-39,
2.1838e-02, -7.0635e-02, 3.6095e-01,
5.1096e-01, 6.3838e-01, 5.0716e-01,
1.1642e-01, 1.8546e-01, 1.5989e-01,
1.0799e-01, 2.8380e-01, 1.4910e-01,
-2.4305e-01, 2.3084e-01, -9.9982e-02,
-4.6839e-01, 6.0376e-01, -1.2748e-02,
8.7608e-02, 9.8828e-02, 2.1469e-02,
-3.5384e-03, -1.5689e-01, -1.1411e-01,
2.0728e-02, 5.6814e-02, -1.1090e-02,
-3.9301e-02, -9.4325e-02, -6.2119e-02,
1.2842e-01, 9.7466e-02, -2.7502e-02,
1.6560e-01, 1.5058e-01, 2.2821e-02,
-8.1287e-02, -6.3940e-03, 3.2162e-02,
9.4116e-02, -6.2567e-02, -1.2704e-01,
5.4654e-02, 1.4885e-02, 3.8166e-03,
1.9830e-01, -2.5419e-01, -6.7067e-02,
3.2303e-01, 1.6037e-01, -3.0200e-02,
1.3011e-01, 7.5455e-02, -1.2726e-02,
-1.9198e-01, -1.5419e-01, -7.5420e-02,
1.6070e-01, -6.1031e-02, -2.0179e-01,
-1.5829e-02, 1.9918e-01, 1.0960e-01,
-5.5215e-39, -5.8659e-39, -5.5573e-39,
-6.2394e-39, -6.0172e-39, -6.0159e-39,
-4.0308e-39, -4.1217e-39, -4.1372e-39,
1.6143e-01, 1.7271e-01, 4.3534e-01,
-2.4312e-01, 4.0146e-01, 4.4693e-01,
1.5442e-01, 3.9885e-01, -1.4357e-01,
-6.0236e-02, -1.2324e-01, 6.1197e-02,
-2.5842e-02, -1.0266e-02, 1.5670e-03,
2.9103e-02, 2.9966e-02, 1.1286e-01,
3.4528e-02, 1.3039e-01, 9.2736e-02,
3.5193e-02, 5.6583e-02, 5.9465e-02,
1.2846e-01, 9.3387e-02, 9.2131e-02,
1.4974e-03, 1.0196e-01, 6.7632e-02,
8.9809e-02, 5.7568e-02, -6.0621e-02,
-2.7582e-03, 3.1935e-02, 3.1299e-02,
1.3595e-01, 4.9498e-02, 1.2535e-01,
-3.9396e-02, 4.8859e-02, 4.1389e-02,
3.7026e-02, 1.3667e-01, 7.5657e-03,
-5.3476e-02, 1.9677e-02, 9.5214e-02,
1.3136e-02, 7.5560e-02, 6.2428e-03,
-5.2378e-02, -1.8704e-02, 1.0657e-01,
-4.2938e-02, -5.0199e-02, 1.4357e-01,
-5.7002e-02, 1.4158e-01, 4.9442e-02,
-6.8383e-02, 1.1316e-01, 5.2071e-02,
1.5031e-40, 2.1250e-40, 1.8673e-40,
1.5681e-40, 1.3104e-40, 1.6173e-40,
2.1560e-40, 1.8582e-40, 1.7747e-40,
8.4848e-02, -1.9845e-01, -5.1844e-01,
3.0959e-01, 3.6682e-01, 3.1208e-02,
1.9871e-01, 2.8318e-01, 1.6066e-01
}
,
{
-2.7283e-39, -4.9031e-39, -2.1039e-39,
-1.0327e-39, -5.1679e-39, -4.3300e-39,
-5.2613e-39, -3.1707e-39, -6.0916e-39,
1.5840e-39, 1.6709e-39, 1.6120e-39,
1.6716e-39, 1.7418e-39, 1.6624e-39,
1.5922e-39, 1.7383e-39, 1.5668e-39,
1.1389e-01, -4.5774e-02, 6.1423e-02,
1.3858e-01, 2.3102e-02, -6.5079e-02,
1.3269e-01, 3.2387e-02, 7.6966e-02,
-2.1531e-39, -1.6063e-39, -3.2070e-39,
-2.8531e-39, 4.6956e-39, 1.4038e-39,
2.0509e-39, -4.4924e-39, -5.3658e-39,
1.1524e-01, -5.0115e-02, 9.4187e-02,
4.2477e-02, 1.4197e-01, 2.4986e-02,
-2.8688e-02, 9.2289e-02, 4.1965e-02,
-2.1691e-01, -6.6916e-04, -1.3026e-01,
-1.9143e-01, 1.2211e-01, 1.2562e-01,
-1.2273e-01, 7.1045e-02, 1.2396e-01,
-8.0861e-02, -4.4301e-03, 6.3144e-03,
3.0338e-02, -8.6463e-03, 5.5084e-02,
-1.8370e-01, -5.0287e-02, -7.2194e-02,
7.4570e-02, 5.4483e-02, -1.2639e-02,
1.2481e-01, 1.4683e-01, -4.7581e-02,
1.6748e-01, -3.1374e-02, -1.7271e-02,
1.9801e-39, -3.3469e-39, -4.7012e-39,
-2.9869e-39, -3.2752e-39, -2.2142e-39,
-4.2927e-39, -1.9635e-39, -8.7517e-40,
2.7286e-39, 2.7755e-39, 2.7501e-39,
2.7114e-39, 2.7711e-39, 2.6858e-39,
2.5562e-39, 2.6523e-39, 2.5846e-39,
1.4015e-01, 1.0486e-01, 1.2320e-01,
4.6545e-02, 1.2068e-01, 9.2531e-02,
1.0717e-01, 3.8738e-02, 1.0181e-01,
-7.4503e-40, -1.1490e-39, 6.1230e-41,
2.4896e-39, 5.3740e-39, -1.4060e-39,
1.9095e-39, -7.1020e-40, 3.5820e-39,
-1.4348e-02, 6.4128e-02, 6.1082e-02,
-1.1112e-02, 8.5993e-02, 2.4835e-02,
1.2794e-01, -9.1072e-02, -1.3487e-02,
-5.8057e-02, 1.3080e-01, 1.0895e-01,
-1.6436e-01, 9.8593e-03, 1.5586e-02,
-1.5336e-01, 3.6391e-02, 1.4539e-01,
-4.6112e-02, 3.0102e-02, 6.2460e-02,
-2.5510e-02, 2.0437e-02, -5.6816e-02,
-1.0308e-01, -1.5284e-01, -7.1036e-02,
5.5290e-02, -6.6632e-02, 4.2268e-02,
-2.7665e-02, 9.3415e-02, 5.1026e-02,
1.5652e-01, 1.0835e-01, 9.6131e-02,
-4.2583e-39, -3.4889e-39, -5.7522e-39,
4.2701e-40, 2.8095e-39, -3.5579e-39,
2.2286e-39, 4.9865e-39, 4.0469e-39,
-6.4320e-40, -3.3384e-39, -5.9025e-39,
-7.9075e-40, -3.0577e-39, -6.0007e-39,
-8.9627e-40, -2.8374e-39, -5.8866e-39,
6.3645e-03, -5.3080e-03, -5.1759e-02,
1.0665e-01, -6.3126e-02, 5.0918e-02,
7.2193e-02, -6.8836e-02, -6.5657e-02,
2.8519e-39, -5.0955e-39, -9.6085e-40,
-3.3563e-39, -5.6038e-39, -1.6256e-39,
2.6872e-39, 1.4728e-39, -1.9908e-39,
-1.5254e-02, 9.8323e-02, 4.5504e-02,
1.3855e-01, 6.9300e-02, 1.9135e-01,
-5.2321e-02, -6.0227e-03, -1.1734e-04,
-1.4457e-01, 9.2761e-02, 4.5219e-02,
-3.0361e-01, 3.4673e-01, -2.3110e-01,
2.1017e-01, 2.4983e-01, 3.1659e-01,
-6.0569e-02, -5.4348e-02, -7.6719e-02,
-6.5060e-02, 2.8902e-01, 8.0732e-02,
-3.3425e-01, -3.1361e-01, -2.7183e-01,
2.8035e-02, -5.8134e-02, -4.3880e-02,
-1.6375e-02, 9.8195e-02, -7.4011e-02,
-5.9523e-02, 1.0234e-01, -5.3357e-02,
2.3364e-39, -2.5324e-39, -4.8333e-40,
2.2903e-41, -3.3061e-39, -2.5779e-39,
-1.8164e-39, -4.9236e-39, -4.9272e-39,
-1.2809e-39, -1.1698e-39, -1.2564e-39,
-1.3111e-39, -1.1778e-39, -1.2543e-39,
-1.4772e-39, -1.4021e-39, -1.4721e-39,
8.8919e-02, -3.4541e-03, -4.9619e-02,
1.0997e-01, 1.0257e-01, 6.9950e-02,
9.2624e-02, 3.2712e-02, 8.7916e-02,
-5.0242e-39, -6.1320e-39, 8.7891e-40,
-4.9951e-39, 2.3873e-39, -2.7823e-39,
-3.6739e-39, -1.8903e-39, 5.2150e-39,
9.6288e-02, 9.7568e-03, -5.8178e-02,
2.3313e-02, 1.1725e-01, 1.0291e-01,
-1.0111e-01, 8.3706e-02, 9.6575e-03,
-8.2531e-02, 7.0089e-02, 1.0821e-01,
-1.1016e-01, 1.8977e-01, 2.5576e-01,
-1.0221e-01, 5.9236e-02, 6.1678e-02,
2.6234e-02, 9.6868e-02, 9.2432e-02,
4.9881e-02, 5.9121e-02, -1.0477e-02,
-1.4693e-01, -1.0030e-01, -1.0608e-01,
1.1936e-01, -2.2301e-02, 1.1363e-01,
1.3981e-01, 6.7734e-02, -8.2775e-02,
1.0404e-01, -7.7360e-03, 4.2523e-02,
-2.6052e-39, 5.7201e-39, -5.6049e-39,
-3.6314e-39, -5.9232e-39, -3.6970e-39,
3.4360e-39, -5.6848e-39, -3.8308e-39,
4.6279e-39, 5.8135e-39, 2.0652e-39,
3.9864e-39, 4.4000e-39, 5.5163e-39,
2.9644e-39, 2.7537e-39, 3.6593e-39,
4.7872e-02, -2.5857e-02, 4.8810e-02,
1.0389e-01, -1.0782e-01, 4.1365e-02,
9.5778e-02, -5.2341e-02, 4.5947e-02,
-8.2652e-40, -5.7602e-39, 4.6187e-39,
-2.8365e-39, 1.4981e-39, 6.2504e-39,
-4.8330e-39, 4.0283e-39, 4.9792e-39,
-1.0893e-03, -8.2708e-02, -1.7925e-01,
8.3461e-02, 3.1339e-02, 8.8096e-02,
7.3139e-02, -1.2212e-01, 1.0489e-02,
-2.4187e-01, -3.8397e-01, 1.3730e-01,
1.9217e-01, 1.4101e-01, 4.9795e-01,
-1.1441e-01, 3.3343e-01, 7.9194e-02,
1.4556e-01, -5.1060e-01, 2.1556e-01,
3.5719e-01, 2.7282e-01, -1.9015e-01,
-1.0941e-01, 2.7634e-02, 1.1833e-01,
-9.3316e-02, -4.1307e-03, 7.8613e-02,
-2.1526e-02, -6.7141e-02, 2.5513e-02,
-3.3942e-02, -8.6282e-02, 3.0446e-02,
-4.5124e-39, -2.7154e-39, 4.9467e-39,
-4.2299e-39, -5.9485e-39, -2.9606e-39,
-4.7642e-39, -4.7981e-39, -4.0169e-39,
-3.8238e-39, 5.7381e-39, 4.0097e-39,
1.9550e-39, 4.5523e-39, 3.1206e-39,
6.0200e-39, 3.0406e-39, 2.0498e-39,
-3.2474e-01, 1.1052e-02, 4.7197e-02,
-1.4658e-01, 1.6728e-01, 5.2190e-02,
4.3174e-02, 4.5864e-02, 5.4472e-02,
2.6403e-39, 2.7421e-39, -4.3011e-39,
-3.6258e-39, -1.3708e-39, 3.6147e-39,
-1.9471e-39, 4.5896e-39, 4.5992e-39,
-9.9986e-02, 7.0727e-02, 8.5023e-02,
2.2501e-02, 1.4343e-01, 1.1878e-01,
2.8126e-02, 7.3239e-02, 1.0468e-02,
4.5032e-01, 4.4730e-01, 1.3446e-01,
-1.3374e-01, 8.8554e-02, 3.5610e-01,
3.0584e-01, 2.3536e-01, 1.6161e-01,
-5.1485e-01, 1.2372e-01, 5.4379e-02,
-2.9665e-01, -3.3157e-02, -1.8688e-01,
5.1777e-02, -1.4315e-01, -1.1366e-01,
-2.4471e-01, 5.5554e-02, 8.9284e-02,
-1.6870e-01, 7.6156e-02, 1.2472e-01,
-1.5633e-01, 4.3184e-03, 1.1078e-01,
4.0579e-39, -3.8271e-39, 1.1535e-39,
6.6968e-40, -1.1545e-39, -5.4217e-40,
3.5566e-39, -4.4956e-40, -1.7097e-39,
-4.1778e-39, -3.7655e-39, -3.7148e-39,
-3.8013e-39, -3.5225e-39, -3.4678e-39,
-3.8369e-39, -3.5583e-39, -3.6518e-39,
-1.4894e-02, 2.4801e-03, -4.6996e-02,
6.7453e-04, 1.8799e-02, 2.9889e-02,
7.2700e-03, 1.2385e-01, 9.2522e-02,
3.9300e-39, 3.1853e-39, 2.8376e-39,
2.8888e-39, -4.8734e-39, 2.3402e-39,
-3.9710e-39, -4.3243e-39, 4.1151e-39,
1.6399e-02, -8.2828e-02, -5.8361e-02,
2.1315e-02, 1.1968e-02, 6.8727e-02,
3.8558e-02, 1.5451e-02, 5.4465e-04,
1.0549e-02, -8.6468e-02, -1.8535e-01,
-1.3616e-01, 2.7371e-01, 1.1157e-01,
-1.7097e-01, 1.3659e-01, 2.2831e-02,
-3.3897e-02, 1.3307e-01, 7.4482e-03,
4.8120e-01, 7.7053e-01, 5.3354e-01,
-2.4277e-01, -5.9136e-02, -1.3419e-01,
-7.4653e-02, -6.4169e-02, -2.9526e-02,
-3.6336e-02, 7.2362e-02, -3.5332e-02,
6.2628e-02, 6.2278e-02, 3.5639e-02,
3.6614e-39, -2.6150e-39, -3.5229e-39,
5.3538e-39, -1.2368e-39, 2.1530e-39,
4.8585e-39, -2.4150e-39, 5.2220e-40,
3.8610e-40, 1.4772e-39, 2.1962e-39,
-1.8493e-40, 1.1409e-39, 1.7309e-39,
-2.5751e-40, 9.1351e-40, 1.3106e-39,
6.2867e-02, -1.2727e-01, -6.5307e-02,
1.1415e-01, -4.5529e-02, -1.1358e-01,
4.3427e-02, -6.0994e-02, -7.7808e-02,
-4.1831e-39, 1.3230e-39, 5.5853e-39,
-3.4646e-39, -7.2824e-40, -3.4263e-39,
1.5344e-39, -5.8245e-39, 1.9910e-39,
1.1000e-02, -3.7088e-03, -8.0042e-02,
9.7603e-02, 8.6581e-02, -1.8921e-03,
2.2820e-01, 6.8073e-02, -8.1081e-02,
-3.3901e-01, -1.1231e-01, -8.6476e-02,
1.1147e-01, 4.9587e-01, -1.7039e-01,
-2.0702e-01, 5.8730e-02, -1.3475e-01,
2.3548e-01, -6.8044e-02, 9.4296e-02,
4.4803e-01, 6.1517e-03, -5.5192e-02,
-2.7304e-01, -2.6003e-02, 4.0713e-01,
2.8621e-02, 6.2698e-03, -1.4746e-01,
9.4819e-02, -1.3109e-02, 3.5540e-02,
4.4047e-02, 3.5066e-02, -9.5886e-03
}
,
{
-6.7011e-03, 1.7398e-01, 1.4767e-01,
-1.9882e-02, 1.9286e-01, 4.8626e-02,
1.1465e-01, -4.4017e-02, -1.9288e-01,
-7.5817e-02, 1.5598e-01, 1.2329e-01,
3.4126e-03, -9.4884e-02, -4.2276e-02,
3.9110e-02, -1.3477e-01, -4.4951e-02,
6.0450e-02, 4.4656e-01, 3.8954e-01,
-2.1207e-01, -1.0600e-02, -5.6351e-01,
1.8074e-01, 3.0797e-02, -4.0380e-01,
-1.0733e-01, 3.7228e-02, 9.7157e-02,
-7.5810e-03, 5.5605e-02, -9.1898e-02,
-1.4992e-01, -5.3206e-02, -1.9667e-01,
-1.6667e-01, 7.6091e-02, 1.7064e-01,
2.5322e-01, -9.4636e-03, -2.7899e-01,
4.2013e-02, 1.5693e-01, 3.1124e-01,
-2.1534e-02, 1.3915e-01, -2.8199e-01,
-2.9683e-03, 1.4445e-02, -1.5552e-01,
3.4759e-02, -2.0321e-01, -1.1155e-01,
3.6164e-02, 2.8664e-01, 2.3426e-01,
-1.2525e-01, -1.7195e-01, -5.2270e-02,
3.8782e-02, 5.7734e-02, 2.1945e-01,
1.0243e-01, -1.3159e-01, -1.7844e-01,
-6.0359e-02, 1.9125e-01, 3.3553e-01,
-1.0876e-01, -1.2149e-01, -5.7185e-01,
-2.0583e-02, -4.8168e-03, -7.1908e-02,
-2.3428e-02, 2.9902e-02, 1.0888e-02,
3.6383e-02, 1.0052e-01, 2.8972e-02,
1.1415e-03, -3.4518e-02, -9.0058e-02,
7.3207e-03, 6.0961e-02, 7.5629e-02,
-4.5969e-02, 2.4314e-02, 6.7658e-02,
-1.3043e-01, -3.0343e-01, -2.0799e-01,
-4.6261e-02, -1.7650e-02, -7.2160e-02,
-2.6291e-02, 1.5707e-01, 9.5021e-02,
-4.1030e-02, -8.1977e-02, -3.0776e-02,
-3.0685e-02, 8.2163e-03, 4.0357e-02,
-6.9633e-02, 6.0690e-02, 1.5418e-02,
-1.2814e-01, 7.3968e-02, -3.3742e-03,
-1.5239e-01, 8.9941e-03, 1.7877e-01,
2.1219e-01, -5.2057e-01, -2.2284e-01,
-3.4681e-02, -1.3594e-02, 1.6700e-01,
-7.7366e-02, 8.5138e-03, -4.3159e-02,
4.0597e-02, 9.7247e-04, -3.4326e-01,
-2.1424e-01, -1.6489e-01, -4.3248e-02,
1.5987e-01, 4.6235e-01, 2.6287e-01,
-1.2270e-02, 1.3165e-01, 5.3217e-02,
7.2716e-02, -7.0677e-02, -1.7740e-01,
-6.2357e-02, 1.1932e-01, 1.5733e-01,
-1.0275e-01, 1.4966e-01, 4.8125e-02,
-4.7150e-02, 1.5516e-01, 6.9615e-02,
6.1252e-02, 5.3859e-02, 1.7052e-01,
3.1940e-02, 1.1842e-01, 4.2265e-02,
-4.9531e-02, 1.1519e-01, 9.8914e-02,
1.3455e-01, 1.3177e-01, -2.7938e-03,
1.1895e-01, 1.1377e-01, 6.1035e-02,
8.0390e-02, -4.1028e-02, 3.7415e-03,
-1.0317e-01, 1.0279e-01, -6.5789e-03,
-2.3339e-02, 7.2741e-02, 4.1662e-02,
-7.4087e-02, 8.8531e-02, -4.9697e-02,
4.6134e-02, 1.4300e-01, 1.1720e-01,
3.8271e-03, 1.7108e-01, -2.4779e-02,
6.9844e-02, -4.6467e-02, -9.1699e-02,
5.5704e-02, -3.0312e-02, -7.8252e-03,
-4.3799e-02, -1.6623e-01, -2.3006e-02,
4.9214e-02, 3.1528e-02, 3.3302e-02,
3.1213e-02, 9.8880e-02, -1.1098e-01,
4.5092e-02, -1.6922e-03, -5.1380e-02,
7.6063e-02, 1.4159e-01, 4.1409e-02,
8.0812e-02, 9.7569e-02, 4.1532e-02,
-1.1136e-01, -4.3686e-02, -1.4144e-01,
-9.7717e-02, 4.8239e-02, 5.3374e-02,
-1.1827e-01, 1.0008e-01, 8.6368e-02,
-6.2572e-02, 3.6484e-02, -6.3361e-02,
4.1008e-03, 1.6709e-02, 4.0553e-02,
2.2766e-02, 2.7241e-02, 5.1786e-02,
1.3607e-02, 5.4638e-02, 6.9439e-02,
-2.4211e-02, 4.0065e-03, -1.9540e-03,
-9.5697e-03, 3.0503e-02, 3.5809e-02,
-4.3456e-02, 2.8959e-02, 4.2898e-02,
-1.5629e-02, -9.4347e-02, 7.2799e-02,
2.3115e-01, 7.3449e-02, 6.9354e-02,
1.6014e-01, 1.8878e-01, -2.2148e-02,
-4.9274e-02, -6.9233e-03, 1.0578e-02,
-4.3291e-02, -7.8361e-03, 1.6647e-02,
-5.6168e-02, 1.0317e-02, 3.1170e-02,
1.2530e-01, -3.2398e-02, -6.5690e-02,
-2.5805e-01, 3.6079e-02, 3.5390e-02,
-1.7236e-01, 6.6798e-03, 4.8924e-02,
1.3314e-01, 5.0646e-02, -3.4844e-02,
-1.2559e-01, -1.1774e-01, 1.2898e-01,
-7.7402e-02, -1.0703e-02, -2.6359e-01,
-3.8706e-02, -2.2082e-02, 2.7591e-03,
-8.2353e-02, -3.1941e-02, -1.1937e-01,
2.9747e-02, 2.0041e-01, -5.1984e-02,
1.7919e-01, 6.3603e-02, -5.5516e-02,
1.0116e-01, 8.7370e-02, -8.6624e-02,
-8.4314e-02, 3.5997e-02, 2.1161e-01,
1.0902e-39, 9.3514e-40, 9.3074e-40,
9.8377e-40, 1.1299e-39, 8.2024e-40,
1.2062e-39, 1.0405e-39, 1.0284e-39,
-5.7829e-40, -6.7489e-40, -6.3814e-40,
-6.8460e-40, -7.9377e-40, -7.6449e-40,
-4.7632e-40, -5.6022e-40, -5.2053e-40,
1.8459e-39, 2.1036e-39, 2.1848e-39,
2.0535e-39, 2.3728e-39, 2.4416e-39,
1.7027e-39, 2.0249e-39, 2.0833e-39,
9.1594e-40, 8.0493e-40, 7.7836e-40,
7.5889e-40, 6.3026e-40, 9.3384e-40,
9.6987e-40, 1.1273e-39, 8.1906e-40,
-7.9046e-39, -7.2328e-39, -7.1040e-39,
-7.9046e-39, -7.1862e-39, -7.4931e-39,
-6.5243e-39, -7.1117e-39, -6.9941e-39,
1.3577e-39, 3.5945e-40, -3.6833e-40,
1.3768e-39, 6.9779e-40, -7.5180e-40,
5.7295e-40, -6.0767e-41, -1.3085e-39,
7.7960e-39, 7.8579e-39, 7.4482e-39,
7.4224e-39, 7.5791e-39, 7.4378e-39,
6.5819e-39, 6.7271e-39, 6.6281e-39,
-1.6535e-39, -7.7817e-40, -8.5918e-40,
-2.0861e-39, -1.3658e-39, -1.0560e-39,
-3.4360e-39, -2.6878e-39, -2.6477e-39,
4.6460e-02, 1.1676e-01, -5.9846e-02,
8.6467e-03, -1.1287e-02, 7.0129e-02,
-1.1277e-01, 1.0321e-02, -1.9567e-02,
1.2145e-01, -7.1995e-02, -1.3615e-02,
9.7877e-02, 6.6061e-02, 1.0272e-02,
1.1391e-01, 5.6974e-02, 9.7472e-02,
-3.3605e-02, 6.1751e-02, -4.3004e-02,
-5.1040e-02, -3.8798e-02, -7.1736e-02,
-1.0179e-02, 8.5964e-02, -8.1435e-04,
2.5149e-02, 7.1990e-02, 8.1534e-02,
6.3133e-02, 5.8643e-02, 4.6756e-02,
-5.3580e-03, 3.4411e-02, 5.2957e-03,
1.0652e-01, -6.6035e-02, 8.5754e-02,
3.2919e-01, -1.5958e-02, 2.1694e-03,
-9.0943e-02, -2.1920e-02, 2.9706e-02,
4.7986e-02, 1.7105e-02, -5.7711e-02,
-4.2066e-03, 6.5668e-02, -1.6617e-01,
1.0057e-02, -2.0108e-03, -1.5499e-01,
6.7941e-02, 1.7352e-01, 4.9498e-02,
6.2013e-02, 9.6180e-02, -2.9861e-03,
-1.2482e-02, 9.5709e-03, -8.7913e-02,
-8.6954e-02, 9.9646e-03, 8.0050e-02,
-4.4157e-02, -6.3008e-03, 4.0645e-02,
-7.9624e-02, 1.0856e-01, -4.5341e-04,
7.1085e-02, 5.7002e-02, 1.1673e-02,
-5.1378e-02, -2.3945e-03, -5.9532e-02,
3.4998e-02, -3.6019e-02, 1.0428e-02,
5.9774e-03, 5.4993e-03, 2.4306e-02,
-5.9813e-03, 4.4999e-02, 7.4744e-02,
-3.0773e-02, -3.6835e-02, 5.8396e-04,
-3.8644e-01, 2.4563e-01, 1.2436e-01,
-3.2986e-01, -1.1044e-01, 2.0753e-01,
-1.3621e-01, -1.3544e-01, 5.8882e-02,
8.8837e-02, 5.7460e-02, -3.0960e-02,
-1.2598e-03, 3.9124e-02, -5.3322e-02,
-4.4227e-02, -3.8000e-02, -3.2677e-02,
1.5675e-01, 1.0808e-01, 1.1024e-01,
5.4468e-01, -5.9268e-01, 1.0088e-01,
8.2360e-02, 1.9646e-01, 6.4799e-03,
1.6357e-01, 6.8273e-02, -1.2051e-01,
4.9511e-02, 4.7334e-01, -4.8876e-02,
-1.3130e-01, -5.1568e-03, 1.0088e-01,
-5.8971e-02, 2.5775e-01, 9.0169e-02,
-3.0461e-01, -3.2353e-02, -2.0293e-01,
1.3897e-02, 1.4249e-01, -5.8661e-02,
-1.3624e-01, -5.3026e-02, 3.1038e-03,
-5.6211e-01, -2.8375e-01, -1.2524e-01,
-2.3813e-01, -2.2439e-02, -4.4082e-02,
9.9066e-02, -7.1735e-02, 2.2345e-02,
-1.4791e-02, 1.3225e-01, 8.9460e-02,
-4.8986e-02, -3.2296e-02, -4.7474e-02,
6.5865e-02, -8.0697e-02, -6.8475e-02,
-7.6845e-02, 1.1568e-01, 3.7443e-03,
1.0448e-01, -3.3206e-03, 5.4523e-02,
5.5741e-02, 5.0917e-02, 1.0209e-01,
-9.6729e-02, 7.8876e-02, -4.9550e-02,
-3.8926e-02, 7.1163e-02, 8.9436e-02,
-1.4001e-03, -9.4980e-02, -7.7747e-02,
9.4335e-02, 1.1605e-01, 9.5715e-02,
1.7951e-02, 4.3177e-03, -5.6937e-02,
4.4558e-02, -5.2562e-02, 4.0652e-02,
1.8058e-01, -1.0763e-01, 4.8927e-02,
-5.2569e-03, -1.3437e-01, 2.8578e-02,
1.3592e-02, -3.9346e-02, 1.0003e-01,
1.8091e-01, 7.2687e-03, -3.7241e-02,
6.0438e-02, 5.7872e-02, 7.3778e-02,
1.2411e-02, 4.1856e-02, -2.8892e-02,
3.2884e-02, 6.9072e-02, -5.9363e-02,
-1.7112e-01, -9.9734e-02, -7.3417e-02,
-8.9623e-02, 4.5292e-02, -1.6635e-01,
-3.1895e-02, 1.4284e-01, 2.0752e-01,
2.3383e-02, -1.3490e-02, 5.1593e-03
}
,
{
5.8708e-01, 2.6026e-01, 8.8379e-02,
3.1818e-01, 7.0055e-03, 1.1652e-01,
1.1719e-01, 8.7711e-02, -1.1687e-02,
7.5741e-02, -3.7970e-01, 1.6001e-01,
1.0739e-01, 3.1735e-01, 2.0061e-01,
8.6719e-02, 8.5111e-02, -3.9354e-02,
-9.9512e-02, -9.1524e-02, -9.7984e-02,
5.6333e-02, -1.5928e-01, 1.1998e-03,
2.7488e-02, 2.8168e-02, 1.3768e-01,
5.9686e-02, 2.8931e-01, -1.7131e-02,
1.6391e-01, 3.3748e-01, 1.2296e-01,
8.9242e-02, 1.4761e-01, 1.7187e-01,
-2.6352e-39, -4.0703e-39, -5.1751e-39,
-2.5214e-39, -3.9666e-39, -4.6282e-39,
-2.4635e-39, -3.6734e-39, -4.3359e-39,
-7.1654e-02, 7.9691e-03, -1.0219e-01,
-5.5684e-02, -1.3065e-01, -1.9106e-02,
1.0561e-01, 5.9054e-02, -2.1279e-02,
-1.8840e-02, 1.6690e-01, 3.8050e-01,
6.2779e-02, -1.2124e-01, 5.0304e-01,
2.1870e-02, 1.7631e-01, 1.4858e-01,
1.4614e-01, -1.1767e-01, -3.9155e-02,
1.2963e-01, -4.6753e-02, 1.3848e-01,
-8.2292e-02, 2.1908e-01, 6.2794e-02,
-3.2625e-01, -8.8528e-03, -6.5603e-03,
5.4245e-02, 2.7983e-01, 2.1608e-01,
8.5890e-02, 1.0955e-01, -1.1606e-01,
9.7435e-02, 1.5911e-01, 6.7285e-02,
3.9570e-02, 1.9333e-01, -1.5531e-02,
-2.3475e-01, -2.5006e-02, 2.8106e-02,
6.8740e-03, 1.3261e-01, -3.8563e-02,
8.8758e-02, -4.2225e-02, 4.7042e-02,
5.6284e-02, -2.8303e-02, 3.4532e-03,
-4.0265e-02, -3.0645e-02, -5.2059e-02,
-4.6196e-02, -2.4868e-02, -3.3257e-02,
-3.7208e-02, -2.4100e-03, -7.1959e-04,
6.4237e-39, 6.1438e-39, 6.5434e-39,
6.1596e-39, 6.1608e-39, 6.3157e-39,
6.4263e-39, 6.4625e-39, 6.5877e-39,
1.1092e-01, -4.4784e-02, 9.1292e-02,
9.2900e-02, 1.2459e-01, -7.1447e-02,
2.6158e-02, -5.0219e-02, -5.6136e-02,
-5.8603e-02, 2.9323e-02, -2.4230e-01,
-9.4921e-02, 1.9103e-01, 1.1670e-01,
1.2022e-02, 6.2830e-02, 3.0393e-01,
3.3819e-02, 1.0040e-01, 8.2600e-02,
-8.7604e-02, 7.0641e-02, -1.0132e-01,
-9.9371e-02, 8.9363e-02, -1.0703e-01,
4.4603e-01, 7.9636e-03, 1.8834e-01,
1.1859e-01, 4.0760e-01, 9.6841e-02,
-1.1735e-01, 2.3993e-01, -7.7916e-02,
6.3481e-02, -1.4958e-01, 1.1554e-02,
5.2668e-02, 3.4379e-01, 8.3536e-03,
-5.5403e-02, 1.1655e-01, -7.5022e-02,
-8.2992e-02, -7.0322e-02, -1.0078e-01,
-1.4516e-02, -1.6558e-02, 6.6806e-02,
-6.7454e-04, -5.7525e-02, 1.5772e-01,
1.6446e-01, -1.1897e-02, -8.3387e-02,
7.1339e-02, 1.6254e-01, 1.6963e-01,
1.2630e-02, 5.7933e-02, 8.4686e-02,
-5.6318e-39, -6.1837e-39, -6.1661e-39,
-5.9923e-39, -6.2371e-39, -6.4922e-39,
-6.4206e-39, -6.6092e-39, -7.1603e-39,
4.6507e-02, -4.5924e-02, -7.3838e-02,
-3.3012e-02, 5.1295e-02, -7.4884e-02,
7.5389e-02, 1.2002e-01, 3.9442e-03,
9.9461e-02, 1.9607e-01, 1.4896e-01,
-1.1191e-02, 1.8352e-01, 2.6778e-01,
8.0977e-02, 1.0885e-01, 2.5331e-01,
3.1503e-02, -3.0004e-01, -6.9114e-02,
2.0705e-01, -2.0978e-02, 1.5154e-01,
6.3033e-02, -1.5721e-01, 5.1067e-02,
-1.1220e-02, 1.5315e-01, 4.5277e-03,
3.3250e-01, 1.4207e-01, 1.3469e-01,
5.2996e-01, -2.5803e-01, -4.5525e-02,
3.9807e-02, -1.7088e-01, -1.2414e-01,
2.1564e-01, -2.9160e-01, -1.8796e-01,
1.5482e-02, 2.7005e-01, 8.2446e-02,
5.4906e-02, -1.0507e-01, -8.0069e-02,
-4.5729e-03, -2.0621e-02, 5.0088e-02,
2.5479e-02, 9.5924e-02, 8.3813e-02,
4.7833e-02, -2.6191e-01, 3.3483e-02,
6.1653e-02, 7.1940e-03, -1.3578e-01,
1.7662e-01, -2.8194e-02, -2.7509e-02,
-1.9419e-39, -2.4904e-39, -2.7567e-39,
-2.9896e-39, -3.2700e-39, -3.6336e-39,
-3.8942e-39, -4.2028e-39, -4.5229e-39,
-1.6839e-02, -9.4421e-02, -3.0147e-02,
-6.5974e-02, -1.6716e-02, 5.0672e-02,
-7.9841e-02, -4.7086e-03, 5.0016e-02,
1.8223e-04, 3.3984e-03, 5.1965e-02,
-7.3512e-02, -5.6604e-03, -1.1630e-01,
-1.0767e-01, 3.2261e-02, -2.0044e-01,
1.0995e-01, 4.3581e-02, -3.9397e-02,
-1.4476e-02, -2.3087e-02, 2.6423e-03,
1.2047e-02, 1.2084e-01, 1.8563e-01,
-2.8497e-01, -2.5353e-01, 1.0933e-01,
8.8974e-03, 1.3315e-01, 1.9153e-01,
2.0427e-02, -8.9900e-02, 2.2363e-02,
2.8575e-02, 1.6351e-01, 1.1876e-01,
-2.7438e-02, -1.0816e-03, -5.5680e-02,
5.1369e-02, -2.0575e-02, 4.5232e-02,
9.4988e-02, 2.5418e-02, 8.9888e-02,
9.6631e-02, 1.5828e-01, 1.1577e-01,
-2.9665e-02, 3.2035e-02, 1.4428e-01,
7.4352e-03, 2.4917e-03, 4.2713e-03,
1.2534e-02, 2.1314e-02, 1.5963e-02,
2.2920e-03, 2.1864e-02, 2.2921e-02,
7.1089e-40, 5.3581e-40, 4.5922e-40,
6.2492e-40, 4.6365e-40, 4.5466e-40,
9.2740e-40, 7.7219e-40, 7.4187e-40,
-7.0909e-02, 1.1127e-01, -8.8953e-02,
-5.0537e-04, 4.5664e-05, 1.3829e-02,
7.4380e-02, 1.3900e-03, 4.0345e-02,
5.7173e-02, 8.7514e-02, -3.9945e-01,
4.4116e-02, 1.4148e-01, -2.7578e-02,
-1.2133e-02, 1.9647e-01, -2.6767e-02,
8.5870e-02, -1.3723e-02, 1.3408e-02,
7.9471e-03, 7.8321e-02, 5.1118e-02,
-8.3660e-02, -7.1584e-02, 2.7423e-02,
-5.5651e-39, -3.2350e-39, 4.7534e-39,
-4.8581e-39, -5.8010e-39, 6.3268e-39,
-3.4016e-39, 6.2313e-39, 5.7413e-39,
-3.0708e-39, 6.0155e-39, -6.3317e-39,
-3.1054e-39, -5.5914e-39, -6.4181e-39,
-1.3636e-40, -6.0343e-39, -6.2034e-39,
1.0108e-39, -2.5283e-39, -8.6098e-40,
1.0088e-39, -2.3042e-39, -8.2029e-40,
1.2802e-39, -3.7761e-39, -4.6451e-40,
1.4160e-39, 7.3869e-40, 1.3275e-39,
1.2560e-39, 1.0078e-39, 1.2296e-39,
-2.4490e-39, 8.6071e-40, -2.4510e-39,
2.1753e-39, -2.0576e-39, -2.1365e-39,
2.0157e-39, 2.0755e-39, 1.9439e-39,
2.0998e-39, 2.0732e-39, 2.1072e-39,
-1.1289e-39, -1.6132e-39, 4.8117e-40,
1.2029e-39, -1.3112e-39, 6.4761e-40,
1.4958e-39, -9.2719e-40, 8.9526e-40,
3.6032e-39, -4.9803e-39, -2.4410e-39,
-1.6429e-39, -4.9602e-39, -5.9626e-39,
-1.6627e-39, -4.9809e-39, -5.6258e-39,
1.6619e-39, 1.7856e-39, 5.1822e-39,
1.5443e-39, 1.4215e-39, 6.1830e-39,
1.4242e-39, -1.7895e-39, 5.2206e-39,
-2.4764e-01, -2.8696e-01, -5.7562e-03,
1.9255e-01, 5.1335e-02, -1.4512e-01,
-1.1017e-02, -3.6505e-02, -1.1773e-01,
5.8651e-02, -1.9354e-02, 2.1595e-02,
-3.5114e-03, 1.8335e-01, 4.0043e-02,
1.0579e-01, -6.3055e-02, 2.6981e-02,
-1.4351e-02, -1.5029e-02, -9.7792e-02,
4.6718e-02, 3.8673e-02, -2.3410e-02,
-2.8942e-03, -8.4898e-03, -3.3613e-02,
2.0298e-01, 9.7218e-02, 1.5052e-01,
3.2108e-01, 2.6568e-01, 1.3809e-03,
1.0008e-01, 6.9262e-02, -4.7810e-02,
4.1291e-39, 4.3762e-39, 4.2724e-39,
4.5864e-39, 4.7827e-39, 4.8821e-39,
4.5529e-39, 4.6921e-39, 4.7519e-39,
9.1246e-03, -1.8136e-02, -5.8517e-03,
9.1080e-03, 4.2591e-02, -1.5604e-02,
-3.6270e-02, 5.9184e-02, 2.3189e-02,
4.2636e-02, 3.6600e-01, 4.7134e-01,
3.6666e-02, 4.3565e-01, 2.1105e-01,
-5.2747e-02, 4.0503e-01, 2.0926e-01,
8.8427e-02, 4.9138e-02, -2.3381e-01,
-5.6521e-02, 7.5013e-02, -1.4783e-01,
-4.7299e-02, -8.1200e-02, -6.5665e-02,
-1.6281e-01, -2.3070e-01, 5.4033e-02,
1.1527e-01, 3.4730e-01, 1.9293e-02,
-1.8352e-02, 2.0626e-01, -1.1955e-01,
8.1665e-02, 3.8584e-02, 2.7958e-03,
6.4294e-02, 1.3912e-01, -5.6370e-02,
-1.7618e-02, 9.0357e-02, -5.5021e-03,
9.3211e-05, 1.5219e-01, 1.0844e-01,
7.6218e-02, 1.7016e-01, 9.2438e-02,
4.3387e-02, 8.0141e-02, -3.2034e-02,
9.2121e-03, -2.8742e-03, -1.5988e-03,
9.1980e-03, 1.6983e-02, 3.3154e-03,
-2.5642e-02, 4.1607e-03, 6.9246e-03,
3.7665e-40, -4.0391e-41, -4.0502e-41,
2.2436e-40, -1.7190e-40, 1.6583e-40,
1.4090e-40, 2.2914e-41, 6.7388e-41,
-8.1776e-02, 9.0814e-02, 1.0222e-01,
-3.4949e-02, 1.0266e-01, 3.6826e-02,
-8.3856e-02, 1.1102e-01, 1.1026e-01,
1.5993e-02, -1.1626e-01, -3.0870e-01,
-3.4119e-03, 1.7638e-01, -1.9092e-01,
-1.2549e-01, 3.2538e-01, -7.9381e-02,
3.8433e-03, -8.2530e-02, 3.2103e-02,
-1.1637e-02, -1.0371e-01, 2.3851e-02,
2.5390e-02, 7.7085e-02, 8.9536e-02
}
,
{
-2.8918e-02, -8.3719e-02, -3.3026e-02,
-2.2620e-01, 2.4280e-02, -2.1254e-01,
2.8231e-02, 3.5323e-02, -2.8425e-02,
1.6891e-01, 3.8192e-03, 7.2794e-02,
-1.6364e-01, -4.1031e-02, -1.3141e-02,
-3.9478e-02, 1.4910e-01, -7.0978e-02,
-6.3880e-02, 9.8206e-02, 1.3163e-01,
1.5778e-01, 1.1914e-01, 3.3277e-01,
-3.6808e-01, -5.5627e-01, 1.4401e-01,
-4.0314e-01, 3.6298e-01, -3.8212e-02,
-2.3782e-01, 2.5410e-01, -2.2334e-01,
7.6542e-02, 9.4998e-02, 3.3399e-02,
-1.8601e-01, -1.8863e-02, -4.1835e-02,
-5.8671e-02, -8.9987e-02, -6.1069e-02,
-7.1062e-02, -9.5987e-02, 1.2318e-02,
5.4541e-39, -1.8871e-39, 4.5048e-39,
-2.2237e-39, -5.4753e-39, 1.4395e-39,
-3.5753e-39, 6.1466e-40, -2.1567e-39,
4.5273e-02, 1.1619e-02, 1.1379e-01,
1.4093e-01, 1.0444e-01, 1.1283e-01,
-3.0230e-02, 3.1937e-01, 5.0541e-02,
8.2862e-02, -3.1540e-02, -6.4833e-02,
1.5168e-01, 1.7613e-03, 4.2690e-02,
1.8820e-01, 4.3783e-02, 6.3473e-02,
8.0477e-02, 1.0397e-01, -3.6337e-02,
-7.2828e-02, 6.4048e-02, 4.2476e-02,
-1.3974e-04, -2.2468e-01, -4.9189e-02,
-2.7478e-03, 8.7663e-03, 4.3870e-02,
-3.3168e-02, 1.1915e-01, -1.8083e-02,
4.8155e-02, -4.1742e-02, 1.1251e-01,
-6.1535e-02, 5.1782e-02, -2.3494e-02,
5.1677e-02, 1.4067e-01, -1.0377e-01,
3.2951e-03, 1.1942e-02, -1.1775e-01,
-2.2104e-02, -8.1073e-02, -3.7509e-02,
6.8970e-03, 1.6406e-02, 4.6923e-02,
-8.8448e-03, 2.9130e-02, 3.1024e-02,
7.6795e-02, 4.6816e-02, -1.3204e-02,
1.3988e-01, 1.1175e-01, 8.7121e-02,
1.2097e-01, -3.8463e-02, 6.7387e-02,
1.4708e-39, 1.7125e-39, 2.7764e-39,
1.5203e-39, 1.5811e-39, 4.4921e-39,
1.8828e-39, 1.7593e-39, 2.3774e-39,
4.3474e-02, -4.7065e-02, -7.1999e-02,
6.0338e-02, 3.7240e-02, 2.8802e-02,
-4.0701e-02, 1.8627e-02, -1.8181e-02,
5.5169e-02, 1.1874e-01, -7.0475e-02,
-1.3438e-02, 1.4335e-01, 1.5180e-01,
5.6331e-02, 7.9719e-02, 6.2691e-03,
-6.6460e-02, 2.7455e-01, 5.5916e-02,
1.3515e-01, -3.7263e-01, 1.3463e-01,
-4.0820e-05, 3.1896e-01, -8.3871e-02,
-7.6172e-02, 6.1963e-02, -1.3804e-02,
-5.2852e-02, 1.0006e-01, -3.4106e-02,
6.7218e-02, -3.8616e-03, -7.1788e-02,
1.6386e-02, -1.8612e-02, -1.7354e-01,
-1.2166e-01, 1.2667e-02, -3.3852e-02,
-3.2897e-02, 1.0343e-01, 2.4924e-01,
-1.3272e-02, 1.5705e-01, 6.7731e-02,
1.0637e-01, 1.9482e-02, -2.0655e-01,
-5.9087e-03, -7.1073e-02, 1.8723e-02,
-2.6087e-02, 1.5997e-01, 9.6264e-02,
1.2431e-01, 1.1462e-01, -9.7197e-02,
-6.2347e-02, -4.5239e-02, -2.6443e-02,
3.7406e-39, -4.6345e-40, 3.7971e-39,
-3.8112e-39, -3.5585e-39, 4.6938e-39,
6.0588e-39, -4.2403e-39, 1.5311e-39,
1.6381e-01, -6.8390e-02, 2.6527e-02,
-9.8612e-02, 2.1953e-01, -2.1886e-01,
7.4841e-02, -1.2118e-01, -8.1700e-02,
4.4974e-02, 7.7514e-02, -8.4620e-02,
-2.9808e-02, 2.1591e-02, -3.9502e-02,
-5.5797e-02, -6.5105e-02, -5.9860e-02,
-3.7811e-01, -2.3056e-01, -7.4491e-02,
4.0833e-02, -2.2613e-01, -1.4986e-01,
-1.0974e-01, -6.5161e-01, 1.7546e-01,
7.7903e-02, -1.5969e-02, -6.3040e-02,
-1.7819e-01, -7.1414e-02, 1.8451e-02,
-1.0618e-01, 3.5614e-03, 3.6719e-02,
1.5666e-01, 3.9222e-01, 9.1678e-02,
1.4519e-01, 5.7331e-01, -7.3466e-02,
1.0271e-01, 1.0803e-01, -1.3150e-01,
3.7496e-01, 1.5001e-01, 1.4727e-01,
3.2151e-01, 1.2875e-01, -8.1645e-02,
2.8629e-01, 1.9329e-01, -8.0009e-02,
-9.9557e-02, -2.6954e-02, 2.6042e-02,
-5.3374e-02, 1.1369e-01, 4.6503e-02,
-3.4068e-02, 9.1849e-03, -9.1420e-02,
4.6343e-39, 4.8289e-40, 3.1694e-40,
-3.5093e-39, -4.7356e-39, 7.1265e-40,
-4.9626e-39, -2.1280e-39, 1.8542e-39,
-1.3634e-01, -5.4825e-02, -6.6125e-02,
-2.0694e-01, 1.4924e-01, 1.4028e-01,
3.2735e-02, 7.6360e-02, -9.2541e-02,
-1.2149e-01, -7.9789e-02, -2.9591e-02,
1.2852e-02, 1.2457e-01, 1.3081e-02,
-3.2966e-03, 1.1089e-01, 8.6461e-02,
1.4352e-01, 5.9238e-02, -2.1140e-02,
7.3999e-02, 2.0893e-01, 3.5512e-02,
-5.3110e-02, 3.9222e-01, 1.3103e-01,
1.0168e-01, 1.6685e-02, 5.1616e-02,
9.8241e-02, -1.6502e-01, -1.2586e-01,
8.3915e-02, 7.4837e-03, 5.7355e-02,
-3.4982e-02, -1.2773e-01, 6.8213e-02,
-1.4674e-01, -3.6844e-01, 8.1546e-02,
-1.5385e-01, -7.0368e-02, 4.3894e-02,
7.8201e-02, -1.3952e-01, 1.5154e-01,
2.3880e-02, 1.4078e-01, -1.2906e-01,
-1.8268e-01, -1.5687e-02, -1.2588e-01,
-9.4643e-03, 1.4718e-02, 7.4932e-02,
3.0996e-02, -1.2339e-01, 1.7452e-01,
4.4221e-02, -1.3808e-01, -1.0205e-02,
-8.6959e-40, -3.7907e-39, -1.6020e-41,
4.3567e-40, 1.4647e-39, 6.5692e-40,
5.4286e-39, 8.8667e-40, -3.5047e-39,
2.4116e-02, -9.5358e-02, 1.6468e-01,
3.1916e-01, -2.3472e-01, -2.1644e-01,
1.2945e-01, -1.8403e-02, -3.2247e-02,
1.3666e-02, -3.0548e-02, -4.7635e-02,
-9.2714e-02, -2.1605e-01, -5.9464e-02,
-8.9110e-03, -3.9299e-03, -2.3289e-02,
-1.7855e-01, 9.0661e-03, -1.9142e-02,
-5.6754e-02, -5.4451e-01, -5.7664e-01,
1.6835e-01, 2.0531e-02, 2.0812e-01,
5.2794e-02, -9.0414e-02, 3.5560e-02,
3.7395e-02, 5.9355e-02, -3.6676e-02,
3.8035e-02, 6.7844e-02, 1.1042e-01,
5.0372e-02, 6.8188e-02, -8.5353e-02,
2.2769e-01, 5.9758e-01, -7.4568e-02,
7.8316e-02, 8.4925e-02, -4.0400e-02,
-7.7984e-02, -2.0739e-01, 1.1736e-01,
2.4528e-02, 2.1850e-01, 2.5639e-01,
-2.4561e-02, 8.4661e-02, -9.2191e-02,
-2.7006e-02, -7.8921e-02, -2.7124e-02,
-5.9232e-03, -2.7693e-02, 5.9524e-02,
9.7704e-02, 9.6223e-02, 2.0432e-02,
-2.5588e-39, 5.5478e-39, -5.6209e-39,
-4.7285e-39, 4.5875e-39, -5.7483e-39,
6.7240e-40, -3.5113e-39, -3.6246e-39,
1.6870e-03, -2.1707e-01, -3.8895e-02,
-5.8465e-02, -5.9146e-02, 1.1936e-01,
-2.7727e-02, -9.5047e-02, -2.2627e-01,
-9.5155e-02, -7.1422e-02, 9.4611e-03,
3.7587e-03, 1.6966e-02, 2.8839e-02,
-3.0794e-02, 1.9888e-02, -5.2541e-02,
-1.0708e-02, 3.0171e-02, -3.0473e-01,
-1.0214e-01, 4.2017e-02, 2.5568e-01,
-9.8664e-02, -5.5928e-01, -7.6876e-02,
-8.6821e-03, 4.6484e-02, -3.0836e-01,
-1.0205e-01, 6.8113e-02, -2.8059e-01,
-5.7828e-02, 2.0990e-02, -1.2843e-01,
7.5680e-02, 1.7504e-02, 1.6278e-01,
1.4075e-01, 2.4361e-01, 2.2737e-01,
-1.3044e-01, 8.2145e-03, 1.6344e-01,
-2.4780e-03, 1.5108e-01, 1.3313e-02,
-9.5257e-02, 6.1810e-02, -1.9386e-01,
7.1365e-02, 1.5328e-01, 9.5848e-04,
1.2278e-01, 7.8318e-02, 3.3400e-02,
4.8597e-02, 6.0632e-02, -5.7238e-02,
3.2522e-02, 4.5926e-02, -9.5566e-02,
1.0844e-39, -3.2490e-39, -2.6904e-39,
-3.0517e-39, 4.7535e-39, 4.3440e-39,
-1.3996e-39, 4.5201e-39, -3.6165e-39,
-5.6164e-02, 1.0353e-01, 6.6228e-02,
8.2147e-02, 4.7827e-01, 1.2004e-01,
-6.8150e-02, 1.8340e-01, 2.2113e-01,
1.0580e-05, -2.0949e-01, -1.0358e-01,
1.6206e-01, 1.2538e-01, -1.3104e-01,
1.3700e-01, 2.9282e-02, -8.7020e-02,
4.5467e-39, 5.9787e-39, 2.6105e-39,
-1.2670e-39, 2.9513e-39, -1.0811e-39,
-3.9129e-39, -1.8499e-39, 2.9297e-39,
5.7414e-39, 5.5907e-39, 5.5702e-39,
5.9004e-39, 5.7585e-39, 6.3188e-39,
5.7395e-39, 5.6146e-39, 5.6451e-39,
-7.3964e-39, -6.3330e-39, -5.5236e-39,
-7.5172e-39, -5.8828e-39, -3.7555e-39,
-6.9528e-39, -7.7656e-39, -5.5115e-39,
-7.9031e-39, -7.8200e-39, -7.7914e-39,
-7.4570e-39, -7.6413e-39, -7.9054e-39,
-7.3437e-39, -6.7956e-39, -7.0789e-39,
-3.6774e-40, 1.3572e-40, 3.0250e-40,
-4.1792e-40, -4.6240e-40, 2.2528e-40,
-5.2143e-40, -5.6847e-40, -4.2768e-40,
-4.0128e-39, 1.3485e-39, 1.3436e-39,
1.5337e-39, -3.9186e-39, 1.2120e-39,
1.2992e-39, 1.5671e-39, 1.5659e-39,
-4.6533e-39, -4.7029e-39, -6.0334e-39,
-5.1157e-39, -5.3257e-39, -5.8595e-39,
-4.3046e-39, -4.4391e-39, -5.0039e-39,
-1.0025e-39, -1.0145e-39, -8.6762e-40,
-1.0282e-39, -1.0939e-39, -9.4134e-40,
-1.1868e-39, -1.2133e-39, -5.4261e-40
}
,
{
-1.2633e-01, 2.7332e-01, -4.6674e-01,
-9.4537e-03, 9.6797e-02, -6.4975e-01,
1.8103e-02, 2.7190e-03, 2.3888e-01,
4.8553e-02, -8.7297e-02, 1.8415e-01,
3.1194e-02, -7.2899e-02, -8.1835e-02,
7.1639e-02, -3.1455e-02, -6.2866e-02,
-2.1413e-02, 4.6066e-02, 9.2372e-02,
1.5761e-01, -1.0352e-01, -3.4808e-01,
2.3715e-02, 1.6453e-01, -1.3699e-01,
1.1705e-01, -1.6882e-02, 1.2575e-01,
-2.9834e-02, -1.1558e-01, 4.7318e-01,
3.5301e-02, 1.1246e-01, 3.5038e-03,
1.5837e-01, -2.9968e-01, 1.6094e-01,
4.0562e-02, -1.6329e-01, -3.7023e-02,
-3.9991e-02, 1.7001e-01, -2.7735e-03,
8.8139e-02, -2.4828e-01, 5.5751e-04,
-1.3871e-01, -2.4839e-01, 1.7996e-03,
-1.1670e-01, 3.3651e-02, -2.9559e-02,
3.8572e-03, 3.7329e-02, 4.7511e-02,
-7.8848e-02, 1.2844e-01, 9.2677e-02,
-8.5041e-02, 5.7212e-02, -1.0415e-02,
-3.2462e-39, 2.3003e-39, 4.9676e-39,
-3.9261e-39, -6.8290e-40, 5.9119e-39,
-4.1242e-39, -1.1996e-39, 3.8436e-39,
-2.3243e-02, -2.2525e-02, 3.9668e-02,
-1.1210e-01, -2.3892e-01, 1.6431e-01,
-1.3998e-01, -1.5857e-01, -1.5625e-01,
-1.7634e-02, -3.9174e-02, -9.0936e-03,
-3.9428e-03, -1.6411e-02, 2.6484e-03,
1.1376e-02, -2.9057e-03, 6.3382e-02,
4.8930e-02, 9.1298e-02, 1.8195e-02,
-6.3365e-02, -1.5407e-01, 8.1543e-02,
4.9919e-02, 1.6852e-01, 4.4053e-02,
-4.8682e-02, -7.3614e-02, -6.9206e-03,
-4.8193e-02, -2.3704e-01, -8.3394e-03,
5.6024e-02, 3.7845e-01, -2.4550e-02,
5.2050e-02, 2.2027e-01, -4.1328e-02,
-6.6327e-02, 1.0450e-01, 1.7058e-02,
-1.2047e-01, 5.2494e-02, -1.8018e-02,
5.4807e-02, 1.1177e-01, 2.3511e-02,
6.0413e-03, -3.2457e-02, 7.6611e-02,
-2.1276e-02, 3.0054e-02, 5.0752e-02,
7.5556e-02, 2.5734e-02, -6.0634e-02,
1.2201e-01, -4.1533e-01, 2.7634e-02,
4.5560e-01, 3.2832e-01, 2.6277e-02,
1.9889e-39, 3.8337e-39, 4.0170e-39,
1.5149e-39, 3.6456e-39, 4.0474e-39,
1.1508e-39, 2.7381e-39, 3.8673e-39,
-7.9206e-02, -2.0763e-02, -2.4842e-01,
-6.5777e-02, -1.8446e-01, 2.6178e-01,
-1.7908e-02, -2.3039e-01, -3.5767e-01,
1.0324e-02, 1.3610e-01, 8.6519e-02,
1.3499e-01, 3.1933e-02, 9.1822e-03,
-3.6017e-02, -2.2056e-01, -2.3258e-01,
-7.6185e-02, -2.8981e-01, -1.1816e-01,
-9.9048e-02, 5.3879e-02, -1.7351e-01,
-2.1874e-01, -1.2109e-01, -3.1457e-01,
5.1576e-02, -2.5656e-02, 4.6789e-02,
7.6286e-02, 6.0126e-01, -2.5925e-01,
-5.3443e-02, -3.3656e-01, 4.7585e-01,
-4.7442e-02, -5.1580e-02, -8.5216e-02,
-1.0600e-01, -1.3859e-01, -3.1484e-01,
2.1454e-01, -1.1851e-01, -7.6614e-02,
-7.8873e-03, -7.0275e-02, -1.0958e-01,
-8.0654e-02, 1.3946e-01, 2.5292e-01,
1.3254e-03, -6.7372e-02, -2.6429e-01,
-8.2344e-02, 1.2388e-01, 5.2930e-02,
8.3665e-02, 3.9729e-01, 4.7687e-02,
-4.4502e-02, -8.3105e-02, -1.6430e-01,
1.2825e-39, 1.7532e-39, 2.1774e-39,
-2.1331e-39, -2.1826e-39, -1.0009e-39,
3.7081e-39, 2.0015e-39, -5.8349e-40,
-3.5278e-02, 6.5211e-02, -5.4199e-03,
8.3961e-02, 3.1410e-02, 4.4510e-02,
-5.4905e-02, 4.0727e-02, -1.5710e-02,
1.0813e-01, 8.2043e-03, 4.1303e-02,
1.3405e-01, 1.4150e-01, 7.2155e-02,
3.3942e-02, -4.7781e-02, 1.6095e-01,
-1.4266e-01, -2.5283e-02, 6.4043e-03,
-1.8699e-02, 1.0895e-01, -2.1497e-02,
5.5074e-02, 1.7031e-02, 1.0572e-01,
7.3199e-04, 1.0813e-01, -9.0280e-05,
1.4808e-01, 2.5436e-01, -1.3749e-01,
2.2936e-02, -7.9733e-02, -2.2360e-01,
6.0406e-02, -1.2874e-01, -7.4692e-02,
-1.3216e-01, -9.9889e-03, 2.7608e-03,
-1.1412e-01, -5.1312e-02, -1.7196e-02,
-2.2800e-02, -1.2112e-01, -9.3855e-03,
3.6905e-02, 1.0049e-01, 9.0602e-03,
-7.3200e-02, 1.0628e-01, -4.8218e-02,
-4.6525e-02, 6.0314e-02, -3.6467e-03,
-8.0943e-02, 2.5461e-01, 1.5461e-01,
-5.7708e-02, -5.7823e-02, 5.4042e-02,
3.8847e-39, 3.5806e-39, 4.1610e-39,
3.9082e-39, 4.1898e-39, 4.1926e-39,
4.1200e-39, 4.3759e-39, 4.3977e-39,
-3.3576e-01, 9.5443e-02, 2.7804e-02,
-2.3834e-01, -7.2650e-01, -1.2229e-01,
1.0380e-01, 1.9520e-01, 3.4571e-02,
-3.7291e-02, 7.6216e-02, 8.6171e-02,
-1.6324e-01, -8.6759e-03, 4.3038e-02,
-3.4364e-02, -7.2777e-03, 3.7451e-02,
1.8826e-01, 1.6387e-01, -3.4750e-02,
-2.0203e-01, 2.4170e-01, 9.0358e-05,
-1.3049e-01, 9.6855e-02, -1.6737e-03,
-6.3782e-02, 7.1413e-02, -6.5077e-02,
-1.5262e-01, 4.3261e-01, -8.4224e-02,
6.4632e-02, 1.0553e-01, -1.5274e-01,
4.4294e-05, 8.6239e-02, 5.7537e-03,
-5.7633e-01, -5.0076e-03, -5.2298e-02,
1.8556e-01, -1.1332e-02, -2.7010e-02,
1.6155e-01, -3.0337e-02, -9.6808e-03,
-2.8404e-01, -2.7625e-02, 1.6058e-02,
5.7937e-02, -6.6464e-02, 1.1096e-02,
7.8268e-02, 8.6122e-02, 2.9298e-02,
6.4696e-02, 2.0285e-01, 4.3660e-02,
1.5339e-01, -3.7650e-02, 7.1438e-03,
-8.9058e-40, -3.6429e-39, -4.7562e-39,
8.3914e-40, -2.8054e-39, -3.6702e-39,
4.3666e-39, -1.0602e-39, -3.0369e-39,
7.2731e-02, -1.0227e-01, -1.9583e-02,
-1.7466e-02, -2.0097e-01, 9.3108e-02,
6.5196e-02, -1.1880e-01, -3.5152e-03,
-5.6533e-02, 6.2109e-02, 5.2029e-02,
5.7971e-02, 5.1577e-02, 6.6318e-02,
-2.1669e-03, 7.7274e-02, -4.0609e-02,
2.8531e-02, -8.3960e-02, 1.3615e-02,
-1.1151e-02, -1.4162e-03, 5.6661e-02,
-8.0954e-02, -1.0600e-01, 4.3276e-02,
7.6762e-04, 3.1437e-02, -6.1084e-02,
-8.1119e-02, 2.1406e-01, 6.0836e-02,
4.8105e-02, -1.6263e-01, 9.2555e-03,
1.1060e-01, -2.1090e-01, 1.6435e-01,
-1.0248e-01, -1.1884e-01, -7.9929e-02,
5.9980e-02, 1.0271e-01, -1.1891e-02,
-7.5044e-02, -2.3655e-02, -5.2865e-02,
2.1542e-02, 2.7305e-04, 1.3508e-01,
-1.2317e-02, 9.0742e-02, -3.0079e-03,
-9.9020e-02, 1.5578e-01, -2.1482e-03,
-8.9029e-02, 1.8470e-01, 3.7571e-02,
-2.0394e-01, -1.3735e-01, 2.9648e-02,
-4.3016e-40, -7.3591e-40, -7.3773e-40,
-4.1239e-40, -8.6029e-41, -6.9504e-42,
-7.5082e-40, 1.2975e-40, 2.1462e-40,
-1.8967e-02, -1.4903e-01, 8.1452e-02,
1.2099e-01, -2.5524e-02, 1.3285e-02,
-1.3780e-01, -5.3359e-02, -3.1310e-02,
-1.8984e-02, 4.1962e-02, 1.0186e-01,
-1.0823e-01, 1.1079e-01, 7.8613e-02,
-1.4521e-01, -7.7509e-02, 1.8768e-02,
5.0613e-03, -3.0459e-02, -6.3055e-02,
4.4540e-02, 2.0135e-01, 9.6351e-02,
-1.9495e-02, -1.2314e-01, 1.1720e-02,
2.1739e-02, 5.2098e-02, -4.0453e-02,
-9.9983e-02, 4.7578e-02, -2.7862e-02,
-8.6565e-02, 1.5241e-01, -4.0462e-02,
4.0458e-02, -1.2871e-01, -4.3491e-02,
9.8981e-02, -1.3637e-01, 2.0092e-02,
1.5626e-01, -8.4550e-04, -2.5701e-02,
1.8511e-02, -1.0257e-01, -7.3238e-02,
-3.9802e-02, -1.6120e-02, -7.4068e-04,
-1.1377e-02, 9.7975e-03, -9.0342e-02,
-6.7152e-02, 1.0208e-01, 2.5234e-02,
-4.3687e-02, 2.5334e-01, 9.2712e-02,
3.7702e-01, 4.1450e-02, 1.9934e-02,
-5.4201e-39, -6.7158e-39, -7.5025e-39,
-5.2548e-39, -6.4829e-39, -7.2782e-39,
-4.9999e-39, -5.9599e-39, -6.0469e-39,
3.5890e-02, -7.3738e-02, 9.8899e-02,
3.3312e-02, 5.8231e-02, -2.1348e-01,
8.6289e-02, 5.0837e-02, -6.5613e-02,
7.0208e-02, 4.1424e-02, -6.0761e-02,
4.4654e-02, -3.3590e-02, -5.3044e-02,
1.2319e-01, -4.4666e-02, -8.8193e-02,
-9.0463e-02, -3.0083e-02, 6.8075e-02,
4.2531e-02, 4.3248e-01, 1.3480e-01,
9.2389e-02, 1.3683e-01, -2.6092e-01,
2.8925e-02, 2.3317e-01, 7.8128e-02,
6.3444e-02, 1.6291e-01, -3.8727e-03,
6.9107e-02, 6.8477e-03, 3.9528e-01,
3.8471e-02, 3.0745e-02, 2.8446e-02,
1.0625e-02, -2.4006e-01, -1.2490e-01,
-1.3002e-01, 2.0025e-01, 4.7618e-02,
-3.9705e-02, -1.2017e-02, -9.8790e-02,
-1.2798e-02, -2.7540e-01, -1.5138e-01,
-1.0290e-01, 5.0112e-02, -1.7391e-01,
-9.7079e-02, -2.2350e-03, -5.9211e-02,
-2.4728e-01, 4.3353e-01, -1.9306e-01,
-1.8039e-01, 1.2689e-01, 5.2103e-02,
-4.5547e-39, -7.8040e-39, 4.1196e-39,
1.5214e-39, 9.3494e-40, -3.9058e-39,
7.8718e-39, 7.1728e-39, 5.3609e-39
}
,
{
-9.4505e-02, -7.0477e-02, -1.5792e-04,
-2.3475e-01, 5.8849e-02, -6.8161e-02,
7.0658e-03, -1.0276e-01, 7.2471e-02,
-7.3820e-03, -3.0740e-02, -1.1131e-01,
2.8429e-02, -3.5750e-01, -8.4683e-02,
-5.0210e-02, -3.1096e-03, -2.3730e-02,
4.5756e-02, -3.6724e-01, -7.6317e-02,
3.8467e-01, 5.5354e-02, 1.6943e-01,
-4.9403e-02, 7.4709e-02, -3.0550e-02,
-7.5324e-03, -1.6910e-01, -1.6103e-01,
4.6314e-02, 1.2912e-01, -3.0488e-02,
2.6388e-02, 5.6925e-02, 6.4396e-02,
3.7748e-03, -2.1310e-02, 1.1410e-01,
-7.0164e-03, 1.8228e-02, -2.5920e-01,
6.8416e-02, 1.3998e-01, 1.3290e-01,
-3.8861e-02, 8.9898e-02, -3.6631e-03,
3.5528e-02, 1.1249e-01, 3.7018e-02,
-6.2334e-02, -4.8470e-02, -4.4094e-02,
3.1574e-02, -1.2162e-01, 1.9669e-01,
-4.6605e-03, 1.1887e-02, -1.1958e-01,
-1.0736e-01, 6.0131e-02, -1.2829e-02,
2.1305e-01, -8.4750e-02, -2.7028e-02,
-3.0351e-01, -6.4246e-03, -7.9128e-02,
1.3081e-01, 9.5878e-02, 1.6193e-02,
-5.8335e-02, -5.5968e-02, -2.6284e-03,
-7.2218e-02, -1.1661e-02, 1.9413e-03,
-1.6043e-01, 1.1388e-01, -3.6473e-02,
-2.4077e-02, 1.2210e-01, 1.5531e-02,
1.5074e-01, -4.5545e-01, 6.1004e-02,
-6.3948e-02, 3.9804e-02, -4.8822e-04,
1.3135e-01, 9.2392e-02, 8.8914e-02,
1.2941e-01, -3.6052e-01, 3.9571e-02,
-2.4838e-02, 7.0425e-02, -1.9016e-02,
2.7629e-02, -7.0648e-02, -2.6838e-02,
-2.1844e-02, -9.6184e-02, -3.3611e-02,
8.5938e-02, 5.2663e-02, 2.2938e-02,
-6.9909e-03, -3.9627e-03, -6.5162e-02,
-4.9296e-03, -4.0383e-02, 6.7670e-01,
1.5251e-02, 2.1000e-01, -1.9137e-01,
2.2825e-02, 1.6640e-02, 3.8147e-02,
7.1902e-02, -4.9821e-02, -6.5592e-03,
1.5826e-02, 2.1626e-02, 1.1646e-02,
1.5180e-02, 1.5664e-01, 9.8696e-03,
-7.2901e-02, -2.1818e-01, 9.2465e-02,
6.4349e-02, 6.0290e-02, -2.1094e-02,
2.0633e-02, 4.8808e-02, 1.4080e-02,
4.8083e-02, -1.5979e-01, -5.3634e-02,
6.5004e-02, 7.0317e-02, 1.9117e-02,
-4.3048e-02, 5.9627e-02, -1.5068e-02,
1.8861e-01, -2.6868e-01, 1.2789e-03,
1.1273e-01, -2.7796e-01, 4.9841e-02,
4.9008e-03, 1.8241e-02, 4.3449e-02,
2.1420e-02, -1.0299e-01, -1.6235e-01,
-1.9300e-02, -1.5121e-02, 2.0616e-03,
-2.7591e-01, 3.9622e-02, -5.0492e-02,
1.1866e-01, 5.5502e-01, -2.3622e-02,
-6.1204e-03, -7.4778e-03, 6.7961e-03,
2.4215e-02, 2.1643e-03, 1.1442e-01,
7.5326e-02, 1.4455e-01, 8.0497e-02,
6.6115e-02, 2.9762e-02, 2.8680e-02,
3.7784e-03, -2.2769e-02, 2.4529e-02,
-1.1441e-02, 9.8463e-02, -1.2761e-02,
1.0642e-02, 5.2871e-02, 1.9650e-01,
-2.2225e-02, 3.1504e-02, 8.5645e-03,
4.9125e-02, 1.4439e-01, 8.4573e-02,
1.0103e-02, 1.9097e-02, 4.5579e-03,
-2.5773e-02, -4.0984e-02, -1.5402e-01,
5.3050e-02, 1.5509e-01, -1.9040e-01,
3.7700e-02, 1.0632e-01, -2.2520e-02,
-5.6582e-02, -4.6040e-02, -5.7562e-03,
-3.4924e-01, 3.2933e-01, 5.5211e-02,
2.3230e-02, 8.5108e-02, 3.7448e-02,
1.4266e-02, -7.2016e-02, 4.5252e-03,
-7.0246e-02, 3.9142e-01, -1.9216e-02,
2.0536e-01, -3.5615e-01, 3.8009e-02,
1.2252e-02, -5.7966e-02, 9.2672e-02,
2.4225e-02, -1.0186e-01, -1.4219e-01,
-2.8815e-02, 1.3088e-02, -2.6031e-03,
-6.2341e-02, -1.1216e-01, -7.2122e-02,
1.1812e-01, 4.3493e-01, 4.3593e-02,
-1.3524e-02, 4.8679e-03, -1.0598e-02,
3.4904e-02, 5.5813e-02, 4.6811e-02,
8.0928e-02, 7.6607e-02, 6.3968e-02,
5.4647e-02, 2.8693e-02, 2.1957e-02,
-8.2725e-03, 5.4668e-02, -3.0533e-02,
-9.3953e-03, 1.5874e-01, -3.6093e-01,
5.6412e-03, 1.8977e-02, 2.0088e-01,
-1.9414e-02, 1.9088e-02, 1.4504e-02,
5.8462e-02, 6.2645e-02, 4.9884e-02,
6.6913e-03, 4.3639e-02, 1.5139e-02,
-2.1897e-02, -1.1436e-01, -5.0838e-02,
7.1176e-02, 8.4667e-02, -1.4480e-01,
3.7676e-02, 1.0840e-01, -2.6417e-02,
-4.7584e-02, -4.0524e-02, 6.3032e-03,
-2.4822e-01, 2.4635e-01, 5.5942e-03,
-1.3347e-02, 1.0515e-01, 4.2549e-02,
-1.2380e-01, 4.1074e-02, 1.2608e-02,
-1.2042e-01, 2.9516e-01, 2.8380e-03,
5.1930e-01, -1.6498e-01, 5.7152e-02,
-6.5519e-02, 1.1001e-01, 2.8943e-02,
1.0854e-01, -6.0107e-02, -1.6730e-01,
-4.4417e-02, 3.4347e-02, -3.3756e-02,
2.0694e-01, 3.3047e-01, -9.4497e-02,
-2.1977e-01, 4.6614e-02, 1.2201e-01,
-2.9541e-02, 1.8900e-01, -1.8391e-01,
2.0064e-02, -3.2480e-02, -8.9041e-03,
-5.6385e-02, -6.4531e-02, 1.2879e-02,
-3.2499e-02, 1.0883e-02, 7.3564e-03,
1.9828e-02, -2.3278e-01, -4.3789e-03,
9.7669e-02, 1.3008e-01, -1.0405e-01,
2.2618e-02, -2.5495e-01, -1.0718e-01,
4.3524e-02, -7.3127e-02, 8.2424e-02,
-5.0193e-02, 4.0634e-03, 4.0696e-02,
2.7419e-02, 1.8353e-01, 9.2117e-02,
-7.4918e-02, 1.0602e-01, -3.4752e-02,
-1.3331e-01, -2.9583e-02, -5.2197e-03,
-3.7852e-02, 1.5998e-01, 1.5078e-03,
-5.6512e-02, 1.3378e-01, 1.4512e-02,
4.5255e-02, 2.4702e-01, -2.4848e-02,
-1.7526e-01, 1.5532e-01, 8.6686e-02,
3.1486e-02, -2.3247e-02, 9.7320e-03,
-5.2106e-01, 4.7937e-02, 4.1614e-02,
5.5436e-02, -2.0432e-01, 1.2444e-02,
-5.6792e-02, -5.5632e-02, 5.7612e-02,
-6.0248e-04, 4.9770e-02, -6.7956e-02,
1.3389e-02, -9.4141e-03, -7.3497e-03,
-4.6361e-01, 2.7450e-01, -8.2210e-02,
-2.6737e-01, -6.6114e-02, 6.3568e-02,
1.6910e-02, 1.4456e-01, -9.0081e-02,
8.8278e-03, 2.1776e-02, 8.7710e-03,
-2.3378e-02, -4.3907e-02, -3.6751e-02,
-2.4694e-03, -6.0419e-03, 3.0840e-02,
-1.6968e-02, -8.2266e-02, -1.0049e-01,
3.4429e-02, 1.0960e-01, 3.8355e-01,
-4.0301e-04, -3.1089e-02, -2.1373e-02,
-2.4172e-02, 4.6432e-02, 8.0742e-03,
-2.3134e-02, 1.7789e-02, 2.7136e-02,
3.0729e-02, 6.9008e-03, 1.2822e-02,
3.5043e-02, -6.1749e-02, -1.2565e-02,
-1.0354e-02, -2.6515e-03, 4.5632e-03,
-5.9818e-02, -9.7686e-04, -6.6467e-03,
-5.0833e-01, 1.8474e-02, 1.3598e-02,
3.6287e-01, 1.3698e-01, -1.2806e-02,
-2.8618e-02, -2.9128e-02, 2.9855e-02,
8.1243e-02, 4.7414e-02, -4.7434e-02,
-3.3738e-02, -3.4926e-01, 1.7786e-02,
1.0056e-01, -5.7937e-02, -1.8308e-02,
1.8214e-02, -1.9519e-01, 2.2152e-02,
-7.3543e-02, 2.0786e-01, -5.8196e-02,
3.9396e-02, -4.5349e-02, 1.5748e-02,
-5.4604e-03, 4.5777e-01, 1.7295e-01,
-2.0570e-01, -3.0970e-01, -1.9075e-01,
7.6751e-02, -1.3099e-01, 6.1278e-02,
6.0222e-02, 5.4418e-02, 1.2259e-01,
3.2160e-02, 8.5146e-03, 3.4578e-02,
-5.4391e-02, -2.5285e-02, 1.0251e-02,
-3.2763e-02, 7.9163e-02, -7.5136e-02,
1.8545e-02, -2.1972e-02, 1.3887e+00,
-1.2402e-03, -2.5679e-01, 7.2392e-02,
4.9692e-03, 1.7034e-02, 4.7043e-02,
1.2093e-02, -3.1230e-02, -8.2613e-03,
-7.8701e-03, -2.3516e-03, -7.2487e-04,
6.8495e-02, -5.2837e-02, -2.2482e-01,
1.3259e-02, 4.8009e-01, -4.0940e-02,
-4.1547e-02, -2.8753e-02, -5.2579e-03,
-1.7152e-01, -3.3676e-02, 1.5080e-02,
8.6014e-02, 7.9239e-02, 4.2196e-02,
-9.2870e-02, -1.5913e-02, -6.5804e-03,
4.0364e-02, 2.4914e-02, -1.4638e-02,
8.8705e-03, 2.8037e-01, 3.9890e-02,
1.1638e-01, 2.9467e-01, -4.3518e-03,
7.1091e-02, -2.2378e-01, 4.7315e-02,
3.8006e-02, -2.0246e-01, -3.8679e-02,
-5.8004e-02, 5.8991e-02, -6.2149e-03,
-1.3034e-01, 1.5540e-01, -5.2558e-02,
8.1594e-02, 3.5570e-01, 2.1220e-02,
1.4977e-02, 2.4493e-03, -4.0627e-02,
1.1402e-01, 6.6962e-02, 1.1150e-01,
1.1824e-01, 1.1492e-01, 1.1219e-01,
6.6067e-02, 6.9639e-02, -8.1836e-02,
-2.7144e-02, 1.4677e-01, -5.9261e-02,
4.4573e-03, 2.6235e-01, -7.4379e-01,
-8.3569e-03, 9.4465e-02, -6.5653e-03,
2.1095e-02, -1.8853e-02, 6.7972e-02,
1.2957e-01, 3.0122e-02, -1.0061e-02,
-3.4832e-02, 8.5404e-02, 5.7663e-02,
-5.0400e-02, -1.2050e-01, -2.3344e-01,
1.4977e-01, 7.8806e-02, 6.0771e-03,
5.6483e-02, 6.3927e-02, -5.8376e-03,
-2.8124e-01, 5.2581e-02, -1.3918e-04,
-1.4341e-01, 3.6558e-01, 4.7332e-02,
-3.9089e-02, 8.4188e-02, 2.7058e-02
}
};
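// Per-layer bias vectors of the HDNL2 variant: judging by the 8x8
// shape declared below, one 8-channel bias vector for each of the 8
// convolution layers of this network.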
static __device__ __constant__ const float HDNL2biasL[8][8] =
{
{
7.2678e-02, 8.5350e-03, 5.0400e-02, 2.6268e-02, 6.2434e-02, 1.0483e-01, -7.1650e-39, 1.0062e-01
}
,
{
-4.9844e-39, -1.8567e-39, 6.0627e-04, -1.9234e-38, 1.8331e-02, -1.1364e-01, -8.3962e-03, -1.7372e-04
}
,
{
-0.0091, -0.0055, 0.0237, 0.0093, -0.0479, 0.0188, -0.0034, 0.0399
}
,
{
6.5694e-03, -2.2259e-01, -1.1226e-02, -8.0327e-02, -1.0615e-36, 1.0402e-02, 7.6246e-03, -6.5940e-02
}
,
{
5.0711e-02, 7.1911e-02, 2.5293e-02, -1.5608e-02, 5.3835e-02, -1.6967e-38, 2.2243e-02, 3.2742e-02
}
,
{
1.5629e-02, 2.9703e-02, 2.6412e-02, 1.2301e-02, 1.8654e-01, -7.2260e-03, 2.4613e-02, -3.1853e-38
}
,
{
-0.0030, -0.0123, 0.0348, 0.0277, -0.0152, 0.0005, -0.0124, -0.0209
}
,
{
7.4856e-03, 7.2931e-04, 8.3015e-03, 6.4820e-03, 2.4008e-04, 7.0377e-06, 1.7948e-03, 8.9869e-03
}
};
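// Final 1x1 kernel of the HDNL2 variant: 4 * 8 floats, i.e. 8 feature
// channels mapped to 4 outputs (consistent with a 2x sub-pixel
// upscale; the exact use is defined by the kernels that consume it).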
static __device__ __constant__ const float HDNL2kernelsL10[4 * 8] =
{
0.4240, 0.4165,
0.1648, 0.1909,
-0.0985, -0.4455,
0.4639, -0.0533,
-0.1368, 0.4413,
0.2539, 0.3294,
0.2458, -0.3256,
-0.0479, 0.3200,
-0.3977, -0.0422,
-0.2736, 0.1053,
0.3902, 0.0594,
-0.0721, -0.2988,
0.0495, 0.1309,
-0.1703, 0.0033,
0.3061, 0.1827,
0.2443, -0.1259
};
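// --- Illustrative sketch (not from the original file) ---
// HDNL2kernelsL10 above is declared as 4 * 8 floats, which matches a
// 1x1 convolution collapsing 8 feature channels into 4 outputs. The
// helper below is an assumption: it presumes row-major
// [output][channel] storage, and its name is hypothetical rather than
// an API of this file.
static __device__ __forceinline__ float hdnl2L10Dot(
    const float feat[8], // the 8 feature-map samples at one pixel
    const int o)         // output index, 0..3
{
    float acc = 0.0f;
#pragma unroll
    for (int c = 0; c < 8; ++c)
        acc += HDNL2kernelsL10[8 * o + c] * feat[c];
    return acc;
}
// First-layer kernels of the HDNL3 variant: 8 filters of 9 weights
// each (a 3x3 window over a single input channel, judging by the
// declared 9 * 8 shape).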
static __device__ __constant__ const float HDNL3kernelsL1[9 * 8] =
{
-0.0461, 0.1274, 0.2976,
-0.0393, -0.1251, 0.2527,
0.0791, 0.0600, -0.0303,
-0.0520, -0.5039, -0.3305,
-0.0115, 0.0456, 0.4370,
0.0601, 0.0780, 0.3106,
-0.0017, -0.0018, -0.0017,
-0.0017, -0.0018, -0.0018,
-0.0017, -0.0017, -0.0017,
0.2666, 0.1687, 0.2303,
-0.1901, 0.3825, 0.3024,
0.1811, 0.0581, 0.2080,
-0.1246, 0.0155, -0.4075,
0.1156, 0.5929, 0.1449,
-0.1080, -0.0171, -0.0516,
-0.0817, 0.2247, 0.0472,
0.0394, 0.1085, 0.1435,
-0.0480, -0.0135, -0.0606,
-0.0083, 0.2045, 0.1056,
-0.2239, 0.2823, -0.1926,
0.2581, 0.1362, -0.1914,
-0.0833, 0.0702, 0.0234,
0.3616, 0.3789, -0.1840,
0.0128, 0.1347, -0.0187
};
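// Biases for the 8 first-layer filters of the HDNL3 variant, matching
// HDNL3kernelsL1 above.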
static __device__ __constant__ const float HDNL3biasL1[8] =
{
-0.1329, -0.0431, -0.0031, -0.0129, 0.2294, -0.2595, -0.2370, -0.0499
};
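// --- Illustrative sketch (not from the original file) ---
// A minimal example of how the two first-layer constants above could
// be consumed: filter f owns the 9 weights at HDNL3kernelsL1[9*f] ..
// HDNL3kernelsL1[9*f + 8] (3x3, row-major) plus the bias
// HDNL3biasL1[f]. Both the helper name and the patch layout are
// assumptions drawn from the declared shapes, not from this file's
// actual kernels.
static __device__ __forceinline__ float hdnl3L1Response(
    const float patch[9], // 3x3 input neighborhood, row-major
    const int f)          // filter index, 0..7
{
    float acc = HDNL3biasL1[f];
#pragma unroll
    for (int i = 0; i < 9; ++i)
        acc += HDNL3kernelsL1[9 * f + i] * patch[i];
    return acc; // pre-activation response of filter f
}
// Mid-network 3x3 kernels of the HDNL3 variant: 8 layers, each holding
// 8x8 input/output channel pairs of 3x3 weights (9 * 8 * 8 floats per
// layer). Note this table is __device__ but not __constant__,
// presumably to keep the combined weight tables within the 64 KB
// constant-memory budget.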
static __device__ const float HDNL3kernelsL[8][9 * 8 * 8] =
{
{
1.4090e-01, -1.8985e-02, -6.8589e-02,
6.6491e-02, 1.4360e-02, 8.5223e-02,
1.8782e-01, 9.8042e-02, -3.4558e-02,
2.5606e-01, 2.2027e-01, 2.7603e-01,
1.9424e-01, 3.4537e-02, 9.5975e-02,
1.1223e-02, -4.3377e-01, -1.4760e-01,
-3.4293e-40, -5.5421e-40, -4.4763e-41,
-6.3322e-40, -3.1495e-40, -7.8264e-41,
-1.5375e-40, -3.3656e-40, 5.2441e-40,
1.2413e-01, 1.5682e-01, 1.1465e-01,
1.6683e-02, 7.8382e-02, 1.0110e-01,
1.4902e-01, 1.3608e-01, 1.1674e-01,
-6.5160e-02, 7.7748e-02, 2.1773e-02,
2.0652e-02, 2.7245e-01, 1.0297e-01,
-2.0953e-02, 6.1685e-02, 4.4128e-02,
6.1538e-02, -1.9746e-02, -1.2785e-02,
2.5931e-02, 1.2740e-01, 9.0033e-02,
8.6448e-02, 2.0684e-01, 9.8063e-02,
-7.8384e-03, 6.3277e-02, 7.6751e-03,
3.5956e-02, 1.0555e-01, 4.2728e-02,
7.1578e-02, 1.3253e-01, 1.1171e-01,
-2.7538e-02, 1.5836e-01, 1.0014e-01,
-4.9113e-02, 1.6911e-01, 2.7329e-01,
7.9170e-03, 9.5440e-02, 1.3922e-01,
8.0151e-02, 4.3438e-02, 5.5314e-02,
3.4896e-02, 1.6816e-01, -4.5783e-03,
-1.4579e-03, 2.0493e-01, 2.6238e-02,
2.6499e-02, 3.9490e-01, -1.1582e-02,
3.5790e-01, 1.4317e-01, -2.1775e-01,
4.1794e-03, -3.2513e-01, -1.6729e-01,
3.4040e-41, -6.2960e-42, -1.0067e-40,
5.5978e-41, -1.2353e-40, -1.1347e-40,
5.4572e-40, -6.4384e-40, -4.1234e-40,
-9.3690e-02, 1.7765e-01, 1.1275e-01,
9.1159e-03, 1.7375e-01, 1.1427e-01,
-7.8385e-02, 1.5658e-01, -3.8399e-02,
-1.0756e-01, 5.9943e-02, -6.7273e-02,
-1.1117e-01, 1.5267e-01, 1.1563e-01,
-1.2964e-01, -3.8604e-02, -2.4532e-02,
1.6324e-02, 1.3112e-01, 6.1679e-03,
-7.7703e-03, 2.6311e-01, 8.9427e-02,
-2.8948e-02, 1.9341e-01, 4.4339e-02,
6.4559e-03, -6.8885e-02, 1.1481e-01,
-1.0665e-01, 3.8613e-02, 7.0410e-02,
-6.1680e-02, -1.7374e-02, 9.5475e-03,
-4.0081e-02, -3.1549e-02, 2.8311e-01,
-1.2178e-01, -1.3848e-01, 1.7416e-01,
-8.1756e-02, -1.7718e-01, 7.9533e-02,
-3.1299e-03, -3.2305e-03, -3.2094e-03,
-3.1548e-03, -3.2553e-03, -3.2453e-03,
-3.1459e-03, -3.2278e-03, -3.2076e-03,
-3.6554e-05, -3.6715e-05, -3.1284e-05,
-1.4927e-05, -1.4357e-05, -1.2185e-05,
-1.5771e-09, -1.1439e-09, -6.4952e-10,
3.7723e-40, 4.9166e-40, -2.1946e-40,
-4.7599e-40, -4.3356e-40, -8.3928e-41,
2.6127e-40, 4.8634e-40, 2.7720e-40,
-5.4972e-03, -5.6409e-03, -5.6919e-03,
-5.5818e-03, -5.7079e-03, -5.7542e-03,
-5.6338e-03, -5.7437e-03, -5.7600e-03,
-3.7940e-03, -3.8853e-03, -3.8693e-03,
-3.8995e-03, -3.9616e-03, -3.8945e-03,
-3.8438e-03, -3.9156e-03, -3.8269e-03,
-7.2342e-05, -7.8682e-05, -4.7701e-05,
-1.1126e-04, -1.1918e-04, -7.8931e-05,
-1.1644e-04, -1.2418e-04, -8.2350e-05,
-2.3881e-04, -3.7971e-04, -3.9448e-04,
-2.4112e-04, -3.8395e-04, -4.0189e-04,
-2.3451e-04, -3.7525e-04, -3.9222e-04,
-3.9853e-03, -4.0748e-03, -4.1134e-03,
-4.0685e-03, -4.1456e-03, -4.1548e-03,
-4.0547e-03, -4.1388e-03, -4.1357e-03,
5.3008e-02, 2.2252e-02, -7.1158e-02,
-6.6411e-02, -3.0015e-02, -2.2526e-02,
1.2259e-01, -6.2488e-02, 5.6190e-02,
1.5981e-02, -7.6832e-02, 1.7908e-02,
2.7618e-01, 5.4054e-02, 8.7282e-02,
1.5212e-02, -1.1097e-01, -2.2265e-02,
-6.8532e-41, -6.0539e-40, 4.6269e-40,
-2.9221e-40, -3.8468e-40, -4.6656e-40,
6.4572e-40, -6.1625e-40, 6.4545e-40,
3.5920e-02, 9.0955e-02, -1.7626e-02,
4.7826e-02, 1.8832e-01, -4.4043e-02,
-3.8405e-02, 5.9176e-02, 6.8182e-02,
3.7657e-03, 2.6441e-02, -2.5585e-01,
1.0969e-01, 2.3914e-01, 3.5120e-02,
-1.6252e-01, 3.4371e-02, -2.7501e-01,
4.9289e-02, 2.2088e-02, -1.4588e-02,
1.6384e-01, -8.1421e-03, -6.9613e-02,
1.0820e-01, 1.1137e-01, 7.2648e-03,
1.5243e-01, 1.3659e-01, 2.7553e-02,
1.3966e-01, 1.1019e-01, 1.9817e-02,
1.1420e-01, -5.1386e-03, 6.8617e-03,
-1.3264e-02, 2.1508e-01, 4.8430e-02,
5.1149e-02, 2.9165e-01, 2.8077e-01,
2.9288e-03, 9.0611e-02, 8.1538e-02,
-1.1812e-01, 1.5603e-02, 1.1571e-01,
-3.4958e-02, -1.6688e-03, -4.6619e-02,
-1.0417e-02, -3.1802e-02, 1.8357e-02,
1.1064e-01, 1.8397e-01, 4.8449e-02,
-8.3336e-03, 1.6029e-01, 3.9490e-02,
-4.0959e-01, -2.6134e-01, 2.0766e-02,
6.6073e-41, -6.7490e-40, -5.1131e-41,
-4.3320e-41, -3.7194e-40, 2.0674e-40,
-5.2359e-40, -3.4006e-40, -4.9257e-40,
-4.7260e-02, 2.8518e-03, -2.7764e-01,
6.9182e-03, 1.3938e-01, -1.3162e-01,
-6.0901e-03, 1.0339e-01, 6.0419e-02,
-1.4449e-01, -3.2043e-02, -9.1466e-02,
-1.4022e-02, 3.1703e-01, 5.8166e-02,
-1.5243e-02, 1.4521e-01, 2.0790e-04,
-1.0255e-01, -7.8766e-02, -1.2395e-01,
7.9894e-03, 3.7079e-03, -3.2134e-02,
1.1663e-01, 1.4808e-01, 2.0431e-01,
7.4026e-02, 6.9632e-02, 1.7156e-01,
-3.0385e-02, 2.3218e-01, 7.3855e-02,
-8.8530e-02, -5.9224e-02, 2.3431e-02,
1.4596e-02, 3.2442e-02, -1.1308e-01,
-6.3734e-02, 2.5270e-01, 7.8081e-02,
1.0468e-02, 1.5473e-01, 3.8676e-02,
-1.0842e-01, 8.6778e-03, 1.4985e-01,
8.1757e-03, -8.2109e-02, 8.5471e-02,
-2.1437e-01, -6.1173e-02, 4.8163e-02,
2.8965e-01, 1.9748e-01, 4.2651e-02,
1.8196e-01, 3.3932e-01, 3.9594e-01,
3.9657e-01, 4.2167e-01, 2.9290e-01,
7.4011e-41, 6.5220e-40, -5.9885e-40,
7.4011e-41, 6.2047e-40, -7.1533e-40,
4.1950e-40, -1.1886e-40, -5.9922e-40,
1.9662e-01, 2.1402e-01, 3.1041e-02,
-1.1079e-01, 1.3361e-01, -2.1608e-01,
-1.7962e-01, -8.0576e-02, -3.1277e-01,
1.0620e-02, 2.4024e-01, 1.0657e-01,
-7.9906e-05, 2.8760e-01, 4.1231e-02,
-1.3261e-02, -1.0868e-01, -1.1267e-01,
-1.0659e-02, -2.6051e-02, -4.5389e-02,
5.8261e-02, 4.0288e-02, 6.7050e-02,
-2.6462e-01, -1.7846e-01, -1.0002e-01,
-6.2904e-02, 1.5275e-01, 4.4282e-03,
1.4446e-01, 1.1814e-01, -8.0349e-02,
2.0331e-02, 3.3014e-02, 1.2710e-01,
1.6084e-01, 3.8819e-01, 1.0854e-01,
-6.8126e-03, 3.5673e-01, 1.8938e-01,
-1.1660e-01, -5.7694e-02, -2.9194e-01,
1.2775e-02, -3.2769e-02, 1.7228e-02,
1.8324e-01, 1.1983e-01, -1.6944e-02,
1.0593e-01, 1.3451e-01, 5.2536e-02,
1.9147e-01, 1.3875e-01, 1.0298e-01,
-2.0871e-01, -1.7197e-01, 1.1342e-01,
-1.7581e-01, 4.0972e-02, 2.9796e-01,
3.2588e-40, -4.3663e-40, -2.6518e-40,
3.2588e-40, -4.3663e-40, -2.6518e-40,
4.1600e-40, -4.4350e-40, -4.8744e-41,
3.7289e-02, 8.1769e-03, 1.7059e-02,
3.7735e-02, 6.6571e-02, -6.6137e-02,
-5.8890e-02, -7.7019e-03, -6.2128e-02,
-4.0751e-02, 1.1710e-01, -1.1586e-01,
-1.2999e-01, -1.6384e-02, -2.1858e-01,
-2.8028e-01, -6.0443e-02, -1.1880e-01,
1.8152e-01, 1.5364e-01, 1.1781e-01,
2.9010e-01, 2.4612e-01, 1.3170e-01,
1.9022e-01, 1.8117e-01, 1.6483e-01,
9.3342e-02, 2.6607e-01, 1.4679e-01,
1.6729e-01, 2.5374e-01, 1.1954e-01,
6.3258e-02, 1.0557e-01, 6.7221e-02,
-5.2017e-02, 1.9628e-01, 1.7243e-01,
-3.2667e-02, 1.5756e-01, 1.9347e-01,
-9.5252e-02, -3.7525e-02, -3.4543e-04,
-4.9759e-02, 4.0383e-02, -2.0231e-02,
-1.1776e-01, 3.4182e-02, 3.6720e-02,
-1.4822e-02, -4.1658e-02, -1.3729e-02,
-1.9215e-02, 2.4427e-02, -9.0638e-02,
-1.4438e-01, -2.1785e-01, -5.1789e-02,
-2.0279e-01, -3.3918e-01, -1.6871e-01,
6.1262e-41, 2.4066e-40, 6.6851e-40,
5.3430e-40, -3.2335e-40, -3.7400e-40,
-6.3256e-40, -4.7491e-40, 2.2854e-40,
-6.8701e-03, -1.4849e-02, 8.6332e-02,
1.1686e-01, 1.8346e-01, 1.8797e-01,
-2.3251e-02, 7.3973e-02, 1.0532e-01,
-6.1838e-02, 5.6667e-02, 8.1584e-02,
-3.8900e-02, 7.0927e-02, 9.5606e-02,
-4.5098e-02, -1.0829e-01, -1.2224e-01,
3.5047e-03, 3.2898e-02, 3.5622e-02,
1.6170e-02, 4.3721e-02, 9.7496e-02,
2.3445e-03, 6.0417e-02, 1.3482e-01,
6.0570e-02, -5.7139e-03, -1.0883e-03,
2.2701e-02, -2.9113e-02, 7.9178e-03,
8.1214e-02, -4.1408e-02, 1.3616e-02,
-4.7985e-02, 1.0304e-02, -3.3236e-02,
-1.6334e-02, -8.1538e-02, 1.8629e-02,
-9.3720e-02, -1.2920e-01, -4.0836e-02
}
,
{
1.0443e-01, 1.5461e-01, -1.4743e-01,
1.6716e-01, 1.0532e-01, -2.3088e-01,
1.0218e-01, 1.2393e-01, -9.6646e-02,
1.7659e-01, -7.3279e-02, 1.9627e-02,
1.7721e-01, -1.4329e-01, -1.2533e-01,
1.6551e-01, -3.4616e-01, 9.5618e-02,
4.5827e-09, 9.3413e-09, 1.7015e-08,
1.2245e-08, 9.9727e-09, 6.7108e-09,
1.9612e-07, 3.9479e-08, 1.1537e-09,
2.2127e-02, 9.2715e-02, -1.2150e-01,
7.5652e-02, 1.1548e-01, -1.2420e-01,
-1.0693e-03, -7.2839e-02, -1.9664e-01,
1.4466e-01, -1.8552e-03, -1.3575e-01,
2.0699e-01, 8.0396e-02, -1.9651e-01,
-4.7075e-02, -5.1259e-02, -8.2593e-02,
-2.2385e-01, 3.0066e-03, -2.2659e-02,
6.1827e-02, 2.5331e-02, -5.3898e-02,
2.7091e-01, 1.0991e-01, -3.3600e-01,
-8.9499e-02, -9.3821e-03, 2.2675e-02,
1.1213e-01, 1.3276e-01, 2.0368e-02,
6.5408e-02, 4.1598e-02, -4.7917e-02,
6.0740e-03, 1.2236e-04, -1.0659e-01,
-1.8072e-02, -9.1082e-02, -9.0414e-02,
4.9052e-02, -1.4298e-01, -3.9721e-02,
1.1840e-01, 2.2503e-01, 2.4587e-02,
9.3023e-02, 6.9650e-02, 1.6798e-01,
-1.5640e-03, 1.6300e-02, 6.3585e-02,
1.4431e-01, 3.7885e-02, 1.6692e-02,
1.7345e-01, 7.2315e-02, 1.8942e-02,
1.1081e-01, 8.2973e-02, -9.7717e-02,
-5.2264e-03, -5.2641e-03, -5.2727e-03,
-5.2809e-03, -5.3125e-03, -5.3153e-03,
-5.2915e-03, -5.3251e-03, -5.3231e-03,
6.0008e-02, 2.0268e-01, 1.3396e-01,
-2.5202e-03, -1.7750e-02, -1.2019e-02,
1.1806e-01, -2.2306e-02, 3.6464e-02,
7.9324e-02, 3.1883e-02, 1.5483e-02,
-4.3537e-02, 1.2204e-02, 1.8905e-02,
-8.1581e-02, -1.1307e-01, -6.0718e-02,
-2.4865e-01, -1.0199e-01, 1.9886e-02,
-1.0519e-02, 6.9972e-02, 4.8012e-02,
-1.5282e-02, 1.1979e-01, 8.7968e-02,
-3.6752e-02, 1.9523e-02, 7.1321e-02,
-5.8295e-02, 5.3242e-02, 1.2773e-01,
-7.9671e-02, 8.3249e-04, 7.4904e-02,
1.1792e-01, 2.2135e-03, -9.0963e-03,
-2.8356e-03, -4.2661e-02, 6.9497e-02,
9.3561e-02, 1.0475e-01, 5.4745e-02,
-8.5901e-02, -2.1969e-01, -1.5572e-01,
3.6473e-02, 1.1097e-01, -2.6830e-02,
1.2199e-02, 1.8917e-01, 1.1906e-01,
1.0664e-01, -2.7005e-01, 1.5492e-01,
-4.1771e-02, -1.6580e-01, 2.9234e-02,
-1.9854e-02, 2.1436e-01, -1.1100e-01,
4.5382e-04, 4.2085e-04, 5.6852e-04,
3.4951e-04, 3.7354e-04, 3.2786e-04,
2.0790e-04, 2.8606e-04, 3.2415e-04,
-1.5500e-02, 2.2865e-02, -3.0070e-01,
1.8467e-01, 2.4899e-01, 1.4812e-02,
-1.2318e-01, 2.3175e-01, 7.2244e-02,
1.6713e-01, 1.9089e-02, -2.7494e-01,
1.0202e-01, 2.9200e-01, -3.6055e-03,
1.3265e-01, 2.2551e-01, 1.9897e-01,
-3.9474e-02, 1.6262e-01, 1.6726e-01,
-8.6222e-02, 2.0573e-01, -7.3247e-01,
-9.5391e-02, 3.8933e-01, 1.5861e-01,
-1.2202e-01, -6.4735e-02, -1.1762e-01,
-2.2427e-02, -1.9171e-01, -1.6092e-01,
3.2356e-01, -2.2234e-01, -1.3743e-01,
-1.1493e-01, -2.4936e-02, 2.9212e-02,
-9.8112e-02, -1.8021e-02, -1.0507e-01,
-1.0168e-01, 1.1759e-01, -9.8203e-02,
-2.8871e-02, 1.3249e-01, 7.8378e-02,
-1.1012e-01, -4.0596e-02, 5.4202e-02,
4.9022e-02, -1.1744e-01, 9.8888e-02,
1.3343e-02, 1.4358e-01, -8.7142e-02,
1.9952e-01, 3.3708e-02, 2.0721e-02,
2.6527e-02, -2.3822e-01, 2.4706e-01,
-3.2750e-04, -2.8475e-04, -6.3494e-05,
-2.2378e-04, -1.8046e-04, -1.9242e-05,
-4.2124e-05, -2.2062e-05, 4.5500e-07,
1.1692e-01, 4.0366e-01, -1.8709e-02,
8.2700e-02, 1.7884e-01, -1.3520e-01,
3.7758e-02, 3.7048e-02, -2.8109e-01,
-2.3438e-01, 5.9423e-02, -1.7300e-01,
1.0343e-02, 7.2307e-02, -4.3852e-01,
-5.7429e-02, -4.9136e-02, -8.0327e-02,
8.1094e-02, 2.9118e-02, 1.6677e-01,
1.2155e-01, 6.5358e-01, 2.4544e-01,
3.1163e-02, 3.7463e-02, -2.6613e-01,
1.2723e-01, 1.2541e-01, 1.4319e-02,
1.9055e-01, -5.7441e-02, 1.1146e-01,
-1.0690e-02, -1.7567e-01, -1.2238e-01,
-2.0879e-01, -6.5278e-02, -7.9327e-02,
-1.6564e-01, -1.3659e-01, -2.6231e-01,
-3.1916e-01, -2.6553e-01, -9.8647e-02,
-1.0617e-01, 1.2782e-01, -2.1053e-02,
-1.2329e-01, 1.4952e-01, -1.7466e-02,
-1.6969e-01, 3.6980e-02, -6.7732e-02,
-3.1220e-02, 4.0615e-02, -1.5251e-01,
-2.0017e-01, 2.2421e-01, -2.5682e-02,
-6.5873e-02, 1.8346e-01, 1.2982e-02,
1.4021e-06, -1.6929e-05, -8.4696e-05,
1.9580e-05, 2.9943e-06, 3.0084e-06,
2.0769e-04, 1.4661e-05, 2.9503e-06,
-1.4485e-01, 1.8841e-01, -1.7954e-01,
2.1551e-01, 2.2601e-01, -8.6689e-03,
8.6926e-02, -6.8989e-02, -1.2683e-01,
-8.7712e-02, 6.3176e-02, 1.1983e-01,
1.0790e-01, 6.6418e-02, 6.5849e-02,
1.2483e-01, 1.2428e-01, 4.4994e-02,
1.5139e-01, -1.2116e-01, -3.5497e-01,
-6.1889e-02, 3.4088e-01, 1.3148e-01,
-1.6478e-01, 4.4477e-02, -1.1979e-01,
3.8343e-02, 1.7992e-01, 3.6790e-01,
3.0426e-01, 1.1235e-01, 4.9815e-01,
2.6290e-01, 1.9703e-01, 1.5881e-01,
-6.4678e-03, 2.4401e-01, 1.9266e-01,
-1.4089e-01, 1.2323e-01, 4.4340e-02,
-8.8856e-02, 8.4036e-02, -9.8488e-02,
-1.7377e-03, -1.7654e-03, -1.7223e-03,
-1.7651e-03, -1.7919e-03, -1.7491e-03,
-1.7172e-03, -1.7446e-03, -1.7041e-03,
-3.0384e-04, -2.9297e-04, -2.4838e-04,
-3.2961e-04, -3.1678e-04, -2.7009e-04,
-3.1665e-04, -3.0492e-04, -2.6122e-04,
3.7109e-40, -3.7915e-40, -5.2536e-40,
5.8286e-41, -5.6108e-40, 4.3331e-40,
-3.0184e-42, -4.8987e-40, -5.1788e-40,
-4.0457e-04, -4.3257e-04, -4.1616e-04,
-4.2268e-04, -4.5118e-04, -4.3407e-04,
-3.9446e-04, -4.2199e-04, -4.0650e-04,
-1.1253e-16, -1.1328e-14, -2.0489e-14,
-3.0346e-19, -1.7189e-16, -4.5141e-16,
-2.4957e-30, -1.8191e-23, -3.5882e-22,
-3.1610e-36, -1.7544e-24, -2.2187e-21,
-4.2887e-19, -1.5526e-15, -1.5160e-14,
-1.7750e-16, -6.8066e-14, -3.3764e-13,
-6.9570e-24, -5.1139e-23, -2.9335e-23,
-1.9091e-22, -1.0323e-21, -4.5931e-22,
-2.0010e-22, -9.3710e-22, -3.5622e-22,
-2.9470e-04, -2.9081e-04, -2.5958e-04,
-3.2290e-04, -3.1810e-04, -2.8461e-04,
-3.1795e-04, -3.1356e-04, -2.8121e-04,
6.1623e-02, 1.7057e-01, 8.0478e-02,
1.2624e-01, 1.8468e-01, 2.1901e-02,
7.6033e-02, 1.3455e-01, 8.4037e-02,
8.4434e-02, -1.7069e-02, -7.8318e-02,
4.9244e-02, 4.4782e-02, -6.9747e-02,
1.2915e-01, 1.1453e-01, -6.5243e-02,
-5.0985e-03, -5.1407e-03, -5.1687e-03,
-5.1185e-03, -5.1511e-03, -5.1712e-03,
-5.0986e-03, -5.1272e-03, -5.1409e-03,
-1.8186e-02, 6.2680e-02, 3.3235e-02,
1.3398e-02, 1.6497e-01, 4.3523e-02,
-2.4101e-02, 1.3316e-01, 1.8373e-02,
-6.2677e-04, 6.5026e-03, 2.5948e-02,
6.6542e-02, 1.2352e-01, 1.5155e-02,
-8.6237e-02, -2.0907e-02, 1.0237e-02,
-1.7807e-01, -8.6196e-02, -3.2408e-02,
-8.1946e-03, -1.3957e-02, -1.6733e-01,
2.6269e-02, 1.6817e-01, 9.4029e-02,
3.4005e-02, -1.2833e-02, -1.2038e-01,
-4.8950e-02, 3.9857e-02, 1.4048e-02,
-6.4758e-02, 9.9603e-02, 1.0748e-01,
-1.0850e-02, 9.8875e-02, -4.4439e-02,
9.1219e-02, 6.6400e-02, -6.7693e-02,
5.3318e-02, 1.1838e-02, -1.5164e-01,
-5.8568e-02, 1.1249e-01, -3.8286e-02,
-7.1122e-02, 9.5799e-02, 3.8521e-02,
-1.3846e-01, 1.4167e-01, -3.5500e-03,
-1.0343e-01, -3.3025e-02, 3.7186e-02,
-2.0769e-03, 1.3558e-01, -1.3009e-01,
1.0167e-02, 1.5358e-02, -9.8009e-02,
2.4123e-05, -1.1800e-05, -1.4180e-04,
3.5217e-05, -6.3838e-06, -1.2243e-04,
8.5525e-05, 2.1599e-06, -5.3290e-05,
-1.4471e-01, 2.0111e-02, -1.2449e-01,
5.3368e-02, 3.2918e-01, 1.4034e-01,
-1.1833e-01, -1.9225e-02, -1.2658e-01,
-2.6966e-01, 1.1751e-01, 9.7072e-02,
-1.9929e-01, 9.7986e-02, -5.1240e-02,
-9.5073e-02, -6.8070e-02, -2.1318e-01,
9.5305e-02, -4.0551e-02, -1.0936e-01,
5.2687e-02, 4.5340e-01, 2.3531e-01,
-1.3385e-02, 1.5922e-01, -1.8371e-01,
-1.2203e-01, -7.2567e-02, -3.0000e-01,
-3.4356e-02, -1.3471e-01, -9.0995e-02,
-2.5230e-01, -2.4846e-01, -1.8529e-01,
-1.6962e-01, 1.0905e-01, 1.1557e-01,
-1.4405e-01, 8.9191e-02, 1.1715e-01,
-1.3237e-01, 5.2092e-02, -1.2227e-01
}
,
{
2.0013e-01, 2.2105e-01, 1.9196e-01,
6.8158e-02, 1.7154e-01, -8.6677e-02,
9.2652e-02, 1.0789e-01, 1.6745e-01,
-2.9254e-01, -7.6815e-02, 5.8812e-02,
-4.6466e-02, 1.3941e-02, 2.3353e-01,
-1.5033e-01, 7.5167e-02, 1.4433e-01,
2.8008e-02, 3.1625e-01, 3.2877e-02,
-5.8835e-02, -1.7305e-01, -6.1558e-02,
-1.2227e-01, 3.9931e-02, 3.0300e-02,
2.3004e-01, 4.1834e-02, -5.7790e-02,
-2.2861e-01, 2.9314e-01, 1.6884e-01,
-2.8009e-02, 4.7550e-02, -4.4542e-02,
-2.4674e-01, -1.5483e-01, 3.2653e-02,
-2.1574e-01, 3.1083e-01, -1.4025e-03,
1.7354e-02, 5.6417e-02, 1.0844e-01,
-4.2681e-40, 4.5893e-42, -7.4234e-40,
1.7665e-40, 4.0151e-40, 4.6269e-40,
2.5452e-40, -7.0179e-40, -1.2338e-40,
-1.4957e-01, -1.9087e-02, 7.1170e-02,
-1.4435e-01, 8.9560e-02, 1.3879e-01,
-3.6992e-02, 5.9822e-02, 1.9241e-02,
-2.4402e-03, 1.5097e-01, 6.3958e-02,
-1.7630e-01, 3.6009e-01, -2.0383e-01,
-8.5106e-03, 4.0863e-03, -2.7575e-02,
7.8942e-02, -1.8640e-01, -6.7715e-02,
7.2777e-02, -1.3804e-01, -7.0332e-02,
1.5185e-01, -4.3530e-02, 1.4502e-01,
-3.2928e-02, -3.0583e-02, 9.2061e-02,
1.2493e-01, 1.0400e-01, 1.3780e-01,
1.4438e-01, 8.2051e-02, 1.6159e-02,
2.7478e-02, 1.7768e-01, 2.5945e-01,
-3.4662e-01, 2.0330e-03, 8.8118e-02,
-2.9628e-01, -1.3212e-01, -1.8145e-02,
-1.9330e-01, 3.9238e-02, -4.6944e-02,
-1.5668e-01, -5.7104e-02, 1.9558e-01,
6.5305e-02, 5.9933e-02, 7.7337e-02,
-2.4906e-02, -1.1235e-01, 1.3822e-02,
-3.9988e-02, -9.1882e-03, 1.9204e-02,
1.0504e-01, 4.6820e-03, -2.1836e-02,
-2.6953e-40, 2.5334e-40, -1.3028e-40,
1.4110e-41, 5.6841e-40, 3.6368e-40,
-1.1746e-41, -7.0658e-41, -3.9413e-40,
1.5025e-02, 7.4419e-02, 9.5652e-02,
5.0297e-02, 6.6704e-02, 5.7316e-02,
2.5102e-02, 1.1985e-01, 2.6043e-02,
3.3297e-02, -7.7374e-02, -1.1114e-01,
-7.5586e-02, -1.9338e-02, -1.3739e-02,
4.5616e-02, -6.4946e-02, -6.9372e-02,
-7.5874e-03, -1.1141e-01, -2.9135e-02,
-6.9436e-03, -1.4418e-02, 1.6436e-03,
-1.3051e-01, -1.3324e-01, -9.3934e-02,
1.2184e-01, 1.9386e-01, 1.7995e-01,
-2.7452e-02, 9.9736e-02, 1.0020e-01,
-6.3290e-02, -2.1447e-02, -1.7005e-01,
1.3857e-01, 2.3338e-01, 2.5410e-01,
2.3002e-01, 1.9551e-01, 1.4452e-01,
4.7040e-01, 2.2647e-01, 1.5215e-01,
2.6927e-02, -2.1304e-01, -1.4762e-01,
-5.6998e-02, 2.9064e-01, 1.8085e-01,
8.9393e-02, -1.7463e-01, -2.7095e-01,
3.8434e-02, 1.7198e-01, -1.8122e-02,
-1.3857e-01, 1.9418e-01, 1.5019e-01,
-5.6337e-02, -5.3265e-01, 3.2122e-01,
-2.4484e-40, -5.3707e-40, 1.5854e-41,
5.1791e-40, -4.1875e-41, 5.6732e-40,
1.3048e-40, 1.6452e-40, -4.5028e-40,
-3.0692e-02, 1.8569e-01, 2.0327e-01,
-7.4756e-02, -5.1765e-02, 4.2475e-02,
-9.0675e-02, -3.0438e-01, -3.5088e-01,
-1.9129e-02, -1.5663e-03, 4.9895e-02,
-1.9441e-02, 9.3237e-02, 1.2910e-01,
-2.3919e-02, -4.0539e-01, 2.8167e-02,
2.0203e-01, 3.3424e-02, 1.7927e-02,
4.1923e-02, -1.6967e-01, 2.5656e-02,
-1.5869e-01, -1.8727e-01, 2.7860e-03,
-4.0276e-02, -6.7792e-03, 3.3699e-02,
-6.7044e-03, 1.7686e-02, 2.9786e-02,
-1.5623e-02, 3.7904e-02, 2.4737e-02,
-1.2282e-01, -3.6563e-02, 4.1976e-02,
-9.9622e-03, 8.8981e-02, 2.1364e-02,
-8.5668e-02, -1.6803e-01, -4.4974e-02,
1.3164e-01, 4.1294e-01, 1.8897e-01,
2.1991e-01, 1.6247e-02, 1.1569e-01,
-3.0142e-02, 1.4069e-02, 3.6646e-02,
-2.6816e-02, -3.9767e-02, 1.4061e-01,
-1.3603e-01, -2.0649e-01, 7.5837e-02,
-1.6984e-02, -8.3800e-03, 2.3652e-04,
1.5049e-40, 4.6504e-40, 1.3625e-40,
-7.5358e-40, -3.4257e-40, 9.9763e-41,
4.7243e-40, 7.4890e-40, -7.9440e-42,
-5.9692e-02, -2.8047e-02, 2.3795e-02,
-3.5284e-02, 1.1448e-02, 5.0302e-04,
-3.5066e-02, 4.6185e-02, 1.2167e-02,
3.7583e-02, -3.6598e-02, 1.0206e-01,
-9.6229e-02, -1.5977e-01, 4.9157e-02,
3.7293e-02, 5.8766e-02, 1.0448e-02,
1.1490e-01, 1.4459e-01, 8.6936e-02,
2.8609e-01, -4.8108e-02, 9.0023e-02,
6.7941e-02, -5.7148e-03, 1.0021e-01,
7.3816e-02, 7.3794e-02, 8.0970e-03,
2.8307e-02, 3.6635e-03, -1.1769e-01,
4.1374e-02, 3.9933e-02, -4.4292e-02,
5.9423e-02, 1.9009e-01, -2.3735e-01,
-2.6670e-01, 5.8789e-01, -2.0048e-01,
-3.7082e-01, 1.8045e-01, 5.4820e-02,
-6.3567e-01, 2.0098e-01, 1.0653e-01,
-2.5056e-01, 6.5065e-01, -4.0471e-01,
5.4715e-02, 2.4375e-01, -2.7402e-01,
1.5982e-01, 1.0923e-01, 2.1566e-01,
2.0239e-01, -9.0221e-02, -4.4606e-01,
1.0550e-01, 5.4666e-02, -2.7134e-01,
-4.6424e-40, 2.9137e-40, 7.4968e-41,
1.2376e-41, -5.6213e-40, -6.3457e-40,
2.5404e-40, 2.0013e-40, 3.5611e-40,
5.5423e-02, 3.9843e-02, -1.7509e-01,
5.4480e-02, 5.0331e-02, -1.6793e-01,
6.6093e-02, 3.0163e-02, -8.2023e-02,
-1.5490e-01, 1.7457e-01, 2.7832e-01,
1.1482e-01, 2.5759e-01, -2.4199e-01,
-9.3891e-02, 9.1921e-02, -6.4480e-03,
1.9266e-01, 5.2907e-02, 7.0289e-02,
1.3582e-01, 6.4246e-02, 1.4989e-01,
6.2013e-03, -6.8884e-02, 6.8734e-02,
-1.0483e-01, -7.7134e-02, -3.6204e-02,
1.7590e-02, 5.0844e-02, 1.4234e-01,
7.2913e-02, 6.0726e-02, 6.4414e-02,
-8.5021e-02, -1.0621e-03, 5.5851e-02,
2.4666e-01, 6.5652e-02, -1.8180e-02,
1.5225e-01, 1.2928e-01, 3.1578e-03,
1.1468e-01, 1.9544e-01, 6.6637e-02,
6.3430e-02, 2.0542e-01, 7.0876e-02,
3.4779e-02, 1.0037e-02, -2.2134e-02,
-6.9304e-02, 1.1184e-01, -3.7015e-02,
-1.7634e-01, 1.2475e-01, 9.1947e-02,
-6.0550e-02, -1.3904e-01, 7.5192e-02,
-2.2871e-40, 4.7367e-41, -1.0711e-40,
-2.8662e-40, 4.0542e-41, 3.3067e-40,
-4.4395e-41, -7.2684e-41, 1.8695e-40,
-1.6702e-01, -2.6654e-01, 8.7902e-03,
-2.0108e-01, -3.8093e-01, -8.3700e-02,
-7.5433e-02, -2.0689e-01, 2.7951e-02,
2.9938e-03, 1.1378e-01, 7.1598e-02,
-1.6031e-01, 1.3475e-01, 1.5800e-01,
-7.2019e-02, -1.1663e-01, 8.0692e-02,
1.0610e-01, 1.1163e-02, -1.4959e-01,
-1.1576e-01, -8.5645e-02, 4.0414e-02,
5.6245e-02, 1.7056e-01, 2.5734e-01,
-6.1086e-02, -7.0851e-02, 7.6851e-02,
-2.7595e-02, -6.0890e-02, 4.7472e-02,
7.1059e-03, 6.0942e-05, 7.4915e-02,
1.9350e-01, -1.8458e-02, -2.3040e-02,
6.3477e-02, 1.1923e-01, 9.9319e-02,
6.4839e-02, 2.7973e-01, 1.2902e-01,
-1.7829e-01, 5.7083e-03, -6.1680e-03,
-1.1256e-01, -2.7951e-02, -2.1544e-01,
-2.1614e-02, -7.1468e-02, -2.2054e-02,
-8.7543e-02, -1.2982e-01, 1.9386e-01,
-5.7157e-03, -1.0108e-01, 1.4467e-01,
-6.5742e-02, -7.2054e-02, 1.7924e-01,
7.5418e-40, 6.3043e-40, 4.9815e-40,
-1.0952e-40, 3.0327e-40, -2.3848e-40,
4.1302e-40, 2.0150e-40, -1.6509e-40,
-1.3985e-02, -1.0550e-01, 5.8772e-02,
-1.7108e-02, -7.3644e-02, 3.3014e-02,
-1.8224e-03, 2.8931e-03, 9.2762e-02,
4.1531e-02, -1.5139e-01, -1.7773e-01,
9.6548e-02, -1.1914e-01, -4.6536e-02,
8.6754e-02, -4.0057e-03, 1.8983e-01,
1.6545e-01, -4.7311e-02, -7.2455e-03,
3.7567e-01, 1.8883e-01, -7.4325e-02,
-5.8252e-02, -1.3811e-02, -7.0470e-02,
-3.2943e-02, -7.0770e-02, -1.4700e-01,
1.7043e-02, 9.4331e-02, 4.2857e-03,
4.1247e-03, 1.6690e-01, 4.2146e-02,
1.1420e-01, -7.4456e-02, -3.8763e-02,
1.6807e-01, 9.3636e-03, -1.1796e-01,
1.7703e-01, 1.1386e-03, -6.8707e-02,
1.0259e-01, -1.8918e-02, 6.5902e-03,
1.2421e-02, -7.8960e-02, 2.1766e-02,
1.3062e-01, 4.6001e-02, 2.4199e-01,
-1.2955e-02, -1.9329e-01, 5.2074e-03,
5.9446e-02, 1.8832e-01, 2.2094e-01,
-1.0954e-01, -8.1867e-02, -4.3324e-02,
-3.9596e-41, 2.8677e-40, -6.5843e-40,
4.2812e-41, -3.5323e-40, 4.8298e-40,
7.6351e-40, -2.4759e-40, 7.3030e-40,
-1.1284e-01, -8.4171e-02, -1.5935e-01,
-3.2299e-02, 1.5427e-01, 8.9029e-02,
-3.8815e-02, 1.3098e-01, -4.3065e-02,
-2.5276e-01, -1.7018e-01, 9.7901e-02,
1.4218e-01, 3.1236e-01, 2.9636e-01,
-2.3613e-02, -5.5258e-02, -2.0550e-01
}
,
{
0.0333, 0.1145, -0.0922,
0.1185, 0.4533, -0.2015,
-0.0774, 0.1759, -0.0496,
0.0954, -0.0499, 0.0824,
0.1059, 0.0173, -0.0586,
-0.0666, -0.0287, -0.0652,
-0.0558, -0.1362, 0.0015,
0.1277, 0.1020, -0.1369,
0.0020, -0.0103, -0.0804,
0.0507, 0.1404, -0.0241,
0.0520, 0.1239, 0.0633,
-0.0268, 0.0335, 0.0883,
-0.0549, -0.1022, -0.0515,
-0.0163, -0.1167, -0.0442,
0.0858, -0.0804, -0.0014,
0.0354, -0.0666, -0.2105,
-0.0950, 0.1578, -0.0920,
-0.1303, 0.0299, -0.0195,
-0.0281, -0.1993, -0.0154,
0.0796, 0.0503, 0.0954,
0.0540, 0.0212, 0.0389,
-0.1387, 0.1091, -0.1212,
0.1556, 0.3573, 0.0976,
-0.0587, -0.2070, 0.2067,
0.0138, 0.0051, -0.1008,
0.2877, 0.1079, -0.0681,
0.0953, -0.0739, -0.2349,
0.1482, 0.0657, 0.0480,
0.1590, -0.0009, 0.1402,
0.0700, 0.0435, 0.1190,
0.0957, 0.0117, -0.1010,
0.1790, -0.0200, -0.0765,
0.0797, 0.1455, -0.0340,
0.0008, -0.0267, 0.0089,
0.0644, 0.0647, 0.0397,
0.0463, -0.0116, -0.0771,
0.2237, 0.0324, 0.0192,
-0.0082, -0.0345, 0.0294,
0.0719, -0.0185, 0.1008,
-0.0307, 0.0134, -0.0747,
0.0776, -0.1485, 0.0135,
0.0965, -0.0665, -0.1263,
-0.0101, -0.0097, -0.0144,
-0.0022, -0.0083, 0.0277,
0.0136, -0.0076, 0.0314,
-0.0008, 0.0722, -0.0704,
0.0053, 0.0767, 0.0368,
-0.0189, -0.1354, 0.0231,
-0.1416, 0.1945, -0.1756,
0.2058, 0.0401, -0.1348,
-0.0945, -0.2530, -0.3082,
-0.0096, 0.0871, 0.0699,
-0.0092, 0.0423, 0.0995,
-0.0914, -0.0570, -0.0718,
-0.0739, -0.2749, -0.2320,
0.1488, -0.2698, -0.1977,
0.1445, -0.1655, -0.0758,
0.2035, -0.0138, 0.0332,
0.0282, -0.2247, -0.0945,
-0.0614, -0.2484, -0.0595,
-0.1174, -0.1252, 0.1969,
-0.1101, -0.2950, -0.2164,
-0.0348, -0.0891, 0.1250,
0.0195, 0.0050, 0.0300,
-0.0508, -0.0316, -0.0194,
0.0199, 0.0345, 0.0444,
-0.0022, -0.0529, 0.1604,
0.0756, -0.2015, -0.2117,
-0.0837, -0.1270, 0.1330,
0.0286, 0.0952, 0.1082,
0.0724, -0.0446, -0.1156,
0.0545, 0.0444, -0.0291,
0.0759, 0.1110, 0.0944,
0.1615, 0.4302, -0.1060,
0.0418, -0.0281, -0.1378,
-0.0757, -0.0527, -0.1578,
0.0123, -0.0427, 0.1504,
0.0694, 0.0690, 0.0203,
0.2132, -0.3449, 0.0936,
0.2491, 0.0279, -0.0884,
-0.0447, 0.1589, -0.0054,
-0.0246, 0.1247, 0.0403,
0.0513, -0.0541, -0.1141,
0.0712, -0.1174, -0.0051,
0.2304, 0.2431, -0.0517,
-0.1548, -0.0401, 0.2032,
-0.0087, -0.1676, -0.0600,
0.1094, -0.0329, 0.0530,
-0.0580, 0.1499, -0.0806,
-0.0086, -0.1400, -0.0636,
0.0708, -0.1003, -0.1113,
-0.0732, -0.1199, 0.0060,
-0.0534, -0.0011, 0.0965,
-0.0268, 0.0116, -0.1161,
0.0787, 0.3925, -0.0819,
-0.0041, -0.0892, -0.2063,
-0.1296, 0.0924, -0.0079,
0.5625, 0.4013, 0.1645,
-0.0137, -0.1935, 0.2714,
0.0980, 0.0016, -0.1461,
0.1576, 0.0305, -0.1450,
0.1503, -0.0303, -0.1403,
0.0262, -0.0077, 0.0459,
0.2718, 0.0754, 0.2404,
0.1381, -0.1499, 0.0016,
0.1454, -0.1278, -0.0085,
0.1674, -0.0834, 0.1993,
0.0874, -0.0598, -0.0188,
0.2003, 0.3296, 0.0153,
-0.0154, 0.5550, -0.0945,
0.0489, 0.0415, -0.0940,
0.0164, 0.0791, 0.1077,
-0.0893, 0.1231, 0.0473,
-0.0319, 0.1444, 0.1690,
-0.0518, -0.1404, -0.1778,
-0.0170, 0.1395, -0.0234,
0.0128, -0.0112, -0.0472,
0.1039, 0.1982, -0.0272,
0.0282, -0.1199, -0.2622,
-0.0449, 0.0239, -0.1030,
-0.0840, -0.1044, -0.0646,
0.0588, 0.1937, -0.2494,
0.0180, 0.0747, 0.1530,
0.0500, 0.1756, 0.0491,
-0.1113, -0.0079, 0.0854,
-0.1493, -0.0559, -0.0373,
0.1972, -0.3158, -0.0500,
0.1932, 0.3177, -0.0018,
-0.0516, -0.1144, 0.0686,
0.0175, 0.0598, 0.0345,
-0.0667, -0.1078, 0.0384,
0.0897, 0.2198, -0.0531,
-0.2596, -0.1997, 0.0195,
0.0332, 0.4098, 0.1381,
0.1985, -0.0669, -0.1275,
-0.0751, -0.2388, -0.0672,
0.0090, 0.0891, -0.0362,
0.1392, -0.0518, 0.2039,
0.2079, -0.1202, 0.0707,
0.0498, -0.1237, -0.0665,
-0.0398, -0.1557, -0.0928,
0.0505, 0.1220, 0.0352,
-0.0674, -0.1159, 0.0724,
-0.0331, -0.1751, 0.0766,
0.0992, -0.0763, 0.0090,
-0.1223, 0.2621, -0.2029,
0.0509, -0.0279, -0.1061,
0.0598, 0.0353, -0.1610,
0.0165, 0.0835, 0.0704,
-0.0079, -0.0982, 0.0187,
0.2331, -0.1929, 0.0684,
-0.0507, 0.1476, -0.0886,
-0.0275, 0.1658, 0.0697,
-0.1123, -0.0069, -0.0851,
-0.0377, -0.0917, -0.0629,
-0.0420, 0.0506, 0.1111,
0.1086, 0.1351, -0.0851,
0.0466, 0.2750, 0.0185,
-0.0208, 0.2090, 0.0271,
0.0217, -0.0548, 0.0078,
-0.0609, 0.1029, -0.1641,
0.1392, 0.0115, 0.0317,
-0.0570, 0.1060, 0.1814,
-0.2015, -0.1301, 0.1082,
0.2452, -0.1815, -0.0046,
0.0103, -0.0466, -0.0895,
0.0158, -0.0594, -0.1386,
-0.0073, -0.0719, -0.0716,
0.1308, -0.0206, 0.0511,
-0.0437, -0.0763, 0.0287,
0.0493, -0.1239, 0.0219,
-0.0041, 0.0373, 0.0262,
0.0078, -0.0249, -0.0284,
0.0598, -0.0205, -0.0276,
0.0115, -0.1778, -0.0395,
0.1673, -0.0036, 0.2334,
0.0706, -0.0694, 0.0177,
0.1123, -0.0043, 0.0716,
-0.0894, -0.1609, 0.0334,
-0.0046, -0.2006, -0.0977,
-0.0127, 0.1198, -0.0339,
-0.0283, 0.1354, 0.1637,
-0.1696, 0.0187, -0.2621,
0.0496, 0.2834, 0.0423,
0.1126, 0.3962, 0.1660,
-0.0750, 0.1955, 0.0590,
-0.1088, -0.1146, -0.1219,
0.1360, 0.1524, 0.0498,
-0.1151, 0.0219, -0.0063,
-0.0821, 0.0247, -0.1065,
0.1153, 0.2085, 0.0618,
-0.0383, 0.0527, -0.2067
}
,
{
1.8014e-01, 2.1908e-01, -2.1088e-03,
1.7345e-01, 2.7654e-01, 1.3607e-02,
1.1363e-01, 9.9105e-02, -6.5730e-02,
-3.5679e-02, 9.6072e-03, 4.0721e-02,
-1.8771e-02, -2.3484e-04, -1.0230e-02,
1.6965e-02, -1.3032e-02, -6.3906e-02,
-4.5686e-02, -3.6733e-02, -4.8873e-02,
4.0752e-02, 2.1615e-02, -1.4822e-02,
1.1689e-01, 3.0153e-02, -5.0163e-04,
-7.0394e-03, -1.2387e-01, -8.9243e-02,
-1.8312e-01, -1.3868e-01, -6.2618e-02,
-8.1627e-02, -2.0480e-01, -3.0740e-01,
4.4296e-02, 3.8572e-02, 4.3754e-02,
1.7538e-01, 5.3284e-02, -7.5663e-03,
1.9670e-01, -1.2397e-01, -1.6266e-01,
1.4575e-01, -5.7771e-02, 2.7619e-02,
2.2757e-02, -4.8910e-01, -2.6201e-01,
3.6513e-02, -2.0704e-01, -1.3225e-01,
-6.7533e-02, 1.1289e-02, 7.1316e-02,
-7.6847e-02, 6.8128e-02, 7.4717e-02,
1.1269e-01, 2.9978e-02, 3.2132e-02,
-5.4557e-02, -4.4599e-02, 4.1835e-02,
5.7964e-02, -2.1246e-03, 1.5007e-01,
1.8432e-01, 1.1463e-01, 2.2691e-01,
9.6166e-02, 4.7887e-02, -3.8399e-02,
5.8153e-02, -2.0255e-02, -1.1362e-01,
2.6402e-02, 2.5562e-02, 1.9096e-02,
1.1588e-01, 1.4540e-01, 1.1948e-01,
1.0360e-01, 5.9083e-02, 1.9263e-01,
1.6953e-01, 2.7390e-02, 9.7883e-02,
1.5059e-01, 6.7593e-02, -4.5843e-03,
8.7031e-02, -2.0926e-03, -6.3056e-02,
-6.6960e-02, -5.2056e-02, -7.3570e-02,
1.4361e-02, 1.1059e-01, -4.9720e-02,
4.4270e-02, 3.9995e-02, 4.3101e-03,
-1.1042e-01, 4.5028e-02, -8.9124e-02,
-1.2906e-01, -7.6972e-02, -6.5449e-03,
-1.9269e-01, 2.8349e-01, 1.1573e-01,
-1.7983e-01, 9.7615e-02, 9.4003e-03,
-4.7802e-02, -1.5889e-01, -1.2693e-01,
7.4717e-02, 2.8655e-01, -7.2637e-02,
1.5837e-02, 8.7125e-02, -1.2198e-01,
-1.7754e-02, -5.6443e-02, -9.8661e-03,
6.3040e-02, 2.0249e-02, -3.5368e-02,
9.7756e-03, 2.6760e-02, -5.5172e-02,
-1.0406e-02, 4.8313e-02, 2.4717e-02,
-5.2851e-02, 6.8496e-02, -2.5933e-02,
4.5932e-02, 5.9892e-02, 1.9200e-02,
-5.1316e-40, -5.1811e-40, -1.5144e-40,
-6.7758e-38, -5.4608e-40, -3.9680e-40,
-1.9155e-39, 2.0423e-41, 1.5256e-41,
-2.5559e-08, -3.2461e-08, -2.6821e-08,
-3.6885e-08, -4.6896e-08, -3.9086e-08,
-3.4305e-08, -4.4160e-08, -3.7187e-08,
-3.7416e-40, 3.6550e-40, 5.0727e-40,
-1.6722e-40, 3.9228e-40, 5.4548e-40,
-5.7512e-40, -2.8156e-40, 9.4571e-41,
-4.7040e-40, -1.6974e-40, 6.3849e-40,
-3.7322e-40, 2.6014e-40, 2.3080e-40,
-2.8395e-40, -3.7116e-40, 4.4393e-40,
1.1597e-40, 4.3291e-40, 3.8219e-40,
3.3393e-40, 3.1747e-40, -1.8400e-36,
-5.5215e-40, 1.7648e-40, -1.6540e-35,
-3.0953e-40, 5.3063e-40, -1.6454e-40,
2.1341e-40, 2.0790e-40, -3.0226e-40,
-2.6807e-40, -1.6601e-40, 5.1829e-40,
-1.8897e-40, -4.5956e-41, 5.3784e-40,
-2.5661e-40, -2.1726e-40, 1.2010e-40,
1.8263e-41, 1.1214e-40, -3.7693e-40,
-4.2596e-40, 1.8854e-40, 5.5010e-40,
-6.6262e-40, -4.8808e-40, 3.3123e-40,
5.9379e-41, 2.3249e-40, 4.4504e-40,
-8.4836e-04, -8.4397e-04, -5.8640e-04,
-8.3506e-04, -8.0192e-04, -5.3901e-04,
-8.3539e-04, -7.8069e-04, -4.8720e-04,
-3.4706e-04, -4.4640e-04, -5.2353e-04,
-4.4518e-04, -5.3374e-04, -5.2734e-04,
-5.8780e-04, -5.8730e-04, -5.4362e-04,
-5.2452e-04, -5.4578e-04, -5.6266e-04,
-4.2387e-04, -4.4643e-04, -4.8936e-04,
-3.5880e-04, -3.7886e-04, -4.1998e-04,
-2.4479e-04, -4.0736e-04, -3.1189e-04,
-3.4922e-04, -4.0173e-04, -2.5042e-04,
-5.7091e-04, -5.2665e-04, -2.3293e-04,
-2.8505e-04, 9.7283e-05, 3.1209e-04,
-2.7463e-04, 1.8704e-04, 4.4351e-04,
-9.1436e-05, 3.2602e-04, 5.7573e-04,
-4.0112e-04, -4.2566e-04, -2.4300e-04,
-9.9362e-05, -6.5499e-05, 3.2872e-05,
1.1584e-04, 2.3417e-04, 3.4427e-04,
-7.5767e-05, 3.9768e-06, 6.2201e-05,
2.3151e-05, 2.5595e-04, 3.4038e-04,
-1.3871e-05, 3.0295e-04, 4.4170e-04,
-1.7802e-04, -4.5376e-04, -5.1847e-04,
-5.0687e-04, -5.5837e-04, -2.5917e-04,
-5.3992e-04, -7.1375e-04, -4.8728e-04,
-1.7543e-01, -3.4151e-01, -3.2619e-02,
-1.9701e-02, -1.5494e-01, -1.6534e-01,
3.5632e-02, -1.0897e-01, -3.8379e-02,
-6.1420e-02, -1.0735e-01, 1.4730e-01,
7.4386e-02, -1.0487e-01, 7.9646e-02,
1.7130e-02, 4.4391e-02, -5.1959e-03,
4.5682e-02, -1.1543e-01, 9.4035e-03,
-3.4376e-01, -1.1961e-01, 1.0099e-01,
1.1335e-01, 7.5840e-02, 1.0675e-01,
4.9539e-02, 8.7406e-02, 4.4951e-02,
1.8111e-01, 2.6406e-01, -1.5924e-02,
-1.1464e-01, 8.4579e-04, -6.6811e-02,
-8.9635e-03, 1.8236e-03, 3.6561e-02,
-7.0281e-02, 2.9717e-01, 3.1836e-02,
-1.3647e-01, -6.5627e-02, 9.3063e-02,
-2.1851e-01, -6.0226e-02, -1.0326e-01,
5.3441e-02, 1.9103e-01, -5.7999e-02,
-3.3512e-02, 1.5496e-01, -1.1111e-01,
2.3256e-03, -1.5004e-01, -9.1248e-02,
-9.7706e-02, 1.9549e-01, -1.5403e-01,
-1.5327e-01, 8.3335e-02, 5.6111e-03,
-1.5707e-01, 8.0277e-03, -7.3955e-02,
-1.4111e-01, -1.3548e-01, -1.0563e-01,
2.3054e-01, -2.1822e-02, -6.6938e-03,
-1.0259e-01, 4.3577e-02, -1.7630e-01,
1.6484e-01, 4.2413e-01, 6.9475e-02,
-2.4705e-01, 2.5757e-01, -9.5611e-02,
1.0236e-01, -3.4820e-02, -6.8818e-03,
-1.1434e-01, -3.1800e-01, 2.1337e-02,
-1.9939e-01, -2.6532e-01, 7.3361e-02,
6.5939e-02, 9.5812e-02, -7.0156e-02,
-1.6249e-02, -1.5927e-02, -1.1189e-01,
-9.3936e-03, -1.0933e-01, -2.9399e-02,
-2.8752e-02, -4.5613e-02, -1.2718e-02,
3.8781e-01, 2.6776e-01, -1.0373e-02,
-2.3927e-02, -6.4398e-02, 9.9117e-02,
-6.0732e-02, -5.5917e-03, 5.1716e-02,
-1.4168e-01, 1.7661e-01, -5.5893e-02,
-3.0419e-01, -3.5537e-01, 2.1978e-01,
-1.8610e-01, -5.7743e-03, 3.2649e-02,
1.9975e-01, 1.6508e-01, 1.3808e-02,
1.0733e-01, 1.4722e-01, 5.8671e-02,
6.4940e-02, 1.6114e-01, 3.9697e-02,
1.1530e-01, 2.4021e-01, -2.1669e-01,
6.0220e-02, 2.0257e-01, -1.5227e-01,
-6.1096e-02, 6.6511e-02, -1.3858e-01,
-6.5275e-02, 1.0891e-01, 8.2048e-02,
-6.7907e-02, 2.2863e-02, -1.0322e-01,
1.6542e-01, -1.4436e-01, 6.4125e-02,
-1.0378e-01, -3.2346e-01, -1.5123e-02,
3.8758e-03, 1.1006e-01, -4.4325e-02,
-1.0102e-01, -3.7699e-02, 9.2472e-02,
-6.8972e-02, -1.2308e-02, 1.6478e-01,
3.4351e-02, -1.7461e-02, 1.0301e-01,
-2.7125e-01, -5.6730e-02, -2.5989e-01,
-3.0163e-01, -1.4826e-01, -3.4955e-01,
-1.6259e-01, -1.6708e-01, -2.7964e-01,
-6.7134e-02, -2.2385e-01, 2.1776e-01,
-1.1351e-02, -3.7861e-01, 1.8687e-01,
4.0551e-02, 8.1943e-02, 1.0866e-01,
1.0273e-01, 1.1844e-01, -1.1852e-01,
2.6758e-02, -8.5806e-02, 5.9444e-02,
-5.1627e-02, 7.1636e-02, 2.2841e-01,
-3.7242e-03, 2.9723e-01, 1.1918e-01,
8.4994e-02, -3.5747e-01, 3.6148e-02,
9.9705e-02, -1.3736e-01, -6.0080e-02,
1.2370e-01, 5.0668e-02, -6.0246e-02,
6.0562e-02, -3.5068e-01, -3.2645e-01,
9.1020e-04, 6.6203e-02, -1.0770e-01,
1.9434e-02, 3.0018e-01, 2.8018e-01,
1.4021e-01, 2.7481e-01, 2.2868e-01,
4.8540e-02, 1.7719e-01, -4.5834e-02,
-9.6349e-02, -2.3008e-02, -1.4497e-01,
4.3053e-02, -1.0161e-01, 2.8750e-02,
-1.2594e-01, -1.0388e-02, -4.3966e-02,
7.5993e-02, -7.1609e-02, 1.4624e-02,
4.1110e-02, 7.1258e-02, -2.9109e-02,
-5.8698e-03, 1.2389e-01, 4.7648e-02,
-6.1585e-04, -4.4556e-02, -2.3373e-02,
-4.4883e-02, -7.7722e-02, -7.3635e-02,
-2.7750e-02, -1.5117e-03, -8.7368e-02,
2.5113e-02, 7.7490e-02, 2.9024e-02,
1.5426e-01, 2.5472e-01, 4.8057e-02,
-1.1969e-01, -1.1487e-01, -1.1802e-01,
-4.7392e-02, -4.2226e-02, 3.1968e-02,
-2.6717e-01, -5.0206e-02, 8.1946e-04,
-4.0426e-02, 1.4373e-01, -3.3121e-03,
-4.5292e-02, -2.4538e-02, 1.0377e-01,
-1.7780e-02, 2.0058e-01, -2.4343e-02,
-1.1714e-02, 1.5984e-01, -1.2638e-01,
6.4655e-02, 3.7703e-02, 3.7970e-02,
9.1864e-03, 1.1468e-01, -6.2760e-04,
-1.4812e-01, 6.5670e-03, 1.0765e-01,
1.5023e-01, -7.0594e-02, -1.3924e-01,
3.6016e-02, -3.9078e-02, -3.8950e-02,
1.8735e-02, -1.5573e-01, -1.2456e-01
}
,
{
4.8634e-02, -1.3617e-01, 6.1231e-02,
-7.0235e-02, -6.4110e-01, 1.5985e-01,
8.6151e-02, 1.1847e-01, 1.3819e-01,
-3.6017e-04, -3.2273e-02, -8.5485e-02,
-7.0804e-03, 2.1751e-01, 7.2575e-03,
-8.3606e-02, -1.4885e-01, -1.2702e-01,
4.0848e-41, 8.0934e-40, -1.8889e-40,
-3.9103e-40, -7.4709e-40, 3.8377e-40,
-2.4159e-40, -4.7610e-40, 7.7359e-40,
-8.6217e-05, -5.9763e-05, -4.0558e-05,
-7.4966e-05, -4.7074e-05, -3.1656e-05,
-9.8390e-05, -6.6833e-05, -4.7669e-05,
3.5375e-02, 2.8660e-02, 4.1277e-02,
1.6289e-01, -3.2199e-01, -1.7845e-02,
2.4659e-01, -3.9618e-02, 4.1065e-03,
2.7267e-02, 8.6819e-02, 9.5070e-02,
-7.2700e-02, -2.8826e-01, 1.1750e-03,
2.5259e-02, 2.4681e-03, 6.4737e-02,
7.3023e-03, 2.9631e-02, 1.0820e-02,
-2.1400e-02, 5.4244e-01, 1.5639e-01,
-1.7561e-01, 4.8947e-01, -8.8305e-02,
6.5073e-02, 3.4922e-01, 1.3483e-01,
1.4506e-01, -2.5472e-01, -7.2894e-02,
4.5945e-02, 1.4040e-01, 1.2148e-01,
-2.6932e-01, -1.1518e-01, -9.3158e-03,
-2.3961e-01, -1.2479e-01, -8.9796e-02,
1.8688e-02, -4.9267e-02, 7.7189e-02,
-7.3691e-02, 7.8186e-03, 1.3761e-02,
-1.5689e-01, 3.1138e-02, 3.9231e-02,
-4.3607e-03, 2.0813e-01, 5.5635e-02,
-6.7000e-41, 9.8995e-41, 3.0043e-40,
6.7190e-40, 4.0827e-40, 7.6057e-40,
4.2208e-40, 8.1141e-40, -3.3569e-40,
1.0179e-03, 5.1543e-04, 3.8076e-04,
7.3507e-04, 4.5432e-04, 3.7410e-04,
9.3014e-04, 6.7365e-04, 6.0051e-04,
-5.1998e-02, 6.5768e-02, 3.1603e-02,
-3.0198e-02, -3.1692e-02, -6.9299e-02,
1.7672e-02, 2.3766e-01, 5.7877e-02,
-5.7944e-02, 1.2624e-01, -1.4396e-01,
-4.1542e-02, 6.5110e-01, 1.0942e-01,
-1.3133e-01, 5.0538e-02, -2.7371e-02,
-3.7515e-02, 2.8703e-02, 1.2382e-03,
3.8542e-01, -2.2754e-02, 3.4459e-02,
3.0545e-01, -5.3817e-01, -2.1389e-03,
1.3888e-02, -2.2775e-01, -6.3692e-02,
-1.8430e-01, 5.8452e-02, 4.5764e-02,
-8.5045e-02, -1.7060e-01, -1.8565e-02,
-2.0384e-02, -3.3018e-02, -5.1135e-02,
-4.5789e-02, -1.8105e-01, 3.5419e-02,
-5.0081e-02, 8.7719e-02, 1.0373e-01,
-1.0033e-02, 7.0530e-02, -7.8012e-03,
8.4042e-02, 1.1982e-01, -9.6046e-02,
-6.4009e-02, -1.0711e-01, -1.3523e-01,
1.8868e-41, -7.0039e-40, -7.2568e-40,
1.7408e-40, -7.8143e-40, -6.8130e-40,
-6.3142e-40, -6.2560e-40, -7.4238e-40,
2.6297e-04, 7.0014e-05, -4.0981e-04,
2.6263e-04, 4.2811e-05, -4.9950e-04,
3.9795e-04, 1.2615e-04, -4.7660e-04,
7.5933e-02, 2.6295e-02, 2.7984e-02,
-5.5914e-03, -8.7981e-02, -9.2618e-02,
4.2725e-02, -3.1210e-01, 1.3412e-01,
5.2683e-02, 3.9891e-01, 2.9150e-02,
-6.6090e-02, 2.9455e-01, -1.9710e-01,
1.4546e-02, -2.5572e-02, 8.1125e-02,
1.2271e-01, 1.6097e-01, 4.5644e-02,
3.6101e-02, -1.7174e-02, 6.6110e-02,
1.5078e-01, 4.5180e-01, 7.7154e-02,
-5.9725e-02, 1.0185e-01, 1.1363e-03,
6.7791e-02, 1.7696e-02, 5.2638e-02,
3.3051e-02, -8.4049e-02, 1.4380e-01,
1.8744e-02, -2.0940e-01, -2.1424e-01,
-2.1329e-01, -1.3154e-01, -3.2572e-01,
1.1292e-01, 1.2361e-02, -1.5506e-01,
-1.0362e-02, 1.9955e-02, 4.2639e-02,
-2.1952e-02, -2.4682e-02, -2.4453e-02,
-2.5606e-02, -3.3580e-02, -3.6340e-02,
-5.0830e-40, 6.3797e-40, -5.2775e-40,
-7.7988e-40, -7.4579e-40, -5.1901e-40,
-3.8275e-41, -5.7607e-40, -1.3656e-40,
2.7164e-04, 5.9977e-04, 8.6886e-04,
3.0116e-04, 7.0106e-04, 1.0248e-03,
2.9177e-04, 6.4748e-04, 9.4825e-04,
6.6310e-02, 1.5240e-02, -5.3044e-02,
1.2545e-01, 5.0582e-02, 2.7358e-02,
1.9338e-01, 1.1377e-01, 4.6110e-02,
-3.1997e-02, 1.5171e-02, -4.9372e-02,
5.4615e-04, 1.7262e-01, -2.2081e-01,
8.4871e-02, 1.7824e-02, -3.6429e-02,
4.2821e-02, -1.0055e-01, 4.8927e-02,
1.2524e-01, 5.8859e-02, -2.0980e-02,
2.2897e-01, 1.7594e-01, 3.4239e-02,
1.0915e-01, 1.2088e-01, 1.0151e-01,
6.8449e-03, -1.5546e-01, 1.2024e-01,
4.9036e-02, -1.2245e-01, 4.6713e-02,
7.5083e-03, -4.8084e-02, 9.7731e-03,
4.8779e-02, 3.1848e-02, -9.3517e-02,
6.4595e-02, 3.9337e-02, -7.2343e-02,
3.9519e-02, 4.1867e-02, -5.0485e-02,
2.5257e-02, 1.4071e-01, 1.3606e-01,
1.7481e-01, 2.0210e-01, 1.7241e-01,
-7.6295e-40, -7.8460e-40, -4.1806e-41,
-7.9994e-40, -7.3271e-40, -6.2665e-40,
-7.9602e-40, -7.0226e-40, -7.4131e-40,
-4.5544e-04, -5.2379e-04, -7.0755e-04,
-3.3807e-04, -3.8123e-04, -5.3222e-04,
-3.1771e-04, -3.4586e-04, -4.8784e-04,
-3.5257e-02, -1.1866e-02, 1.9717e-02,
-6.0777e-02, -7.3127e-03, -3.2825e-02,
-1.4952e-01, 3.2117e-01, -6.3786e-02,
-1.0255e-02, 1.2961e-01, -8.6823e-02,
1.6994e-01, 4.7491e-01, 2.7135e-01,
2.8538e-03, 1.5572e-01, -3.3736e-02,
8.5996e-02, -1.0176e-02, 2.6629e-02,
7.3362e-02, -7.7525e-03, 5.6261e-02,
1.0819e-01, -2.5863e-01, -5.7146e-03,
-7.1781e-02, 2.8376e-03, 7.8298e-02,
1.3183e-01, 2.7149e-02, -9.9786e-02,
9.0491e-02, 8.7938e-02, -2.1882e-02,
4.1396e-03, -4.5816e-02, -7.8892e-02,
-6.3855e-03, 1.7502e-01, 1.2053e-01,
1.2492e-01, 6.1258e-02, -4.0516e-02,
-4.5409e-02, -4.5877e-02, -7.6414e-02,
-1.0573e-02, -1.2517e-01, -4.3991e-02,
-2.6447e-02, -9.5478e-02, -2.4735e-02,
-4.6548e-41, -1.6443e-40, -3.1221e-40,
-3.2675e-40, -2.7265e-40, -3.1190e-40,
-2.2065e-40, -2.5407e-40, -6.9511e-40,
-1.2727e-04, -2.6585e-04, -3.5516e-04,
3.4272e-05, -1.6810e-04, -3.1677e-04,
-5.5355e-05, -2.9924e-04, -4.3692e-04,
-5.6428e-02, 1.0771e-01, 1.0185e-01,
2.2948e-01, -7.8744e-02, 6.0768e-04,
-2.2355e-03, -2.0128e-03, -5.7317e-03,
-7.1232e-03, 1.0297e-01, 1.6872e-01,
1.9194e-01, -1.1578e-01, 1.0732e-01,
-8.6952e-02, 3.2901e-02, -6.6658e-03,
7.3979e-02, 8.3875e-02, -7.6372e-03,
1.9577e-01, 2.7391e-01, 4.5275e-02,
1.5610e-01, 2.3802e-01, 1.6555e-02,
1.3814e-01, 1.2870e-01, 9.1626e-02,
-4.6890e-02, -8.8734e-02, 7.8866e-02,
1.0027e-01, 2.2139e-01, 1.0050e-01,
-6.5845e-02, -1.0990e-01, -6.9896e-02,
4.1687e-02, 3.0631e-02, -8.8441e-02,
-1.1868e-01, 1.0836e-02, 2.5873e-02,
-1.7114e-02, 7.6295e-02, 1.5439e-02,
-2.4271e-02, 5.8538e-02, 9.8190e-02,
4.9742e-02, 8.7807e-02, 6.5871e-02,
-7.2669e-40, -7.5936e-41, -7.4975e-40,
-1.6984e-42, -1.7334e-40, -8.4954e-41,
-2.1556e-41, -1.5374e-40, -1.5515e-40,
-6.2626e-04, -7.2727e-04, -8.1665e-04,
-5.6584e-04, -6.1190e-04, -6.9584e-04,
-5.6278e-04, -5.8554e-04, -6.3554e-04,
8.1550e-02, -4.1817e-03, 1.2301e-02,
-4.5800e-02, 4.6708e-02, -8.7972e-02,
-2.9880e-01, 2.6456e-01, 3.9363e-03,
-3.0939e-02, -1.9921e-01, -3.8689e-03,
-8.6803e-02, 3.4857e-01, -1.0201e-01,
2.1597e-02, 1.4380e-02, 4.3448e-02,
7.1195e-02, 1.4980e-01, 3.8079e-02,
-1.2678e-01, -8.1274e-02, -4.3445e-02,
5.2482e-02, -1.8763e-01, 1.1557e-01,
-9.4614e-02, 5.4415e-02, -3.1485e-02,
-3.6451e-02, 1.4379e-01, 5.2291e-02,
-9.2069e-02, 9.5675e-02, -5.8433e-02,
7.5768e-03, -7.1280e-02, -1.4576e-01,
-1.4671e-01, -1.2446e-01, -1.5207e-01,
-5.4368e-02, 3.8303e-02, -8.1794e-02,
2.0492e-02, 4.0910e-02, 1.1379e-02,
3.1582e-02, 3.6039e-02, -4.4040e-03,
1.7540e-02, 1.4097e-04, -6.4367e-02,
-7.9553e-40, -5.3941e-40, -7.1912e-40,
-5.8099e-40, -6.8315e-40, -6.6012e-40,
-7.6242e-40, -5.4784e-40, -7.0267e-40,
-2.9197e-04, -2.1994e-04, -1.9501e-04,
-2.6516e-05, -1.2642e-05, -8.4345e-05,
1.6763e-04, 1.1268e-04, -5.4516e-05,
-3.8007e-03, -6.8765e-02, -9.5716e-02,
6.3091e-02, -8.1971e-02, -9.2895e-02,
-6.8353e-03, 7.3639e-02, 1.3505e-01,
9.0083e-02, 2.4352e-01, 3.9708e-02,
-5.4051e-02, -6.8748e-02, -1.8937e-01,
-1.9808e-03, -7.1337e-02, -2.8316e-02,
8.1504e-02, 8.3226e-03, 6.9013e-03,
9.4393e-02, 5.9322e-02, 5.5023e-02,
1.0236e-01, -4.0205e-02, 3.5172e-02,
6.5381e-02, 4.9075e-02, -5.3931e-02,
4.3961e-02, 9.0223e-03, -4.1678e-02,
-6.4262e-02, -5.0304e-02, -9.3597e-02
}
,
{
3.8496e-01, 1.4287e-01, 3.4530e-02,
-5.5398e-01, -6.0381e-02, 1.2078e-02,
7.9983e-02, 2.1478e-01, -5.7915e-02,
-1.4020e-01, -2.6914e-02, 1.5915e-02,
1.2371e-01, 2.5496e-01, -2.9867e-02,
1.3269e-02, -9.9596e-02, -2.3173e-01,
5.1471e-02, -4.5507e-01, -7.7620e-02,
-5.1328e-02, -1.9808e-02, -4.7051e-02,
3.0573e-02, 7.8762e-02, -7.2627e-02,
6.8690e-02, -4.0125e-02, 5.6657e-02,
8.0208e-02, -2.0075e-02, 1.4019e-01,
-5.7959e-02, -7.3152e-02, 2.0202e-02,
-8.8702e-02, -1.9911e-01, -1.5570e-01,
2.8401e-02, 5.8802e-02, 1.3050e-01,
2.1905e-02, -3.4298e-02, 4.0447e-02,
1.0184e-01, -9.0101e-02, -9.2770e-02,
1.1713e-02, -3.2514e-01, 1.9393e-01,
-9.4227e-02, 2.7053e-01, -9.7233e-02,
-1.0478e-01, 6.0652e-02, 8.3399e-02,
1.1104e-01, 2.9008e-01, 4.9208e-02,
-1.5414e-02, 3.1718e-02, -7.9083e-02,
-5.2358e-03, 9.0101e-02, 5.2973e-02,
5.5527e-02, -1.6599e-02, -8.5167e-02,
-5.1018e-02, 7.2243e-03, -9.5684e-02,
-5.0608e-02, -6.7864e-02, -8.9496e-02,
-2.4348e-01, 2.7477e-01, -1.7588e-01,
1.3927e-01, 5.5502e-02, -1.3370e-02,
-4.3509e-02, -2.1511e-01, -5.9070e-02,
1.0293e-01, 4.2678e-01, -8.7527e-02,
-6.8546e-02, -5.6296e-02, -8.7962e-02,
-8.6130e-02, 9.2069e-02, 7.2303e-02,
2.4365e-02, 2.1988e-01, -7.9408e-03,
-3.0063e-02, 1.1554e-01, -5.0311e-02,
1.0605e-02, 5.4598e-02, 1.3826e-02,
-1.4342e-02, 1.5353e-01, -5.3974e-03,
1.5583e-01, -6.0889e-02, -1.5772e-02,
-2.5956e-02, -3.5285e-01, -2.0338e-01,
2.6011e-01, 2.2737e-01, -1.4693e-01,
-7.7964e-02, 1.0053e-01, -5.4278e-02,
-3.0668e-02, 3.4556e-02, -3.4321e-02,
7.8695e-02, -2.2357e-01, 9.5733e-02,
1.7483e-01, -1.5153e-01, -1.8262e-03,
4.7605e-02, -2.2834e-01, 4.6383e-02,
1.5701e-01, 3.2264e-01, 1.0334e-02,
6.3351e-02, 1.1340e-01, 8.3478e-02,
6.4196e-02, 3.3460e-02, 8.8473e-02,
5.4663e-02, -1.7665e-03, -4.1935e-02,
-6.1346e-03, -5.4463e-02, -6.2960e-02,
2.8159e-02, 2.9903e-02, 9.2429e-03,
-3.0041e-02, -9.7783e-02, -4.9500e-02,
9.5350e-02, -7.9143e-02, -1.3244e-01,
-6.5129e-02, 1.4568e-01, 6.6843e-02,
1.5241e-01, -7.8736e-02, 1.0721e-01,
-5.9015e-02, 1.5320e-01, 3.0796e-01,
-5.4266e-03, -6.0804e-02, 3.7326e-02,
7.4844e-02, 4.8340e-02, 1.5251e-01,
3.8158e-02, 1.2087e-01, -8.9003e-02,
-5.8369e-02, -7.3813e-02, 1.2240e-02,
-4.5106e-03, 7.4580e-02, 1.2042e-01,
4.1959e-02, 1.4529e-01, 5.3636e-03,
-4.9708e-03, -1.0775e-02, -5.9374e-02,
1.5358e-02, 1.7277e-02, -1.5412e-01,
8.1647e-02, 3.3503e-02, -8.1934e-02,
-1.5807e-02, -1.0001e-02, -1.0059e-02,
-9.0493e-03, -7.8954e-02, 4.3891e-02,
-9.3815e-03, 3.2241e-02, 4.7962e-02,
-7.2252e-03, 7.9324e-02, 2.0662e-02,
-5.7710e-02, -5.1142e-02, -1.4296e-01,
2.1501e-02, -1.9518e-02, -2.7658e-02,
1.4983e-01, 8.5447e-02, 7.2092e-04,
1.1275e-01, 6.1131e-02, 5.7955e-02,
1.5624e-02, 2.7225e-01, 1.1716e-01,
-1.6322e-04, -1.3368e-04, -1.5575e-04,
-1.0525e-04, -1.0765e-04, -1.5306e-04,
-8.9692e-05, -1.0857e-04, -1.7316e-04,
-1.8015e-03, -1.3733e-03, -3.9154e-04,
-1.8453e-03, -1.4238e-03, -4.4163e-04,
-1.5511e-03, -1.1131e-03, -2.0087e-04,
-2.4082e-03, -2.2576e-03, -1.9231e-03,
-2.4913e-03, -2.4136e-03, -2.1678e-03,
-2.5057e-03, -2.4650e-03, -2.2732e-03,
-2.3901e-05, -1.5870e-05, -5.8255e-06,
-1.5163e-05, -1.2370e-05, -6.0712e-06,
-1.3098e-05, -1.1132e-05, -5.7866e-06,
-5.9760e-03, -5.9998e-03, -6.0295e-03,
-5.9962e-03, -6.0100e-03, -6.0277e-03,
-6.0003e-03, -6.0059e-03, -6.0148e-03,
-3.2764e-05, -2.9574e-05, -2.8001e-05,
-1.0846e-05, -1.1569e-05, -1.4282e-05,
-1.6255e-06, -2.5666e-06, -4.7808e-06,
-5.1999e-03, -5.2334e-03, -5.2847e-03,
-5.2057e-03, -5.2283e-03, -5.2713e-03,
-5.2195e-03, -5.2321e-03, -5.2633e-03,
-3.0782e-06, -9.2118e-06, -1.6177e-05,
-1.6382e-06, -6.9559e-06, -1.4245e-05,
-1.1471e-06, -6.5984e-06, -1.4903e-05,
7.7574e-02, -1.2866e-02, 4.1348e-03,
-6.7298e-02, -1.3691e-01, 6.4079e-02,
3.7962e-02, 8.7737e-02, -4.1046e-02,
-2.8471e-02, 1.7647e-01, 6.4232e-02,
1.2316e-01, 3.6800e-01, -1.5740e-01,
-6.0839e-02, 1.5449e-02, -1.0761e-01,
-6.6869e-02, -1.2867e-01, -4.0195e-02,
-4.9651e-02, -5.5500e-02, -2.5879e-02,
2.0179e-02, 6.8467e-02, 2.6575e-02,
-6.7728e-04, -7.6269e-02, 2.3470e-02,
7.1869e-02, -1.1855e-01, -2.1067e-02,
1.3263e-01, -3.2957e-02, -3.4365e-03,
8.1936e-02, 1.3073e-01, 1.1477e-01,
1.2429e-01, 1.6129e-01, 1.6251e-01,
1.5476e-02, 3.2862e-02, 2.1999e-02,
-2.9189e-02, -3.3615e-02, 5.5616e-04,
-2.4059e-02, -9.6181e-03, -4.1175e-02,
-6.3680e-04, -9.6559e-02, -9.1448e-02,
3.0238e-02, 1.2534e-01, 1.5256e-02,
-4.2118e-02, 1.5723e-01, 2.6929e-03,
1.9873e-02, 5.3050e-02, -1.0153e-03,
2.0634e-02, 9.2825e-03, -6.8027e-03,
3.1335e-03, -7.7443e-03, -1.8307e-02,
7.9974e-03, -1.0283e-03, -6.2520e-03,
4.5050e-02, 9.9504e-02, -1.3404e-01,
-6.7271e-01, -5.7290e-02, 2.6919e-02,
2.3673e-01, 2.4688e-02, -2.0227e-02,
5.1389e-02, -3.9810e-02, -8.9700e-02,
2.8445e-02, 3.9136e-01, -1.1508e-01,
-1.0449e-01, -6.2005e-02, 6.5721e-02,
-1.9123e-01, -4.2613e-02, 3.5371e-02,
1.9207e-01, 8.7916e-02, 4.8089e-02,
-5.7912e-02, 1.0014e-01, -9.4659e-02,
1.1240e-02, -6.2254e-03, 1.3399e-01,
1.6483e-01, -3.5079e-01, 1.1612e-02,
2.9215e-01, 5.6875e-02, 6.9505e-02,
1.3721e-02, 1.2607e-01, 2.6426e-02,
-2.0529e-01, 2.1768e-01, 2.1232e-01,
-6.3574e-02, 2.3504e-02, -1.0811e-01,
-1.3470e-02, -3.6446e-02, -5.4379e-02,
-1.3257e-01, -8.3412e-02, 3.7745e-02,
5.8778e-02, -2.6060e-01, 3.8262e-02,
-4.3689e-03, -6.6703e-02, -2.2025e-01,
-9.0961e-02, 1.3855e-01, 3.4573e-04,
-2.9613e-01, -3.6138e-02, -1.3827e-01,
4.5896e-02, -5.3871e-02, -1.0037e-01,
1.8457e-01, 1.0338e-01, -5.7306e-02,
5.5510e-02, -9.4938e-02, -5.6527e-05,
1.6372e-01, -3.3854e-02, 5.6332e-02,
-4.0251e-01, -5.9428e-02, -9.1470e-02,
-1.5921e-02, -5.7948e-02, 8.1682e-03,
-3.7833e-03, 1.6293e-01, 5.3784e-02,
1.1053e-01, -1.3867e-01, 2.6772e-02,
-1.3133e-02, 3.7614e-01, 3.6361e-03,
-1.4205e-01, 3.1312e-02, -9.9928e-02,
-1.5755e-01, 4.2016e-01, 9.4065e-02,
2.7536e-02, 1.2620e-01, -1.4894e-01,
-4.2137e-02, -9.8700e-02, -1.7479e-01,
4.5836e-02, 5.3893e-02, -1.0138e-01,
8.3609e-02, 2.1849e-02, -1.0648e-01,
7.4801e-02, -1.2671e-01, -1.5007e-02,
2.7440e-01, -3.1351e-01, 6.5787e-02,
-6.7820e-02, 1.6312e-01, -1.3254e-02,
-2.5770e-02, -2.0041e-02, 5.8243e-02,
1.6055e-02, 1.1971e-02, -4.6112e-02,
-1.6276e-01, -1.5313e-02, -7.9826e-03,
9.1668e-02, 9.7722e-02, 1.3754e-01,
-7.4817e-02, -4.1923e-01, -1.2337e-01,
1.3472e-01, -4.0745e-02, -5.4055e-02,
-1.2943e-02, 4.8796e-02, 4.2007e-02,
9.4668e-02, 8.6149e-02, 1.2362e-01,
7.0637e-02, 2.3565e-01, 1.4582e-01,
5.6904e-02, -8.2166e-02, 1.0563e-01,
9.3969e-02, -2.2909e-01, 4.6537e-02,
6.5257e-02, 1.4804e-01, -6.2092e-02,
-1.5699e-02, -1.5303e-02, 1.6671e-01,
-6.1947e-03, 2.5749e-01, 1.5257e-01,
3.2908e-02, -5.9907e-02, 1.1502e-01,
7.5876e-02, -2.6699e-01, -1.5891e-02,
-8.0426e-02, 1.3406e-01, -1.9881e-02,
3.5472e-02, -8.2140e-02, 1.6509e-02,
8.3390e-03, -7.8291e-02, -2.0754e-01,
3.4490e-02, 2.7913e-01, 5.9566e-02,
2.5288e-02, 1.1725e-01, -1.0356e-01,
-5.0955e-02, 9.2093e-02, -5.8477e-02,
4.4325e-02, 3.2973e-02, -1.9477e-01,
3.9582e-02, -8.6877e-02, -1.1753e-01,
3.0401e-02, -2.8757e-02, -2.5563e-02,
5.0741e-02, -3.5056e-01, -2.5584e-01,
9.1709e-02, -4.0932e-02, 2.3812e-01,
5.0945e-02, 4.9246e-02, 1.2738e-01,
5.1440e-03, 1.5703e-01, 5.5743e-02,
-3.9492e-02, 1.2114e-01, 2.0531e-02,
8.0800e-02, 2.6680e-03, -1.6660e-02,
1.0684e-01, 1.2308e-01, 1.7882e-02,
1.8280e-02, 1.0972e-01, -5.2912e-03
}
,
{
-1.3812e-02, -4.6271e-02, 7.3790e-02,
-6.3801e-02, -3.6817e-01, -1.7880e-02,
5.2986e-02, 1.8626e-01, 1.5645e-03,
1.2367e-02, -6.2923e-02, 3.0844e-02,
9.3623e-02, 1.9527e-01, -2.6366e-02,
-2.0837e-02, -3.4424e-02, 4.0256e-02,
4.1482e-02, 6.1795e-02, -1.1293e-02,
-8.9944e-02, -1.3608e-01, 1.8067e-02,
3.6974e-02, 5.2530e-03, -2.7474e-02,
1.1872e-05, 1.9000e-05, 2.0729e-05,
1.0139e-05, 1.6832e-05, 1.9392e-05,
6.5445e-06, 1.0973e-05, 1.3521e-05,
-5.3340e-02, 1.3108e-03, 4.0436e-02,
5.7068e-02, -2.7923e-02, -5.4781e-02,
-2.9293e-02, 2.7145e-02, 2.7340e-02,
5.3520e-03, 1.8766e-02, 4.0297e-01,
2.6473e-02, -3.4675e-02, -1.1783e-01,
-2.5038e-02, -1.7702e-02, -3.4908e-02,
1.4847e-02, 2.3237e-01, -6.3687e-02,
-6.5672e-02, -2.1888e-01, -1.7233e-02,
4.0608e-02, -6.9580e-02, -2.2200e-02,
5.8163e-02, 1.3695e-01, -2.6257e-02,
-1.3328e-01, -3.5730e-01, 2.4507e-02,
-4.5611e-03, 2.0424e-01, -3.9821e-02,
5.5300e-02, -1.6006e-01, 1.1717e-01,
-2.6107e-02, -8.6995e-02, 8.3720e-02,
7.5494e-02, 3.2189e-01, 1.5527e-01,
-6.6869e-02, 1.4469e-01, 5.1805e-02,
9.8760e-02, -1.6759e-01, -1.2350e-01,
5.7005e-02, 8.4904e-02, 8.9713e-02,
-1.4263e-02, 2.8914e-02, 3.2239e-02,
-2.4871e-02, 5.6014e-02, -4.4469e-02,
3.1209e-02, 1.3677e-02, -2.1052e-02,
-1.6548e-03, -1.8796e-03, -1.9883e-03,
-1.6186e-03, -1.8494e-03, -1.9670e-03,
-1.5841e-03, -1.8173e-03, -1.9345e-03,
3.5726e-02, 1.8013e-01, 1.6913e-02,
-1.2168e-01, -6.3848e-02, 3.0555e-02,
3.0269e-02, -1.0260e-01, -1.5259e-02,
-4.7375e-03, 5.5115e-02, 6.2642e-01,
9.9776e-03, -2.1988e-01, -2.0984e-01,
7.0470e-03, 6.3178e-02, -1.3607e-02,
1.1918e-01, -2.4081e-01, 1.7889e-01,
-1.0514e-01, 2.9220e-01, -1.3263e-01,
5.6091e-03, -4.1623e-02, 2.5589e-02,
-1.8496e-01, 2.7698e-02, -6.5768e-02,
2.9677e-01, 4.4163e-02, 5.8530e-02,
-1.1010e-01, -7.6787e-02, 3.9844e-02,
5.2113e-03, -1.8202e-02, 1.4129e-03,
-6.1402e-03, -2.7222e-01, 7.4690e-02,
1.9131e-02, 2.2753e-01, 1.9587e-02,
-2.7391e-02, 6.7917e-03, 2.0496e-03,
6.7333e-02, 7.8262e-02, 2.1110e-03,
-5.4519e-02, 3.0763e-02, 1.5628e-02,
9.5055e-02, 3.8855e-02, 1.2446e-02,
-1.5152e-01, 7.8124e-02, -1.2616e-02,
9.3100e-03, -1.6528e-02, -1.2873e-02,
-1.8377e-03, -1.9231e-03, -1.8930e-03,
-1.8058e-03, -1.8841e-03, -1.8678e-03,
-1.7387e-03, -1.7966e-03, -1.7781e-03,
-4.5122e-02, 1.7027e-03, -3.5534e-03,
8.5222e-03, 1.0130e-01, 4.7893e-02,
6.5574e-02, 7.2150e-03, -2.1820e-03,
-5.5105e-03, -1.8990e-01, 2.6527e-02,
6.6140e-03, 2.1537e-01, -2.2183e-02,
-8.0628e-03, 6.8398e-03, 9.4474e-03,
1.2239e-01, -1.3337e-01, 7.3391e-02,
-1.2205e-01, 1.3145e-01, -2.0063e-02,
2.2168e-02, 3.6097e-03, 2.7146e-02,
4.6717e-02, 2.1122e-02, 1.5491e-02,
-1.3077e-01, 1.1635e-01, 1.0849e-02,
8.0113e-02, -8.4028e-02, 1.2863e-03,
-2.9796e-02, -8.4537e-02, -2.6766e-03,
-7.7771e-03, -2.4274e-03, 8.6274e-02,
-2.0354e-02, 4.1245e-02, 8.4227e-02,
5.5894e-02, 1.0706e-01, 5.2965e-02,
-7.8731e-03, 5.5825e-01, 1.0373e-01,
-1.1975e-01, -2.0071e-02, -2.5286e-02,
-7.7477e-02, 5.3589e-02, -1.5710e-03,
-1.2753e-01, 2.5166e-01, 8.2205e-03,
-9.8349e-02, -4.9539e-02, -5.4941e-02,
-4.9916e-03, -4.9986e-03, -5.0660e-03,
-4.9770e-03, -4.9840e-03, -5.0543e-03,
-4.9997e-03, -5.0114e-03, -5.0809e-03,
6.1819e-02, 1.5061e-01, 1.1984e-02,
1.2905e-01, 2.5921e-01, 1.4768e-01,
4.5548e-02, 1.4902e-01, -4.8961e-03,
-1.3605e-02, 8.2896e-02, -4.1931e-01,
-2.2657e-02, 2.4768e-01, 2.6528e-01,
-1.1566e-02, -8.7819e-03, 4.3618e-02,
-3.4332e-02, -1.8392e-01, 4.4471e-02,
-3.7073e-02, -5.4620e-02, 1.0899e-01,
3.7891e-02, 9.9487e-02, 3.2383e-02,
-6.3628e-02, -5.0303e-03, 5.4617e-02,
-8.7802e-02, 2.1977e-01, -6.0249e-03,
6.3554e-02, -5.4291e-02, -2.6709e-02,
-1.5505e-02, -6.7104e-02, 3.8607e-02,
-1.1427e-01, -3.2524e-01, 4.0077e-02,
-6.5144e-03, 1.2313e-01, -2.7924e-02,
1.4265e-02, -3.8338e-02, 8.6780e-02,
1.5341e-01, 1.2174e-01, -7.3160e-02,
2.6326e-04, 7.3690e-02, 5.2187e-02,
-3.3114e-02, -3.6588e-02, 1.1635e-02,
-3.3521e-02, 1.0767e-01, -8.9125e-03,
-2.2431e-02, -4.5655e-03, 7.5531e-03,
6.7227e-04, 7.2856e-04, 7.3907e-04,
6.5335e-04, 7.0702e-04, 7.1233e-04,
6.1540e-04, 6.7286e-04, 6.7797e-04,
-3.1496e-02, 6.0514e-02, 4.2013e-02,
-2.8617e-02, 1.4846e-02, 4.0016e-03,
4.7006e-03, -4.0017e-02, -3.0411e-02,
-9.6037e-03, 8.8522e-02, 9.8616e-02,
4.1297e-02, -3.2645e-01, -7.6144e-03,
-1.0711e-02, 3.9324e-02, 4.0144e-02,
5.2899e-02, -7.8668e-02, -5.4798e-02,
-2.0428e-01, 5.7238e-02, -3.6937e-02,
-3.6103e-02, -8.2683e-02, -2.8101e-02,
8.2479e-02, 5.7766e-02, -1.2019e-01,
-3.8373e-01, 6.8272e-02, -1.1758e-02,
5.1129e-02, -2.7931e-01, 4.5608e-02,
-2.5151e-02, -5.0816e-02, 1.7231e-02,
-3.6376e-02, 1.5916e-01, 2.9192e-02,
-4.1947e-02, 5.3183e-02, -9.7289e-02,
4.6138e-02, 7.0842e-02, 1.6673e-02,
-1.7243e-03, 2.7203e-01, 3.8262e-02,
-1.4000e-01, -7.3793e-02, -2.0050e-02,
-1.8750e-02, -8.5319e-02, -3.0858e-02,
-5.9981e-02, 1.2729e-01, 1.4094e-02,
-5.4088e-02, -2.3694e-02, -9.7485e-03,
-4.7840e-03, -4.8359e-03, -4.8727e-03,
-4.7882e-03, -4.8380e-03, -4.8755e-03,
-4.7859e-03, -4.8321e-03, -4.8633e-03,
4.9511e-02, 1.0935e-01, -3.7430e-03,
1.1834e-01, 7.7243e-02, 4.3074e-02,
6.7446e-02, 2.9734e-02, -1.1276e-02,
-2.0080e-02, 1.3561e-01, -1.3455e-01,
-1.4505e-02, 2.2100e-01, 4.9635e-02,
-1.0040e-02, 3.4560e-02, -7.4607e-03,
-6.8873e-02, -5.6221e-02, 1.2255e-02,
-2.9198e-02, 7.1612e-02, 2.9402e-02,
4.1036e-02, 4.6417e-02, 6.0284e-03,
-6.5261e-02, 2.1426e-03, 2.4192e-02,
-1.6073e-03, -6.2222e-03, -1.8295e-02,
2.4952e-04, -2.0623e-02, -3.3064e-03,
5.9188e-02, -4.8839e-02, 7.9840e-02,
-6.7952e-02, -4.7191e-01, 1.5117e-01,
1.5668e-01, 2.4733e-01, 1.1354e-01,
1.7742e-02, -4.4059e-02, 9.5374e-03,
3.2049e-01, -1.3779e-01, 9.6608e-02,
8.4580e-02, 1.4293e-01, 6.1574e-02,
2.8777e-03, 7.8795e-02, -5.1902e-02,
1.2212e-01, 1.0321e-01, 3.2360e-02,
-9.6617e-02, 7.8941e-03, -7.0876e-02,
3.5869e-03, 3.5891e-03, 3.5923e-03,
3.5746e-03, 3.5840e-03, 3.5967e-03,
3.5785e-03, 3.5932e-03, 3.6080e-03,
1.5454e-03, 3.0582e-03, 4.3737e-02,
-5.9833e-02, -1.1247e-01, 4.4380e-02,
-1.3206e-01, 8.2778e-03, 4.7963e-02,
-4.3720e-02, -7.5722e-03, 2.0510e-01,
3.0133e-02, -4.0506e-01, 2.7867e-01,
5.5586e-02, 2.8926e-02, 1.3360e-03,
1.9490e-05, 3.3326e-01, -7.7241e-02,
-1.5648e-01, 1.5195e-01, -1.3995e-01,
8.6519e-02, 1.0447e-01, -4.1413e-02,
-3.8667e-03, 1.6159e-01, 1.1627e-01,
-2.2646e-01, -3.4758e-02, -6.7956e-03,
-3.2689e-01, 1.9606e-01, -9.1523e-02,
1.1238e-02, 1.5084e-03, 4.2113e-02,
-1.1154e-02, -3.6596e-01, -7.2252e-02,
6.6621e-02, 1.0188e-01, 4.1032e-01,
3.5892e-02, -4.8304e-02, 6.6142e-03,
1.3374e-01, 2.2720e-01, -7.1224e-02,
6.8952e-02, 2.0467e-01, 5.0251e-02,
-6.2016e-02, 2.2175e-01, -1.7764e-02,
2.7542e-02, 1.4905e-01, 3.6637e-02,
-7.2231e-02, 5.0271e-03, -7.1823e-02,
3.5760e-03, 3.5540e-03, 3.5692e-03,
3.5664e-03, 3.5490e-03, 3.5689e-03,
3.5671e-03, 3.5619e-03, 3.5864e-03,
2.7470e-02, -3.9752e-02, 4.1063e-02,
-2.4985e-02, -1.7969e-01, 8.2186e-02,
-5.4251e-02, -5.9651e-03, 2.5079e-02,
-2.1197e-02, 2.5426e-02, 1.3585e-01,
-1.3460e-02, -1.1377e-01, 1.2278e-01,
3.6533e-02, 1.2843e-02, 5.6219e-02,
5.8141e-04, 2.8354e-01, -6.2016e-02,
-1.0289e-01, 1.8724e-01, -9.9475e-02,
5.1193e-02, 7.5986e-02, -1.2951e-03,
-8.2587e-02, 1.8498e-01, 1.0891e-01,
1.3538e-01, -4.7728e-01, 1.0868e-01,
-8.6415e-02, -1.7061e-01, 1.0457e-02
}
};
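// Biases for the eight 8-channel convolution layers of the HDN level 3
// ACNet model, presumably indexed as HDNL3biasL[layer][outputChannel]
// (the indexing convention is an inference from the array shape and naming,
// not spelled out here).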
static __device__ __constant__ const float HDNL3biasL[8][8] =
{
{
-0.1175, -0.0258, -0.0053, -0.0437, -0.0563, -0.1047, -0.3449, 0.0568
}
,
{
0.0339, -0.1738, 0.0061, 0.1565, -0.0316, -0.0016, -0.0032, -0.0554
}
,
{
-0.0508, -0.0609, 0.0347, -0.0802, -0.0438, 0.2512, -0.0491, -0.0259
}
,
{
0.0655, 0.0255, 0.0228, -0.0027, -0.0155, -0.0163, -0.0174, -0.1095
}
,
{
4.9947e-03, 5.3372e-03, -4.5286e-09, -1.3756e-03, 3.8858e-03, -4.4197e-02, 3.3970e-02, 2.8411e-02
}
,
{
-0.0396, 0.0007, 0.1735, 0.0109, 0.1177, 0.0919, 0.0567, -0.0005
}
,
{
0.0127, -0.0688, 0.1102, -0.0052, 0.1602, -0.0191, -0.0322, 0.0311
}
,
{
0.0063, 0.0093, 0.0729, 0.3734, 0.0006, 0.1915, 0.3186, 0.2636
}
};
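// Weights of the final 2x2 transposed convolution ("layer 10") that maps the
// 8 feature channels back to a single pixel value. Layout, inferred from the
// convTranspose8To1HDNL* kernels below: HDNL3kernelsL10[4 * channel + subPixel],
// where subPixel = (y & 1) * 2 + (x & 1) selects one of the four output
// positions of the 2x upscale.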
static __device__ __constant__ const float HDNL3kernelsL10[4 * 8] =
{
-0.0967, -0.3094,
0.3537, 0.5705,
0.2547, 0.3360,
-0.0718, -0.0700,
-0.3013, -0.1602,
0.4520, 0.0495,
0.1564, 0.3773,
-0.0216, 0.4367,
-0.4855, -0.1972,
-0.2026, -0.4390,
0.3743, -0.1156,
0.4408, -0.3123,
-0.3577, 0.0753,
-0.3396, 0.0336,
0.1052, -0.4180,
0.0799, -0.3587
};
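// First layer: a 3x3 convolution from the single input channel to 8 feature
// channels. The four conv1To8HDNL{0..3} kernels are identical except for the
// HDN level whose constant weights they read via the CHANNEL1TO8(channel, level)
// macro defined earlier in this file. The 3x3 neighborhood is fetched from a
// border-addressed texture (out-of-range reads return 0, i.e. zero padding),
// and the 8 output channels are packed as two float4 layers of a layered surface.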
__global__ static void conv1To8HDNL0(
cudaTextureObject_t srcImg, cudaSurfaceObject_t dstImg,
int W, int H)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
float tl = tex2D<float>(srcImg, x - 1, y - 1);
float tc = tex2D<float>(srcImg, x, y - 1);
float tr = tex2D<float>(srcImg, x + 1, y - 1);
float ml = tex2D<float>(srcImg, x - 1, y);
float mc = tex2D<float>(srcImg, x, y);
float mr = tex2D<float>(srcImg, x + 1, y);
float bl = tex2D<float>(srcImg, x - 1, y + 1);
float bc = tex2D<float>(srcImg, x, y + 1);
float br = tex2D<float>(srcImg, x + 1, y + 1);
float4 c1234 = make_float4(
RELU(CHANNEL1TO8(0, 0)),
RELU(CHANNEL1TO8(1, 0)),
RELU(CHANNEL1TO8(2, 0)),
RELU(CHANNEL1TO8(3, 0))
);
float4 c5678 = make_float4(
RELU(CHANNEL1TO8(4, 0)),
RELU(CHANNEL1TO8(5, 0)),
RELU(CHANNEL1TO8(6, 0)),
RELU(CHANNEL1TO8(7, 0))
);
surf2DLayeredwrite(c1234, dstImg, __umul24(sizeof(c1234), x), y, 0, cudaBoundaryModeZero);
surf2DLayeredwrite(c5678, dstImg, __umul24(sizeof(c5678), x), y, 1, cudaBoundaryModeZero);
}
__global__ static void conv1To8HDNL1(
cudaTextureObject_t srcImg, cudaSurfaceObject_t dstImg,
int W, int H)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
float tl = tex2D<float>(srcImg, x - 1, y - 1);
float tc = tex2D<float>(srcImg, x, y - 1);
float tr = tex2D<float>(srcImg, x + 1, y - 1);
float ml = tex2D<float>(srcImg, x - 1, y);
float mc = tex2D<float>(srcImg, x, y);
float mr = tex2D<float>(srcImg, x + 1, y);
float bl = tex2D<float>(srcImg, x - 1, y + 1);
float bc = tex2D<float>(srcImg, x, y + 1);
float br = tex2D<float>(srcImg, x + 1, y + 1);
float4 c1234 = make_float4(
RELU(CHANNEL1TO8(0, 1)),
RELU(CHANNEL1TO8(1, 1)),
RELU(CHANNEL1TO8(2, 1)),
RELU(CHANNEL1TO8(3, 1))
);
float4 c5678 = make_float4(
RELU(CHANNEL1TO8(4, 1)),
RELU(CHANNEL1TO8(5, 1)),
RELU(CHANNEL1TO8(6, 1)),
RELU(CHANNEL1TO8(7, 1))
);
surf2DLayeredwrite(c1234, dstImg, __umul24(sizeof(c1234), x), y, 0, cudaBoundaryModeZero);
surf2DLayeredwrite(c5678, dstImg, __umul24(sizeof(c5678), x), y, 1, cudaBoundaryModeZero);
}
__global__ static void conv1To8HDNL2(
cudaTextureObject_t srcImg, cudaSurfaceObject_t dstImg,
int W, int H)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
float tl = tex2D<float>(srcImg, x - 1, y - 1);
float tc = tex2D<float>(srcImg, x, y - 1);
float tr = tex2D<float>(srcImg, x + 1, y - 1);
float ml = tex2D<float>(srcImg, x - 1, y);
float mc = tex2D<float>(srcImg, x, y);
float mr = tex2D<float>(srcImg, x + 1, y);
float bl = tex2D<float>(srcImg, x - 1, y + 1);
float bc = tex2D<float>(srcImg, x, y + 1);
float br = tex2D<float>(srcImg, x + 1, y + 1);
float4 c1234 = make_float4(
RELU(CHANNEL1TO8(0, 2)),
RELU(CHANNEL1TO8(1, 2)),
RELU(CHANNEL1TO8(2, 2)),
RELU(CHANNEL1TO8(3, 2))
);
float4 c5678 = make_float4(
RELU(CHANNEL1TO8(4, 2)),
RELU(CHANNEL1TO8(5, 2)),
RELU(CHANNEL1TO8(6, 2)),
RELU(CHANNEL1TO8(7, 2))
);
surf2DLayeredwrite(c1234, dstImg, __umul24(sizeof(c1234), x), y, 0, cudaBoundaryModeZero);
surf2DLayeredwrite(c5678, dstImg, __umul24(sizeof(c5678), x), y, 1, cudaBoundaryModeZero);
}
__global__ static void conv1To8HDNL3(
cudaTextureObject_t srcImg, cudaSurfaceObject_t dstImg,
int W, int H)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
float tl = tex2D<float>(srcImg, x - 1, y - 1);
float tc = tex2D<float>(srcImg, x, y - 1);
float tr = tex2D<float>(srcImg, x + 1, y - 1);
float ml = tex2D<float>(srcImg, x - 1, y);
float mc = tex2D<float>(srcImg, x, y);
float mr = tex2D<float>(srcImg, x + 1, y);
float bl = tex2D<float>(srcImg, x - 1, y + 1);
float bc = tex2D<float>(srcImg, x, y + 1);
float br = tex2D<float>(srcImg, x + 1, y + 1);
float4 c1234 = make_float4(
RELU(CHANNEL1TO8(0, 3)),
RELU(CHANNEL1TO8(1, 3)),
RELU(CHANNEL1TO8(2, 3)),
RELU(CHANNEL1TO8(3, 3))
);
float4 c5678 = make_float4(
RELU(CHANNEL1TO8(4, 3)),
RELU(CHANNEL1TO8(5, 3)),
RELU(CHANNEL1TO8(6, 3)),
RELU(CHANNEL1TO8(7, 3))
);
surf2DLayeredwrite(c1234, dstImg, __umul24(sizeof(c1234), x), y, 0, cudaBoundaryModeZero);
surf2DLayeredwrite(c5678, dstImg, __umul24(sizeof(c5678), x), y, 1, cudaBoundaryModeZero);
}
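// Middle layers: 3x3 convolutions from 8 channels to 8 channels. L selects
// which mid-layer weight/bias set is applied; the host side is assumed to
// launch this kernel once per layer (L = 0..7), ping-ponging between the two
// layered surfaces. Both float4 layers of the 3x3 neighborhood are read with
// cudaBoundaryModeZero, which matches the zero padding of the convolution.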
__global__ static void conv8To8HDNL0(
cudaSurfaceObject_t srcImg, cudaSurfaceObject_t dstImg,
int W, int H, int L)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
float4 tl1, tc1, tr1, ml1, mc1, mr1, bl1, bc1, br1;
float4 tl2, tc2, tr2, ml2, mc2, mr2, bl2, bc2, br2;
surf2DLayeredread(&tl1, srcImg, __umul24(sizeof(mc1), x - 1), y - 1, 0, cudaBoundaryModeZero);
surf2DLayeredread(&tc1, srcImg, __umul24(sizeof(mc1), x), y - 1, 0, cudaBoundaryModeZero);
surf2DLayeredread(&tr1, srcImg, __umul24(sizeof(mc1), x + 1), y - 1, 0, cudaBoundaryModeZero);
surf2DLayeredread(&ml1, srcImg, __umul24(sizeof(mc1), x - 1), y, 0, cudaBoundaryModeZero);
surf2DLayeredread(&mc1, srcImg, __umul24(sizeof(mc1), x), y, 0, cudaBoundaryModeZero);
surf2DLayeredread(&mr1, srcImg, __umul24(sizeof(mc1), x + 1), y, 0, cudaBoundaryModeZero);
surf2DLayeredread(&bl1, srcImg, __umul24(sizeof(mc1), x - 1), y + 1, 0, cudaBoundaryModeZero);
surf2DLayeredread(&bc1, srcImg, __umul24(sizeof(mc1), x), y + 1, 0, cudaBoundaryModeZero);
surf2DLayeredread(&br1, srcImg, __umul24(sizeof(mc1), x + 1), y + 1, 0, cudaBoundaryModeZero);
surf2DLayeredread(&tl2, srcImg, __umul24(sizeof(mc2), x - 1), y - 1, 1, cudaBoundaryModeZero);
surf2DLayeredread(&tc2, srcImg, __umul24(sizeof(mc2), x), y - 1, 1, cudaBoundaryModeZero);
surf2DLayeredread(&tr2, srcImg, __umul24(sizeof(mc2), x + 1), y - 1, 1, cudaBoundaryModeZero);
surf2DLayeredread(&ml2, srcImg, __umul24(sizeof(mc2), x - 1), y, 1, cudaBoundaryModeZero);
surf2DLayeredread(&mc2, srcImg, __umul24(sizeof(mc2), x), y, 1, cudaBoundaryModeZero);
surf2DLayeredread(&mr2, srcImg, __umul24(sizeof(mc2), x + 1), y, 1, cudaBoundaryModeZero);
surf2DLayeredread(&bl2, srcImg, __umul24(sizeof(mc2), x - 1), y + 1, 1, cudaBoundaryModeZero);
surf2DLayeredread(&bc2, srcImg, __umul24(sizeof(mc2), x), y + 1, 1, cudaBoundaryModeZero);
surf2DLayeredread(&br2, srcImg, __umul24(sizeof(mc2), x + 1), y + 1, 1, cudaBoundaryModeZero);
float4 c1234 = make_float4(
RELU(CHANNEL8TO8(0, 0)),
RELU(CHANNEL8TO8(1, 0)),
RELU(CHANNEL8TO8(2, 0)),
RELU(CHANNEL8TO8(3, 0))
);
float4 c5678 = make_float4(
RELU(CHANNEL8TO8(4, 0)),
RELU(CHANNEL8TO8(5, 0)),
RELU(CHANNEL8TO8(6, 0)),
RELU(CHANNEL8TO8(7, 0))
);
surf2DLayeredwrite(c1234, dstImg, __umul24(sizeof(c1234), x), y, 0, cudaBoundaryModeZero);
surf2DLayeredwrite(c5678, dstImg, __umul24(sizeof(c5678), x), y, 1, cudaBoundaryModeZero);
}
__global__ static void conv8To8HDNL1(
cudaSurfaceObject_t srcImg, cudaSurfaceObject_t dstImg,
int W, int H, int L)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
float4 tl1, tc1, tr1, ml1, mc1, mr1, bl1, bc1, br1;
float4 tl2, tc2, tr2, ml2, mc2, mr2, bl2, bc2, br2;
surf2DLayeredread(&tl1, srcImg, __umul24(sizeof(mc1), x - 1), y - 1, 0, cudaBoundaryModeZero);
surf2DLayeredread(&tc1, srcImg, __umul24(sizeof(mc1), x), y - 1, 0, cudaBoundaryModeZero);
surf2DLayeredread(&tr1, srcImg, __umul24(sizeof(mc1), x + 1), y - 1, 0, cudaBoundaryModeZero);
surf2DLayeredread(&ml1, srcImg, __umul24(sizeof(mc1), x - 1), y, 0, cudaBoundaryModeZero);
surf2DLayeredread(&mc1, srcImg, __umul24(sizeof(mc1), x), y, 0, cudaBoundaryModeZero);
surf2DLayeredread(&mr1, srcImg, __umul24(sizeof(mc1), x + 1), y, 0, cudaBoundaryModeZero);
surf2DLayeredread(&bl1, srcImg, __umul24(sizeof(mc1), x - 1), y + 1, 0, cudaBoundaryModeZero);
surf2DLayeredread(&bc1, srcImg, __umul24(sizeof(mc1), x), y + 1, 0, cudaBoundaryModeZero);
surf2DLayeredread(&br1, srcImg, __umul24(sizeof(mc1), x + 1), y + 1, 0, cudaBoundaryModeZero);
surf2DLayeredread(&tl2, srcImg, __umul24(sizeof(mc2), x - 1), y - 1, 1, cudaBoundaryModeZero);
surf2DLayeredread(&tc2, srcImg, __umul24(sizeof(mc2), x), y - 1, 1, cudaBoundaryModeZero);
surf2DLayeredread(&tr2, srcImg, __umul24(sizeof(mc2), x + 1), y - 1, 1, cudaBoundaryModeZero);
surf2DLayeredread(&ml2, srcImg, __umul24(sizeof(mc2), x - 1), y, 1, cudaBoundaryModeZero);
surf2DLayeredread(&mc2, srcImg, __umul24(sizeof(mc2), x), y, 1, cudaBoundaryModeZero);
surf2DLayeredread(&mr2, srcImg, __umul24(sizeof(mc2), x + 1), y, 1, cudaBoundaryModeZero);
surf2DLayeredread(&bl2, srcImg, __umul24(sizeof(mc2), x - 1), y + 1, 1, cudaBoundaryModeZero);
surf2DLayeredread(&bc2, srcImg, __umul24(sizeof(mc2), x), y + 1, 1, cudaBoundaryModeZero);
surf2DLayeredread(&br2, srcImg, __umul24(sizeof(mc2), x + 1), y + 1, 1, cudaBoundaryModeZero);
float4 c1234 = make_float4(
RELU(CHANNEL8TO8(0, 1)),
RELU(CHANNEL8TO8(1, 1)),
RELU(CHANNEL8TO8(2, 1)),
RELU(CHANNEL8TO8(3, 1))
);
float4 c5678 = make_float4(
RELU(CHANNEL8TO8(4, 1)),
RELU(CHANNEL8TO8(5, 1)),
RELU(CHANNEL8TO8(6, 1)),
RELU(CHANNEL8TO8(7, 1))
);
surf2DLayeredwrite(c1234, dstImg, __umul24(sizeof(c1234), x), y, 0, cudaBoundaryModeZero);
surf2DLayeredwrite(c5678, dstImg, __umul24(sizeof(c5678), x), y, 1, cudaBoundaryModeZero);
}
__global__ static void conv8To8HDNL2(
cudaSurfaceObject_t srcImg, cudaSurfaceObject_t dstImg,
int W, int H, int L)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
float4 tl1, tc1, tr1, ml1, mc1, mr1, bl1, bc1, br1;
float4 tl2, tc2, tr2, ml2, mc2, mr2, bl2, bc2, br2;
surf2DLayeredread(&tl1, srcImg, __umul24(sizeof(mc1), x - 1), y - 1, 0, cudaBoundaryModeZero);
surf2DLayeredread(&tc1, srcImg, __umul24(sizeof(mc1), x), y - 1, 0, cudaBoundaryModeZero);
surf2DLayeredread(&tr1, srcImg, __umul24(sizeof(mc1), x + 1), y - 1, 0, cudaBoundaryModeZero);
surf2DLayeredread(&ml1, srcImg, __umul24(sizeof(mc1), x - 1), y, 0, cudaBoundaryModeZero);
surf2DLayeredread(&mc1, srcImg, __umul24(sizeof(mc1), x), y, 0, cudaBoundaryModeZero);
surf2DLayeredread(&mr1, srcImg, __umul24(sizeof(mc1), x + 1), y, 0, cudaBoundaryModeZero);
surf2DLayeredread(&bl1, srcImg, __umul24(sizeof(mc1), x - 1), y + 1, 0, cudaBoundaryModeZero);
surf2DLayeredread(&bc1, srcImg, __umul24(sizeof(mc1), x), y + 1, 0, cudaBoundaryModeZero);
surf2DLayeredread(&br1, srcImg, __umul24(sizeof(mc1), x + 1), y + 1, 0, cudaBoundaryModeZero);
surf2DLayeredread(&tl2, srcImg, __umul24(sizeof(mc2), x - 1), y - 1, 1, cudaBoundaryModeZero);
surf2DLayeredread(&tc2, srcImg, __umul24(sizeof(mc2), x), y - 1, 1, cudaBoundaryModeZero);
surf2DLayeredread(&tr2, srcImg, __umul24(sizeof(mc2), x + 1), y - 1, 1, cudaBoundaryModeZero);
surf2DLayeredread(&ml2, srcImg, __umul24(sizeof(mc2), x - 1), y, 1, cudaBoundaryModeZero);
surf2DLayeredread(&mc2, srcImg, __umul24(sizeof(mc2), x), y, 1, cudaBoundaryModeZero);
surf2DLayeredread(&mr2, srcImg, __umul24(sizeof(mc2), x + 1), y, 1, cudaBoundaryModeZero);
surf2DLayeredread(&bl2, srcImg, __umul24(sizeof(mc2), x - 1), y + 1, 1, cudaBoundaryModeZero);
surf2DLayeredread(&bc2, srcImg, __umul24(sizeof(mc2), x), y + 1, 1, cudaBoundaryModeZero);
surf2DLayeredread(&br2, srcImg, __umul24(sizeof(mc2), x + 1), y + 1, 1, cudaBoundaryModeZero);
float4 c1234 = make_float4(
RELU(CHANNEL8TO8(0, 2)),
RELU(CHANNEL8TO8(1, 2)),
RELU(CHANNEL8TO8(2, 2)),
RELU(CHANNEL8TO8(3, 2))
);
float4 c5678 = make_float4(
RELU(CHANNEL8TO8(4, 2)),
RELU(CHANNEL8TO8(5, 2)),
RELU(CHANNEL8TO8(6, 2)),
RELU(CHANNEL8TO8(7, 2))
);
surf2DLayeredwrite(c1234, dstImg, __umul24(sizeof(c1234), x), y, 0, cudaBoundaryModeZero);
surf2DLayeredwrite(c5678, dstImg, __umul24(sizeof(c5678), x), y, 1, cudaBoundaryModeZero);
}
__global__ static void conv8To8HDNL3(
cudaSurfaceObject_t srcImg, cudaSurfaceObject_t dstImg,
int W, int H, int L)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
float4 tl1, tc1, tr1, ml1, mc1, mr1, bl1, bc1, br1;
float4 tl2, tc2, tr2, ml2, mc2, mr2, bl2, bc2, br2;
surf2DLayeredread(&tl1, srcImg, __umul24(sizeof(mc1), x - 1), y - 1, 0, cudaBoundaryModeZero);
surf2DLayeredread(&tc1, srcImg, __umul24(sizeof(mc1), x), y - 1, 0, cudaBoundaryModeZero);
surf2DLayeredread(&tr1, srcImg, __umul24(sizeof(mc1), x + 1), y - 1, 0, cudaBoundaryModeZero);
surf2DLayeredread(&ml1, srcImg, __umul24(sizeof(mc1), x - 1), y, 0, cudaBoundaryModeZero);
surf2DLayeredread(&mc1, srcImg, __umul24(sizeof(mc1), x), y, 0, cudaBoundaryModeZero);
surf2DLayeredread(&mr1, srcImg, __umul24(sizeof(mc1), x + 1), y, 0, cudaBoundaryModeZero);
surf2DLayeredread(&bl1, srcImg, __umul24(sizeof(mc1), x - 1), y + 1, 0, cudaBoundaryModeZero);
surf2DLayeredread(&bc1, srcImg, __umul24(sizeof(mc1), x), y + 1, 0, cudaBoundaryModeZero);
surf2DLayeredread(&br1, srcImg, __umul24(sizeof(mc1), x + 1), y + 1, 0, cudaBoundaryModeZero);
surf2DLayeredread(&tl2, srcImg, __umul24(sizeof(mc2), x - 1), y - 1, 1, cudaBoundaryModeZero);
surf2DLayeredread(&tc2, srcImg, __umul24(sizeof(mc2), x), y - 1, 1, cudaBoundaryModeZero);
surf2DLayeredread(&tr2, srcImg, __umul24(sizeof(mc2), x + 1), y - 1, 1, cudaBoundaryModeZero);
surf2DLayeredread(&ml2, srcImg, __umul24(sizeof(mc2), x - 1), y, 1, cudaBoundaryModeZero);
surf2DLayeredread(&mc2, srcImg, __umul24(sizeof(mc2), x), y, 1, cudaBoundaryModeZero);
surf2DLayeredread(&mr2, srcImg, __umul24(sizeof(mc2), x + 1), y, 1, cudaBoundaryModeZero);
surf2DLayeredread(&bl2, srcImg, __umul24(sizeof(mc2), x - 1), y + 1, 1, cudaBoundaryModeZero);
surf2DLayeredread(&bc2, srcImg, __umul24(sizeof(mc2), x), y + 1, 1, cudaBoundaryModeZero);
surf2DLayeredread(&br2, srcImg, __umul24(sizeof(mc2), x + 1), y + 1, 1, cudaBoundaryModeZero);
float4 c1234 = make_float4(
RELU(CHANNEL8TO8(0, 3)),
RELU(CHANNEL8TO8(1, 3)),
RELU(CHANNEL8TO8(2, 3)),
RELU(CHANNEL8TO8(3, 3))
);
float4 c5678 = make_float4(
RELU(CHANNEL8TO8(4, 3)),
RELU(CHANNEL8TO8(5, 3)),
RELU(CHANNEL8TO8(6, 3)),
RELU(CHANNEL8TO8(7, 3))
);
surf2DLayeredwrite(c1234, dstImg, __umul24(sizeof(c1234), x), y, 0, cudaBoundaryModeZero);
surf2DLayeredwrite(c5678, dstImg, __umul24(sizeof(c5678), x), y, 1, cudaBoundaryModeZero);
}
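// Last layer: a 2x2, stride-2 transposed convolution from 8 channels to 1,
// i.e. a 2x upscale. Each output pixel (x, y) maps back to source pixel
// (x / 2, y / 2) and uses the weight column selected by its sub-pixel
// position. The generic template writes the clamped [0, 1] float result
// directly; integer output formats are handled by the specializations below.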
template<typename T>
__global__ static void convTranspose8To1HDNL0(
cudaSurfaceObject_t srcImg, cudaSurfaceObject_t dstImg,
int W, int H)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
int index = (y & 1) * 2 + (x & 1);
float4 mc1, mc2;
const unsigned int srcX = x / 2, srcY = y / 2;
surf2DLayeredread(&mc1, srcImg, __umul24(sizeof(mc1), srcX), srcY, 0, cudaBoundaryModeZero);
surf2DLayeredread(&mc2, srcImg, __umul24(sizeof(mc2), srcX), srcY, 1, cudaBoundaryModeZero);
T c = clamp(
mc1.x * HDNL0kernelsL10[0 + index] +
mc1.y * HDNL0kernelsL10[4 + index] +
mc1.z * HDNL0kernelsL10[8 + index] +
mc1.w * HDNL0kernelsL10[12 + index] +
mc2.x * HDNL0kernelsL10[16 + index] +
mc2.y * HDNL0kernelsL10[20 + index] +
mc2.z * HDNL0kernelsL10[24 + index] +
mc2.w * HDNL0kernelsL10[28 + index], 0.0f, 1.0f);
surf2Dwrite(c, dstImg, __umul24(sizeof(c), x), y, cudaBoundaryModeZero);
}
template<typename T>
__global__ static void convTranspose8To1HDNL1(
cudaSurfaceObject_t srcImg, cudaSurfaceObject_t dstImg,
int W, int H)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
int index = (y & 1) * 2 + (x & 1);
float4 mc1, mc2;
const unsigned int srcX = x / 2, srcY = y / 2;
surf2DLayeredread(&mc1, srcImg, __umul24(sizeof(mc1), srcX), srcY, 0, cudaBoundaryModeZero);
surf2DLayeredread(&mc2, srcImg, __umul24(sizeof(mc2), srcX), srcY, 1, cudaBoundaryModeZero);
T c = clamp(
mc1.x * HDNL1kernelsL10[0 + index] +
mc1.y * HDNL1kernelsL10[4 + index] +
mc1.z * HDNL1kernelsL10[8 + index] +
mc1.w * HDNL1kernelsL10[12 + index] +
mc2.x * HDNL1kernelsL10[16 + index] +
mc2.y * HDNL1kernelsL10[20 + index] +
mc2.z * HDNL1kernelsL10[24 + index] +
mc2.w * HDNL1kernelsL10[28 + index], 0.0f, 1.0f);
surf2Dwrite(c, dstImg, __umul24(sizeof(c), x), y, cudaBoundaryModeZero);
}
template<typename T>
__global__ static void convTranspose8To1HDNL2(
cudaSurfaceObject_t srcImg, cudaSurfaceObject_t dstImg,
int W, int H)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
int index = (y & 1) * 2 + (x & 1);
float4 mc1, mc2;
const unsigned int srcX = x / 2, srcY = y / 2;
surf2DLayeredread(&mc1, srcImg, __umul24(sizeof(mc1), srcX), srcY, 0, cudaBoundaryModeZero);
surf2DLayeredread(&mc2, srcImg, __umul24(sizeof(mc2), srcX), srcY, 1, cudaBoundaryModeZero);
T c = clamp(
mc1.x * HDNL2kernelsL10[0 + index] +
mc1.y * HDNL2kernelsL10[4 + index] +
mc1.z * HDNL2kernelsL10[8 + index] +
mc1.w * HDNL2kernelsL10[12 + index] +
mc2.x * HDNL2kernelsL10[16 + index] +
mc2.y * HDNL2kernelsL10[20 + index] +
mc2.z * HDNL2kernelsL10[24 + index] +
mc2.w * HDNL2kernelsL10[28 + index], 0.0f, 1.0f);
surf2Dwrite(c, dstImg, __umul24(sizeof(c), x), y, cudaBoundaryModeZero);
}
template<typename T>
__global__ static void convTranspose8To1HDNL3(
cudaSurfaceObject_t srcImg, cudaSurfaceObject_t dstImg,
int W, int H)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
int index = (y & 1) * 2 + (x & 1);
float4 mc1, mc2;
const unsigned int srcX = x / 2, srcY = y / 2;
surf2DLayeredread(&mc1, srcImg, __umul24(sizeof(mc1), srcX), srcY, 0, cudaBoundaryModeZero);
surf2DLayeredread(&mc2, srcImg, __umul24(sizeof(mc2), srcX), srcY, 1, cudaBoundaryModeZero);
T c = clamp(
mc1.x * HDNL3kernelsL10[0 + index] +
mc1.y * HDNL3kernelsL10[4 + index] +
mc1.z * HDNL3kernelsL10[8 + index] +
mc1.w * HDNL3kernelsL10[12 + index] +
mc2.x * HDNL3kernelsL10[16 + index] +
mc2.y * HDNL3kernelsL10[20 + index] +
mc2.z * HDNL3kernelsL10[24 + index] +
mc2.w * HDNL3kernelsL10[28 + index], 0.0f, 1.0f);
surf2Dwrite(c, dstImg, __umul24(sizeof(c), x), y, cudaBoundaryModeZero);
}
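// uchar specializations: identical to the generic path, except the clamped
// [0, 1] result is rescaled to [0, 255] and rounded by adding 0.5f before the
// implicit float-to-uchar conversion.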
template<>
__global__ void convTranspose8To1HDNL0<uchar>(
cudaSurfaceObject_t srcImg, cudaSurfaceObject_t dstImg,
int W, int H)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
int index = (y & 1) * 2 + (x & 1);
float4 mc1, mc2;
const unsigned int srcX = x / 2, srcY = y / 2;
surf2DLayeredread(&mc1, srcImg, __umul24(sizeof(mc1), srcX), srcY, 0, cudaBoundaryModeZero);
surf2DLayeredread(&mc2, srcImg, __umul24(sizeof(mc2), srcX), srcY, 1, cudaBoundaryModeZero);
uchar c = clamp(
mc1.x * HDNL0kernelsL10[0 + index] +
mc1.y * HDNL0kernelsL10[4 + index] +
mc1.z * HDNL0kernelsL10[8 + index] +
mc1.w * HDNL0kernelsL10[12 + index] +
mc2.x * HDNL0kernelsL10[16 + index] +
mc2.y * HDNL0kernelsL10[20 + index] +
mc2.z * HDNL0kernelsL10[24 + index] +
mc2.w * HDNL0kernelsL10[28 + index], 0.0f, 1.0f) * 255.0f + 0.5f;
surf2Dwrite(c, dstImg, __umul24(sizeof(c), x), y, cudaBoundaryModeZero);
}
template<>
__global__ void convTranspose8To1HDNL1<uchar>(
cudaSurfaceObject_t srcImg, cudaSurfaceObject_t dstImg,
int W, int H)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
int index = (y & 1) * 2 + (x & 1);
float4 mc1, mc2;
const unsigned int srcX = x / 2, srcY = y / 2;
surf2DLayeredread(&mc1, srcImg, __umul24(sizeof(mc1), srcX), srcY, 0, cudaBoundaryModeZero);
surf2DLayeredread(&mc2, srcImg, __umul24(sizeof(mc2), srcX), srcY, 1, cudaBoundaryModeZero);
uchar c = clamp(
mc1.x * HDNL1kernelsL10[0 + index] +
mc1.y * HDNL1kernelsL10[4 + index] +
mc1.z * HDNL1kernelsL10[8 + index] +
mc1.w * HDNL1kernelsL10[12 + index] +
mc2.x * HDNL1kernelsL10[16 + index] +
mc2.y * HDNL1kernelsL10[20 + index] +
mc2.z * HDNL1kernelsL10[24 + index] +
mc2.w * HDNL1kernelsL10[28 + index], 0.0f, 1.0f) * 255.0f + 0.5f;
surf2Dwrite(c, dstImg, __umul24(sizeof(c), x), y, cudaBoundaryModeZero);
}
template<>
__global__ void convTranspose8To1HDNL2<uchar>(
cudaSurfaceObject_t srcImg, cudaSurfaceObject_t dstImg,
int W, int H)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
int index = (y & 1) * 2 + (x & 1);
float4 mc1, mc2;
const unsigned int srcX = x / 2, srcY = y / 2;
surf2DLayeredread(&mc1, srcImg, __umul24(sizeof(mc1), srcX), srcY, 0, cudaBoundaryModeZero);
surf2DLayeredread(&mc2, srcImg, __umul24(sizeof(mc2), srcX), srcY, 1, cudaBoundaryModeZero);
uchar c = clamp(
mc1.x * HDNL2kernelsL10[0 + index] +
mc1.y * HDNL2kernelsL10[4 + index] +
mc1.z * HDNL2kernelsL10[8 + index] +
mc1.w * HDNL2kernelsL10[12 + index] +
mc2.x * HDNL2kernelsL10[16 + index] +
mc2.y * HDNL2kernelsL10[20 + index] +
mc2.z * HDNL2kernelsL10[24 + index] +
mc2.w * HDNL2kernelsL10[28 + index], 0.0f, 1.0f) * 255.0f + 0.5f;
surf2Dwrite(c, dstImg, __umul24(sizeof(c), x), y, cudaBoundaryModeZero);
}
template<>
__global__ void convTranspose8To1HDNL3<uchar>(
cudaSurfaceObject_t srcImg, cudaSurfaceObject_t dstImg,
int W, int H)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
int index = (y & 1) * 2 + (x & 1);
float4 mc1, mc2;
const unsigned int srcX = x / 2, srcY = y / 2;
surf2DLayeredread(&mc1, srcImg, __umul24(sizeof(mc1), srcX), srcY, 0, cudaBoundaryModeZero);
surf2DLayeredread(&mc2, srcImg, __umul24(sizeof(mc2), srcX), srcY, 1, cudaBoundaryModeZero);
uchar c = clamp(
mc1.x * HDNL3kernelsL10[0 + index] +
mc1.y * HDNL3kernelsL10[4 + index] +
mc1.z * HDNL3kernelsL10[8 + index] +
mc1.w * HDNL3kernelsL10[12 + index] +
mc2.x * HDNL3kernelsL10[16 + index] +
mc2.y * HDNL3kernelsL10[20 + index] +
mc2.z * HDNL3kernelsL10[24 + index] +
mc2.w * HDNL3kernelsL10[28 + index], 0.0f, 1.0f) * 255.0f + 0.5f;
surf2Dwrite(c, dstImg, __umul24(sizeof(c), x), y, cudaBoundaryModeZero);
}
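// ushort specializations: as above, but rescaled to the 16-bit range
// [0, 65535].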
template<>
__global__ void convTranspose8To1HDNL0<ushort>(
cudaSurfaceObject_t srcImg, cudaSurfaceObject_t dstImg,
int W, int H)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
int index = (y & 1) * 2 + (x & 1);
float4 mc1, mc2;
const unsigned int srcX = x / 2, srcY = y / 2;
surf2DLayeredread(&mc1, srcImg, __umul24(sizeof(mc1), srcX), srcY, 0, cudaBoundaryModeZero);
surf2DLayeredread(&mc2, srcImg, __umul24(sizeof(mc2), srcX), srcY, 1, cudaBoundaryModeZero);
ushort c = clamp(
mc1.x * HDNL0kernelsL10[0 + index] +
mc1.y * HDNL0kernelsL10[4 + index] +
mc1.z * HDNL0kernelsL10[8 + index] +
mc1.w * HDNL0kernelsL10[12 + index] +
mc2.x * HDNL0kernelsL10[16 + index] +
mc2.y * HDNL0kernelsL10[20 + index] +
mc2.z * HDNL0kernelsL10[24 + index] +
mc2.w * HDNL0kernelsL10[28 + index], 0.0f, 1.0f) * 65535.0f + 0.5f;
surf2Dwrite(c, dstImg, __umul24(sizeof(c), x), y, cudaBoundaryModeZero);
}
template<>
__global__ void convTranspose8To1HDNL1<ushort>(
cudaSurfaceObject_t srcImg, cudaSurfaceObject_t dstImg,
int W, int H)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
int index = (y & 1) * 2 + (x & 1);
float4 mc1, mc2;
const unsigned int srcX = x / 2, srcY = y / 2;
surf2DLayeredread(&mc1, srcImg, __umul24(sizeof(mc1), srcX), srcY, 0, cudaBoundaryModeZero);
surf2DLayeredread(&mc2, srcImg, __umul24(sizeof(mc2), srcX), srcY, 1, cudaBoundaryModeZero);
ushort c = clamp(
mc1.x * HDNL1kernelsL10[0 + index] +
mc1.y * HDNL1kernelsL10[4 + index] +
mc1.z * HDNL1kernelsL10[8 + index] +
mc1.w * HDNL1kernelsL10[12 + index] +
mc2.x * HDNL1kernelsL10[16 + index] +
mc2.y * HDNL1kernelsL10[20 + index] +
mc2.z * HDNL1kernelsL10[24 + index] +
mc2.w * HDNL1kernelsL10[28 + index], 0.0f, 1.0f) * 65535.0f + 0.5f;
surf2Dwrite(c, dstImg, __umul24(sizeof(c), x), y, cudaBoundaryModeZero);
}
template<>
__global__ void convTranspose8To1HDNL2<ushort>(
cudaSurfaceObject_t srcImg, cudaSurfaceObject_t dstImg,
int W, int H)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
int index = (y & 1) * 2 + (x & 1);
float4 mc1, mc2;
const unsigned int srcX = x / 2, srcY = y / 2;
surf2DLayeredread(&mc1, srcImg, __umul24(sizeof(mc1), srcX), srcY, 0, cudaBoundaryModeZero);
surf2DLayeredread(&mc2, srcImg, __umul24(sizeof(mc2), srcX), srcY, 1, cudaBoundaryModeZero);
ushort c = clamp(
mc1.x * HDNL2kernelsL10[0 + index] +
mc1.y * HDNL2kernelsL10[4 + index] +
mc1.z * HDNL2kernelsL10[8 + index] +
mc1.w * HDNL2kernelsL10[12 + index] +
mc2.x * HDNL2kernelsL10[16 + index] +
mc2.y * HDNL2kernelsL10[20 + index] +
mc2.z * HDNL2kernelsL10[24 + index] +
mc2.w * HDNL2kernelsL10[28 + index], 0.0f, 1.0f) * 65535.0f + 0.5f;
surf2Dwrite(c, dstImg, __umul24(sizeof(c), x), y, cudaBoundaryModeZero);
}
template<>
__global__ void convTranspose8To1HDNL3<ushort>(
cudaSurfaceObject_t srcImg, cudaSurfaceObject_t dstImg,
int W, int H)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
int index = (y & 1) * 2 + (x & 1);
float4 mc1, mc2;
const unsigned int srcX = x / 2, srcY = y / 2;
surf2DLayeredread(&mc1, srcImg, __umul24(sizeof(mc1), srcX), srcY, 0, cudaBoundaryModeZero);
surf2DLayeredread(&mc2, srcImg, __umul24(sizeof(mc2), srcX), srcY, 1, cudaBoundaryModeZero);
ushort c = clamp(
mc1.x * HDNL3kernelsL10[0 + index] +
mc1.y * HDNL3kernelsL10[4 + index] +
mc1.z * HDNL3kernelsL10[8 + index] +
mc1.w * HDNL3kernelsL10[12 + index] +
mc2.x * HDNL3kernelsL10[16 + index] +
mc2.y * HDNL3kernelsL10[20 + index] +
mc2.z * HDNL3kernelsL10[24 + index] +
mc2.w * HDNL3kernelsL10[28 + index], 0.0f, 1.0f) * 65535.0f + 0.5f;
surf2Dwrite(c, dstImg, __umul24(sizeof(c), x), y, cudaBoundaryModeZero);
}
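// Host entry point for 8-bit input. Pipeline: upload the source plane into a
// CUDA array bound to a texture (normalized-float reads, border addressing),
// run the ACNet layers for the requested HDN level -- RUNKERNEL(n, T),
// defined earlier in this file, is assumed to expand to the conv1To8HDNLn,
// conv8To8HDNLn, and convTranspose8To1HDNLn<T> launches on 'stream' -- then
// copy the 2x-upscaled result back to temporarily page-locked host memory.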
void Anime4KCPP::Cuda::cuRunKernelACNetB(const unsigned char* inputData, unsigned char* outputData, ACCudaParamACNet * param)
{
cudaError_t err = cudaSuccess;
if (currCudaDeviceID)
{
err = cudaSetDevice(currCudaDeviceID);
CheckCudaErr(err);
}
cudaStream_t stream;
err = cudaStreamCreate(&stream);
CheckCudaErr(err);
cudaChannelFormatDesc inoutChannelDesc = cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindUnsigned);
cudaChannelFormatDesc tmpChannelDesc = cudaCreateChannelDesc(32, 32, 32, 32, cudaChannelFormatKindFloat);
cudaExtent extent = make_cudaExtent(param->orgW, param->orgH, 2);
const int W = 2 * param->orgW, H = 2 * param->orgH;
cudaArray_t cuInputArray;
err = cudaMallocArray(&cuInputArray, &inoutChannelDesc,
param->orgW, param->orgH);
CheckCudaErr(err);
cudaArray_t cuArray1;
err = cudaMalloc3DArray(&cuArray1, &tmpChannelDesc, extent,
cudaArraySurfaceLoadStore | cudaArrayLayered);
CheckCudaErr(err);
cudaArray_t cuArray2;
err = cudaMalloc3DArray(&cuArray2, &tmpChannelDesc, extent,
cudaArraySurfaceLoadStore | cudaArrayLayered);
CheckCudaErr(err);
cudaArray_t cuOutputArray;
err = cudaMallocArray(&cuOutputArray, &inoutChannelDesc,
W, H, cudaArraySurfaceLoadStore);
CheckCudaErr(err);
struct cudaResourceDesc resDesc;
struct cudaTextureDesc texDesc;
memset(&resDesc, 0, sizeof(resDesc));
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = cudaAddressModeBorder;
texDesc.addressMode[1] = cudaAddressModeBorder;
texDesc.readMode = cudaReadModeNormalizedFloat;
texDesc.normalizedCoords = 0;
resDesc.resType = cudaResourceTypeArray;
resDesc.res.array.array = cuInputArray;
cudaTextureObject_t inTex = 0;
err = cudaCreateTextureObject(&inTex, &resDesc, &texDesc, NULL);
CheckCudaErr(err);
resDesc.res.array.array = cuArray1;
cudaSurfaceObject_t surf1 = 0;
err = cudaCreateSurfaceObject(&surf1, &resDesc);
CheckCudaErr(err);
resDesc.res.array.array = cuArray2;
cudaSurfaceObject_t surf2 = 0;
err = cudaCreateSurfaceObject(&surf2, &resDesc);
CheckCudaErr(err);
resDesc.res.array.array = cuOutputArray;
cudaSurfaceObject_t outSurf = 0;
err = cudaCreateSurfaceObject(&outSurf, &resDesc);
CheckCudaErr(err);
err = cudaMemcpy2DToArrayAsync(cuInputArray, 0, 0, inputData,
param->stride, sizeof(uchar) * param->orgW, param->orgH,
cudaMemcpyHostToDevice, stream);
CheckCudaErr(err);
dim3 dimBlock(16, 16);
dim3 dimGrid(
(param->orgW + dimBlock.x - 1) / dimBlock.x,
(param->orgH + dimBlock.y - 1) / dimBlock.y
);
dim3 dimGridout(
(param->orgW * 2 + dimBlock.x - 1) / dimBlock.x,
(param->orgH * 2 + dimBlock.y - 1) / dimBlock.y
);
switch (param->HDNLevel)
{
case 0:
RUNKERNEL(0, uchar)
break;
case 1:
RUNKERNEL(1, uchar)
break;
case 2:
RUNKERNEL(2, uchar)
break;
case 3:
RUNKERNEL(3, uchar)
break;
default:
RUNKERNEL(0, uchar)
break;
}
err = cudaHostRegister(outputData, sizeof(uchar) * W * H, cudaHostRegisterDefault);
CheckCudaErr(err);
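// The destination pitch is assumed to be exactly W bytes (tightly packed);
// only the input buffer carries an explicit stride in ACCudaParamACNet.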
err = cudaMemcpy2DFromArrayAsync(outputData, sizeof(uchar) * W,
cuOutputArray, 0, 0, sizeof(uchar) * W, H,
cudaMemcpyDeviceToHost, stream);
CheckCudaErr(err);
err = cudaStreamSynchronize(stream);
CheckCudaErr(err);
err = cudaHostUnregister(outputData);
CheckCudaErr(err);
cudaDestroyTextureObject(inTex);
cudaDestroySurfaceObject(surf1);
cudaDestroySurfaceObject(surf2);
cudaDestroySurfaceObject(outSurf);
cudaFreeArray(cuInputArray);
cudaFreeArray(cuArray1);
cudaFreeArray(cuArray2);
cudaFreeArray(cuOutputArray);
cudaStreamDestroy(stream);
}
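// 16-bit variant: identical to cuRunKernelACNetB except for the 16-bit
// channel format and the ushort kernel instantiations.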
void Anime4KCPP::Cuda::cuRunKernelACNetW(const unsigned short int* inputData, unsigned short int* outputData, ACCudaParamACNet* param)
{
cudaError_t err = cudaSuccess;
if (currCudaDeviceID)
{
err = cudaSetDevice(currCudaDeviceID);
CheckCudaErr(err);
}
cudaStream_t stream;
err = cudaStreamCreate(&stream);
CheckCudaErr(err);
cudaChannelFormatDesc inoutChannelDesc = cudaCreateChannelDesc(16, 0, 0, 0, cudaChannelFormatKindUnsigned);
cudaChannelFormatDesc tmpChannelDesc = cudaCreateChannelDesc(32, 32, 32, 32, cudaChannelFormatKindFloat);
cudaExtent extent = make_cudaExtent(param->orgW, param->orgH, 2);
const int W = 2 * param->orgW, H = 2 * param->orgH;
cudaArray_t cuInputArray;
err = cudaMallocArray(&cuInputArray, &inoutChannelDesc,
param->orgW, param->orgH);
CheckCudaErr(err);
cudaArray_t cuArray1;
err = cudaMalloc3DArray(&cuArray1, &tmpChannelDesc, extent,
cudaArraySurfaceLoadStore | cudaArrayLayered);
CheckCudaErr(err);
cudaArray_t cuArray2;
err = cudaMalloc3DArray(&cuArray2, &tmpChannelDesc, extent,
cudaArraySurfaceLoadStore | cudaArrayLayered);
CheckCudaErr(err);
cudaArray_t cuOutputArray;
err = cudaMallocArray(&cuOutputArray, &inoutChannelDesc,
W, H, cudaArraySurfaceLoadStore);
CheckCudaErr(err);
struct cudaResourceDesc resDesc;
struct cudaTextureDesc texDesc;
memset(&resDesc, 0, sizeof(resDesc));
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = cudaAddressModeBorder;
texDesc.addressMode[1] = cudaAddressModeBorder;
texDesc.readMode = cudaReadModeNormalizedFloat;
texDesc.normalizedCoords = 0;
resDesc.resType = cudaResourceTypeArray;
resDesc.res.array.array = cuInputArray;
cudaTextureObject_t inTex = 0;
err = cudaCreateTextureObject(&inTex, &resDesc, &texDesc, NULL);
CheckCudaErr(err);
resDesc.res.array.array = cuArray1;
cudaSurfaceObject_t surf1 = 0;
err = cudaCreateSurfaceObject(&surf1, &resDesc);
CheckCudaErr(err);
resDesc.res.array.array = cuArray2;
cudaSurfaceObject_t surf2 = 0;
err = cudaCreateSurfaceObject(&surf2, &resDesc);
CheckCudaErr(err);
resDesc.res.array.array = cuOutputArray;
cudaSurfaceObject_t outSurf = 0;
err = cudaCreateSurfaceObject(&outSurf, &resDesc);
CheckCudaErr(err);
err = cudaMemcpy2DToArrayAsync(cuInputArray, 0, 0, inputData,
param->stride, sizeof(ushort) * param->orgW, param->orgH,
cudaMemcpyHostToDevice, stream);
CheckCudaErr(err);
dim3 dimBlock(16, 16);
dim3 dimGrid(
(param->orgW + dimBlock.x - 1) / dimBlock.x,
(param->orgH + dimBlock.y - 1) / dimBlock.y
);
dim3 dimGridout(
(param->orgW * 2 + dimBlock.x - 1) / dimBlock.x,
(param->orgH * 2 + dimBlock.y - 1) / dimBlock.y
);
switch (param->HDNLevel)
{
case 0:
RUNKERNEL(0, ushort)
break;
case 1:
RUNKERNEL(1, ushort)
break;
case 2:
RUNKERNEL(2, ushort)
break;
case 3:
RUNKERNEL(3, ushort)
break;
default:
RUNKERNEL(0, ushort)
break;
}
err = cudaHostRegister(outputData, sizeof(ushort) * W * H, cudaHostRegisterDefault);
CheckCudaErr(err);
err = cudaMemcpy2DFromArrayAsync(outputData, sizeof(ushort) * W,
cuOutputArray, 0, 0, sizeof(ushort) * W, H,
cudaMemcpyDeviceToHost, stream);
CheckCudaErr(err);
err = cudaStreamSynchronize(stream);
CheckCudaErr(err);
err = cudaHostUnregister(outputData);
CheckCudaErr(err);
cudaDestroyTextureObject(inTex);
cudaDestroySurfaceObject(surf1);
cudaDestroySurfaceObject(surf2);
cudaDestroySurfaceObject(outSurf);
cudaFreeArray(cuInputArray);
cudaFreeArray(cuArray1);
cudaFreeArray(cuArray2);
cudaFreeArray(cuOutputArray);
cudaStreamDestroy(stream);
}
void Anime4KCPP::Cuda::cuRunKernelACNetF(const float* inputData, float* outputData, ACCudaParamACNet* param)
{
cudaError_t err = cudaSuccess;
if (currCudaDeviceID)
{
err = cudaSetDevice(currCudaDeviceID);
CheckCudaErr(err);
}
cudaStream_t stream;
cudaStreamCreate(&stream);
cudaChannelFormatDesc inoutChannelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
cudaChannelFormatDesc tmpChannelDesc = cudaCreateChannelDesc(32, 32, 32, 32, cudaChannelFormatKindFloat);
cudaExtent extent = make_cudaExtent(param->orgW, param->orgH, 2);
const int W = 2 * param->orgW, H = 2 * param->orgH;
cudaArray_t cuInputArray;
err = cudaMallocArray(&cuInputArray, &inoutChannelDesc,
param->orgW, param->orgH);
CheckCudaErr(err);
cudaArray_t cuArray1;
err = cudaMalloc3DArray(&cuArray1, &tmpChannelDesc, extent,
cudaArraySurfaceLoadStore | cudaArrayLayered);
CheckCudaErr(err);
cudaArray_t cuArray2;
err = cudaMalloc3DArray(&cuArray2, &tmpChannelDesc, extent,
cudaArraySurfaceLoadStore | cudaArrayLayered);
CheckCudaErr(err);
cudaArray_t cuOutputArray;
err = cudaMallocArray(&cuOutputArray, &inoutChannelDesc,
W, H, cudaArraySurfaceLoadStore);
CheckCudaErr(err);
struct cudaResourceDesc resDesc;
struct cudaTextureDesc texDesc;
memset(&resDesc, 0, sizeof(resDesc));
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = cudaAddressModeBorder;
texDesc.addressMode[1] = cudaAddressModeBorder;
resDesc.resType = cudaResourceTypeArray;
resDesc.res.array.array = cuInputArray;
cudaTextureObject_t inTex = 0;
err = cudaCreateTextureObject(&inTex, &resDesc, &texDesc, NULL);
CheckCudaErr(err);
resDesc.res.array.array = cuArray1;
cudaSurfaceObject_t surf1 = 0;
err = cudaCreateSurfaceObject(&surf1, &resDesc);
CheckCudaErr(err);
resDesc.res.array.array = cuArray2;
cudaSurfaceObject_t surf2 = 0;
err = cudaCreateSurfaceObject(&surf2, &resDesc);
CheckCudaErr(err);
resDesc.res.array.array = cuOutputArray;
cudaSurfaceObject_t outSurf = 0;
err = cudaCreateSurfaceObject(&outSurf, &resDesc);
CheckCudaErr(err);
err = cudaMemcpy2DToArrayAsync(cuInputArray, 0, 0, inputData,
param->stride, sizeof(float) * param->orgW, param->orgH,
cudaMemcpyHostToDevice, stream);
CheckCudaErr(err);
dim3 dimBlock(16, 16);
dim3 dimGrid(
(param->orgW + dimBlock.x - 1) / dimBlock.x,
(param->orgH + dimBlock.y - 1) / dimBlock.y
);
dim3 dimGridout(
(param->orgW * 2 + dimBlock.x - 1) / dimBlock.x,
(param->orgH * 2 + dimBlock.y - 1) / dimBlock.y
);
switch (param->HDNLevel)
{
case 0:
RUNKERNEL(0, float)
break;
case 1:
RUNKERNEL(1, float)
break;
case 2:
RUNKERNEL(2, float)
break;
case 3:
RUNKERNEL(3, float)
break;
default:
RUNKERNEL(0, float)
break;
}
err = cudaHostRegister(outputData, sizeof(float) * W * H, cudaHostRegisterDefault);
CheckCudaErr(err);
err = cudaMemcpy2DFromArrayAsync(outputData, sizeof(float) * W,
cuOutputArray, 0, 0, sizeof(float) * W, H,
cudaMemcpyDeviceToHost, stream);
CheckCudaErr(err);
err = cudaStreamSynchronize(stream);
CheckCudaErr(err);
err = cudaHostUnregister(outputData);
CheckCudaErr(err);
cudaDestroyTextureObject(inTex);
cudaDestroySurfaceObject(surf1);
cudaDestroySurfaceObject(surf2);
cudaDestroySurfaceObject(outSurf);
cudaFreeArray(cuInputArray);
cudaFreeArray(cuArray1);
cudaFreeArray(cuArray2);
cudaFreeArray(cuOutputArray);
cudaStreamDestroy(stream);
}
|
53e55dee66d101e7989c16301b454cf0c52fc8c1.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vec_lteScalarf.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
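// Auto-generated benchmark harness: sweeps the matrix sizes below (count taken from argv[1]) against 20 block shapes, timing 1000 launches of vec_lteScalarf per configuration.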
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
size_t n = XSIZE*YSIZE;
float *result = NULL;
hipMalloc(&result, XSIZE*YSIZE*sizeof(float));
float *x = NULL;
hipMalloc(&x, XSIZE*YSIZE*sizeof(float));
float y = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(vec_lteScalarf, dim3(gridBlock), dim3(threadBlock), 0, 0, n, result, x, y);
hipDeviceSynchronize();
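// Warm up with 10 untimed launches so the timed loop below measures steady-state launch latency.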
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(vec_lteScalarf, dim3(gridBlock), dim3(threadBlock), 0, 0, n, result, x, y);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(vec_lteScalarf, dim3(gridBlock), dim3(threadBlock), 0, 0, n, result, x, y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 53e55dee66d101e7989c16301b454cf0c52fc8c1.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vec_lteScalarf.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
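// Auto-generated benchmark harness: sweeps the matrix sizes below (count taken from argv[1]) against 20 block shapes, timing 1000 launches of vec_lteScalarf per configuration.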
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
size_t n = XSIZE*YSIZE;
float *result = NULL;
cudaMalloc(&result, XSIZE*YSIZE*sizeof(float));
float *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE*sizeof(float));
float y = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
vec_lteScalarf<<<gridBlock,threadBlock>>>(n,result,x,y);
cudaDeviceSynchronize();
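// Warm up with 10 untimed launches so the timed loop below measures steady-state launch latency.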
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
vec_lteScalarf<<<gridBlock,threadBlock>>>(n,result,x,y);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
vec_lteScalarf<<<gridBlock,threadBlock>>>(n,result,x,y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
bdb5c10dd0b0b09f35b2cb6d445e415e4894645c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Square of the number N
#include <stdio.h>
#define N 24
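// Note: despite its name, the kernel squares each element, b[i] = a[i] * a[i], using one block per element.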
__global__
void add(int *a, int *b) {
int i = blockIdx.x;
if (i<N) {
b[i] = a[i]*a[i];
}
}
int main() {
int ha[N], hb[N];
int *da, *db;
hipMalloc((void **)&da, N*sizeof(int));
hipMalloc((void **)&db, N*sizeof(int));
for (int i = 0; i<N; ++i) {
ha[i] = i;
}
hipMemcpy(da, ha, N*sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( add), dim3(N), dim3(1), 0, 0, da, db);
hipMemcpy(hb, db, N*sizeof(int), hipMemcpyDeviceToHost);
for (int i = 0; i<N; ++i) {
printf("%d\n", hb[i]);
}
hipFree(da);
hipFree(db);
return 0;
} | bdb5c10dd0b0b09f35b2cb6d445e415e4894645c.cu | // Square of the number N
#include <stdio.h>
#define N 24
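// Note: despite its name, the kernel squares each element, b[i] = a[i] * a[i], using one block per element.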
__global__
void add(int *a, int *b) {
int i = blockIdx.x;
if (i<N) {
b[i] = a[i]*a[i];
}
}
int main() {
int ha[N], hb[N];
int *da, *db;
cudaMalloc((void **)&da, N*sizeof(int));
cudaMalloc((void **)&db, N*sizeof(int));
for (int i = 0; i<N; ++i) {
ha[i] = i;
}
cudaMemcpy(da, ha, N*sizeof(int), cudaMemcpyHostToDevice);
add<<<N, 1>>>(da, db);
cudaMemcpy(hb, db, N*sizeof(int), cudaMemcpyDeviceToHost);
for (int i = 0; i<N; ++i) {
printf("%d\n", hb[i]);
}
cudaFree(da);
cudaFree(db);
return 0;
} |
4aea3fdbd36ace8e6e6bc9344a905324cc62a854.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <random>
#include <cutf/memory.hpp>
#include <cutf/type.hpp>
#include <cutf/debug/matrix.hpp>
constexpr unsigned warp_size = 32;
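// Compares a 16x16x16 matrix product whose operands are rounded to TF32 precision (via cutf::type::cast) against a full-FP32 reference, then reports the maximum elementwise error.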
__global__ void m16n16k16_tf32(float* const c_ptr, const float* const a_ptr, const float* const b_ptr) {
constexpr unsigned N = 16;
const unsigned lane_id = threadIdx.x & 0x1f;
const auto m = lane_id & 0xf;
const auto n_offset = lane_id / N;
for (unsigned i = 0; i < N; i+= warp_size / N) {
const auto n = i + n_offset;
float sum = 0.0f;
for (unsigned k = 0; k < N; k++) {
sum += cutf::type::cast<nvcuda::wmma::precision::tf32>(a_ptr[m + k * N]) * cutf::type::cast<nvcuda::wmma::precision::tf32>(b_ptr[k + n * N]);
}
c_ptr[m + n * N] += sum;
}
}
__global__ void m16n16k16_fp32(float* const c_ptr, const float* const a_ptr, const float* const b_ptr) {
constexpr unsigned N = 16;
const unsigned lane_id = threadIdx.x & 0x1f;
const auto m = lane_id & 0xf;
const auto n_offset = lane_id / N;
for (unsigned i = 0; i < N; i+= warp_size / N) {
const auto n = i + n_offset;
float sum = 0.0f;
for (unsigned k = 0; k < N; k++) {
sum += a_ptr[m + k * N] * b_ptr[k + n * N];
}
c_ptr[m + n * N] += sum;
}
}
double get_max_error(const float* const fp32_ptr, const float* const tf32_ptr, const unsigned m, const unsigned n) {
double max_error = 0.0;
for (unsigned i = 0; i < m; i++) {
for (unsigned j = 0; j < n; j++) {
max_error = ::max(std::abs(static_cast<double>(fp32_ptr[i * n + j]) - static_cast<double>(tf32_ptr[i * n + j])), max_error);
}
}
return max_error;
}
int main() {
constexpr unsigned N = 16;
auto A = cutf::memory::get_host_unique_ptr<float>(N * N);
auto B = cutf::memory::get_host_unique_ptr<float>(N * N);
auto C_tf32 = cutf::memory::get_host_unique_ptr<float>(N * N);
auto C_fp32 = cutf::memory::get_host_unique_ptr<float>(N * N);
std::mt19937 mt(std::random_device{}());
float max_range = 1.0f;
std::uniform_real_distribution<float> dist(-max_range, max_range);
for (unsigned i = 0; i < N * N; i++) {
A.get()[i] = dist(mt);
B.get()[i] = dist(mt);
C_tf32.get()[i] = 0.0f;
C_fp32.get()[i] = 0.0f;
}
hipLaunchKernelGGL(( m16n16k16_tf32), dim3(1), dim3(warp_size), 0, 0, C_tf32.get(), A.get(), B.get());
hipLaunchKernelGGL(( m16n16k16_fp32), dim3(1), dim3(warp_size), 0, 0, C_fp32.get(), A.get(), B.get());
hipDeviceSynchronize();
std::printf("max_error = %e\n", get_max_error(C_fp32.get(), C_tf32.get(), N, N));
}
| 4aea3fdbd36ace8e6e6bc9344a905324cc62a854.cu | #include <iostream>
#include <random>
#include <cutf/memory.hpp>
#include <cutf/type.hpp>
#include <cutf/debug/matrix.hpp>
constexpr unsigned warp_size = 32;
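// Compares a 16x16x16 matrix product whose operands are rounded to TF32 precision (via cutf::type::cast) against a full-FP32 reference, then reports the maximum elementwise error.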
__global__ void m16n16k16_tf32(float* const c_ptr, const float* const a_ptr, const float* const b_ptr) {
constexpr unsigned N = 16;
const unsigned lane_id = threadIdx.x & 0x1f;
const auto m = lane_id & 0xf;
const auto n_offset = lane_id / N;
for (unsigned i = 0; i < N; i+= warp_size / N) {
const auto n = i + n_offset;
float sum = 0.0f;
for (unsigned k = 0; k < N; k++) {
sum += cutf::type::cast<nvcuda::wmma::precision::tf32>(a_ptr[m + k * N]) * cutf::type::cast<nvcuda::wmma::precision::tf32>(b_ptr[k + n * N]);
}
c_ptr[m + n * N] += sum;
}
}
__global__ void m16n16k16_fp32(float* const c_ptr, const float* const a_ptr, const float* const b_ptr) {
constexpr unsigned N = 16;
const unsigned lane_id = threadIdx.x & 0x1f;
const auto m = lane_id & 0xf;
const auto n_offset = lane_id / N;
for (unsigned i = 0; i < N; i+= warp_size / N) {
const auto n = i + n_offset;
float sum = 0.0f;
for (unsigned k = 0; k < N; k++) {
sum += a_ptr[m + k * N] * b_ptr[k + n * N];
}
c_ptr[m + n * N] += sum;
}
}
double get_max_error(const float* const fp32_ptr, const float* const tf32_ptr, const unsigned m, const unsigned n) {
double max_error = 0.0;
for (unsigned i = 0; i < m; i++) {
for (unsigned j = 0; j < n; j++) {
max_error = std::max(std::abs(static_cast<double>(fp32_ptr[i * n + j]) - static_cast<double>(tf32_ptr[i * n + j])), max_error);
}
}
return max_error;
}
int main() {
constexpr unsigned N = 16;
auto A = cutf::memory::get_host_unique_ptr<float>(N * N);
auto B = cutf::memory::get_host_unique_ptr<float>(N * N);
auto C_tf32 = cutf::memory::get_host_unique_ptr<float>(N * N);
auto C_fp32 = cutf::memory::get_host_unique_ptr<float>(N * N);
std::mt19937 mt(std::random_device{}());
float max_range = 1.0f;
std::uniform_real_distribution<float> dist(-max_range, max_range);
for (unsigned i = 0; i < N * N; i++) {
A.get()[i] = dist(mt);
B.get()[i] = dist(mt);
C_tf32.get()[i] = 0.0f;
C_fp32.get()[i] = 0.0f;
}
m16n16k16_tf32<<<1, warp_size>>>(C_tf32.get(), A.get(), B.get());
m16n16k16_fp32<<<1, warp_size>>>(C_fp32.get(), A.get(), B.get());
cudaDeviceSynchronize();
std::printf("max_error = %e\n", get_max_error(C_fp32.get(), C_tf32.get(), N, N));
}
|
fe4571d3f0fe9b109b70102bfb67d411f7719370.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "BilateralFilter.h"
#include "generalcuda.h"
#include <texture_types.h>
#include <texture_fetch_functions.h>
__constant__ float mask[FILTER_SIZE * FILTER_SIZE];
__constant__ float mask1D[FILTER_SIZE];
__constant__ float pre_exp[256];
__constant__ float pre_sigmar[2048];
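// Host-precomputed lookup tables (filled via setPre_calculation / setPre_sigmaR): pre_exp caches range-kernel weights indexed by per-channel color difference, pre_sigmar caches the depth-dependent noise sigma indexed by depth/3.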
texture<unsigned char, hipTextureType2D, hipReadModeElementType> CmapTexture;
extern "C" void setDistanceMask(float *h_Kernel, size_t mem_size){
CUDA_SAFE_CALL (hipMemcpyToSymbol(mask, h_Kernel, mem_size));
}
extern "C" void setDistanceMask1D(float *h_Kernel, size_t mem_size){
CUDA_SAFE_CALL (hipMemcpyToSymbol(mask1D, h_Kernel, mem_size));
}
extern "C" void setPre_calculation(float *h_Kernel, size_t mem_size){
CUDA_SAFE_CALL (hipMemcpyToSymbol(pre_exp, h_Kernel, mem_size));
}
extern "C" void setPre_sigmaR(float *h_Kernel, size_t mem_size){
CUDA_SAFE_CALL (hipMemcpyToSymbol(pre_sigmar, h_Kernel, mem_size));
}
extern "C" void setTextureRellenado(DetectedParameters const *parameters, hipArray **CmapArray)
{
CmapTexture.addressMode[0] = hipAddressModeBorder;
CmapTexture.addressMode[1] = hipAddressModeBorder;
CmapTexture.filterMode = hipFilterModePoint;
CmapTexture.normalized = false;
CUDA_SAFE_CALL (hipBindTextureToArray(CmapTexture, *CmapArray));
}
// Performs joint bilateral filtering on a depth image outside the uncertainty zone
__global__ void imageBilateralFilterKernel(const unsigned short int* input, const unsigned short int* modelo, const uchar* maskB, const uchar* maskDNC, const uchar* foreground, unsigned short int* output, int nl, int nc, float a, float b, float c)
{
float sum= 0.0f;
float norm= 0.0f;
float weightR;
int dif;
float sigmar;
int _2sigmar2;
int filterCenter = FILTER_SIZE/2;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int x = blockIdx.x * blockDim.x + threadIdx.x;
if(y >= nl || x >= nc)
return;
unsigned short int pixelD = input[y*nc + x];
//unsigned short int pixelD = modelo[y*nc + x];
/*if (maskDNC[y*nc + x] == 255 && modelo[y*nc + x] > input[y*nc + x]){
output[y*nc + x] = input[y*nc + x];
return;
}*/
if (pixelD == 0){
output[y*nc + x] = pixelD;
return;
}
if (maskB[y*nc + x] == 0 && foreground[y*nc + x] == 0 && maskDNC[y*nc + x] == 0){
// compute the variance as a function of pixel depth (distance-dependent noise)
//sigmar = c + b*input[y*nc + x] + a*input[y*nc + x]*input[y*nc + x];
//sigmar = (c + b*pixelD + a*pixelD*pixelD)*100000000;
sigmar = pre_sigmar[pixelD/3]*100000000;
_2sigmar2 = 2*sigmar*sigmar;
for (int i = 0; i < FILTER_SIZE; i++){
if ((y-filterCenter+i >= 0) && (y-filterCenter+i < nl)){
for (int j = 0; j < FILTER_SIZE; j++){
if ((x-filterCenter+j >= 0) && (x-filterCenter+j < nc) && input[(y-filterCenter+i)*nc + (x-filterCenter+j)] > 0 && foreground[(y-filterCenter+i)*nc + (x-filterCenter+j)] == 0 && maskB[(y-filterCenter+i)*nc + (x-filterCenter+j)] == 0){ //&& maskDNC[(y-F.height/2+i)*nc + (x-(F.width/2-j))] == 0){
if (maskDNC[(y-filterCenter+i)*nc + (x-filterCenter+j)] == 255 /*&& modelo[(y-filterCenter+i)*nc + (x-filterCenter+j)] > input[(y-filterCenter+i)*nc + (x-filterCenter+j)]*/)
continue;
//dif = modelo[(y-F.height/2+i)*nc + (x-(F.width/2-j))] - input[y*nc + x];
dif = modelo[(y-filterCenter+i)*nc + (x-filterCenter+j)] - pixelD;
float A = 100000000*100000000*dif*dif /_2sigmar2;
A=-A;
//float A= ((dif*dif)>>1) /(sigmar*sigmar);
//weightR = exp(-A)/(2*PI*sigmar*sigmar);
weightR = __expf(A);//(2*PI*sigmar*sigmar);
//weightR = 1+A/*+(A*A/2)*//*+(A*A*A/6)*//*+(A*A*A*A/24)*//*+(A*A*A*A*A/120)*//*+(A*A*A*A*A*A/720)*/;
//weightR = __expf(-((dif*dif) /(2*sigmar*sigmar)));//(2*PI*sigmar*sigmar);
norm += mask[i*FILTER_SIZE+j] * weightR;
//norm += __fmul_[rn,rz,ru,rd](mask[i*FILTER_SIZE+j], weightR);
sum += input[(y-filterCenter+i)*nc + (x-filterCenter+j)] * mask[i*FILTER_SIZE+j] * weightR;
}
}
}
}
output[y*nc + x] = sum/norm;
}
else if (foreground[y*nc + x] == 255){
// compute the variance as a function of pixel depth (distance-dependent noise)
//sigmar = c + b*input[y*nc + x] + a*input[y*nc + x]*input[y*nc + x];
//sigmar = (c + b*pixelD + a*pixelD*pixelD)*100000000;
sigmar = pre_sigmar[pixelD/3]*100000000;
_2sigmar2 = 2*sigmar*sigmar;
for (int i = 0; i < FILTER_SIZE; i++){
if ((y-filterCenter+i >= 0) && (y-filterCenter+i < nl)){
for (int j = 0; j < FILTER_SIZE; j++){
if ((x-filterCenter+j >= 0) && (x-filterCenter+j < nc) && input[(y-filterCenter+i)*nc + (x-filterCenter+j)] > 0 && foreground[(y-filterCenter+i)*nc + (x-filterCenter+j)] == 255 /*&& maskB[(y-filterCenter+i)*nc + (x-filterCenter+j)] == 0*/){
//dif = input[(y-F.height/2+i)*nc + (x-(F.width/2-j))] - input[y*nc + x];
dif = input[(y-filterCenter+i)*nc + (x-filterCenter+j)] - pixelD;
float A= 100000000*100000000*(dif*dif) /_2sigmar2;
A=-A;
//weightR = exp(-A)/(2*PI*sigmar*sigmar);
weightR = __expf(A);//(2*PI*sigmar*sigmar);
//weightR = 1+A/*+(A*A/2)*//*+(A*A*A/6)*//*+(A*A*A*A/24)*//*+(A*A*A*A*A/120)*//*+(A*A*A*A*A*A/720)*/;
norm += mask[i*FILTER_SIZE+j] * weightR;
sum += input[(y-filterCenter+i)*nc + (x-filterCenter+j)] * mask[i*FILTER_SIZE+j] * weightR;
}
}
}
}
output[y*nc + x] = sum/norm;
}
else
output[y*nc + x] = pixelD;
}
extern "C" void imageBilateralFilter(const unsigned short int* input, const unsigned short int* modelo, const uchar* maskB, const uchar* maskDNC, const uchar* foreground, unsigned short int* output, int nl, int nc, float a, float b, float c)
{
// setup execution parameters
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((nc+threads.x-1) / threads.x, (nl+threads.y-1) / threads.y);
// Invoke kernel
hipLaunchKernelGGL(( imageBilateralFilterKernel), dim3(grid), dim3(threads) , 0, 0, input, modelo, maskB, maskDNC, foreground, output, nl, nc, a, b, c);
}
// Performs joint bilateral filtering on a depth image outside the uncertainty zone (separable-filter approximation, horizontal pass)
__global__ void imageBilateralFilterKernelH(const unsigned short int* input, const unsigned short int* modelo, const uchar* maskB, const uchar* maskDNC, const uchar* foreground, unsigned short int* output, int nl, int nc, float a, float b, float c)
{
float sum= 0.0f;
float norm= 0.0f;
float weightR;
int dif;
float sigmar;
int _2sigmar2;
int filterCenter = FILTER_SIZE/2;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int x = blockIdx.x * blockDim.x + threadIdx.x;
if(y >= nl || x >= nc)
return;
unsigned short int pixelD = input[y*nc + x];
/*if (maskDNC[y*nc + x] == 255 && modelo[y*nc + x] > input[y*nc + x]){
output[y*nc + x] = input[y*nc + x];
return;
}*/
if (pixelD == 0){
output[y*nc + x] = pixelD;
return;
}
if (maskB[y*nc + x] == 0 && foreground[y*nc + x] == 0 && maskDNC[y*nc + x] == 0){
// compute the variance as a function of pixel depth (distance-dependent noise)
//sigmar = c + b*input[y*nc + x] + a*input[y*nc + x]*input[y*nc + x];
sigmar = (c + b*pixelD + a*pixelD*pixelD)*100000000;
_2sigmar2 = 2*sigmar*sigmar;
//for (int i = 0; i < FILTER_SIZE; i++){
//if ((y-filterCenter+i >= 0) && (y-filterCenter+i < nl)){
for (int j = 0; j < FILTER_SIZE; j++){
if ((x-filterCenter+j >= 0) && (x-filterCenter+j < nc) && input[y*nc + (x-filterCenter+j)] > 0 && foreground[y*nc + (x-filterCenter+j)] == 0 && maskB[y*nc + (x-filterCenter+j)] == 0){ //&& maskDNC[(y-F.height/2+i)*nc + (x-(F.width/2-j))] == 0){
if (maskDNC[y*nc + (x-filterCenter+j)] == 255 /*&& modelo[y*nc + (x-filterCenter+j)] > input[y*nc + (x-filterCenter+j)]*/)
continue;
//dif = modelo[(y-F.height/2+i)*nc + (x-(F.width/2-j))] - input[y*nc + x];
dif = modelo[y*nc + (x-filterCenter+j)] - pixelD;
float A = 100000000*100000000*dif*dif /_2sigmar2;
//float A= ((dif*dif)>>1) /(sigmar*sigmar);
//weightR = exp(-A)/(2*PI*sigmar*sigmar);
weightR = __expf(-A);//(2*PI*sigmar*sigmar);
//weightR = __expf(-((dif*dif) /(2*sigmar*sigmar)));//(2*PI*sigmar*sigmar);
norm += mask1D[j] * weightR;
//norm += __fmul_[rn,rz,ru,rd](mask[i*FILTER_SIZE+j], weightR);
sum += input[y*nc + (x-filterCenter+j)] * mask1D[j] * weightR;
}
}
//}
//}
output[y*nc + x] = sum/norm;
}
else if (foreground[y*nc + x] == 255){
// compute the variance as a function of pixel depth (distance-dependent noise)
//sigmar = c + b*input[y*nc + x] + a*input[y*nc + x]*input[y*nc + x];
sigmar = (c + b*pixelD + a*pixelD*pixelD)*100000000;
_2sigmar2 = 2*sigmar*sigmar;
//for (int i = 0; i < FILTER_SIZE; i++){
//if ((y-filterCenter+i >= 0) && (y-filterCenter+i < nl)){
for (int j = 0; j < FILTER_SIZE; j++){
if ((x-filterCenter+j >= 0) && (x-filterCenter+j < nc) && input[y*nc + (x-filterCenter+j)] > 0 && foreground[y*nc + (x-filterCenter+j)] == 255/* && maskB[(y-F.height/2+i)*nc + (x-(F.width/2-j))] == 0*/){
//if (maskDNC[y*nc + (x-filterCenter+j)] == 255 /*&& modelo[y*nc + (x-filterCenter+j)] > input[y*nc + (x-filterCenter+j)]*/)
// continue;
//dif = input[(y-F.height/2+i)*nc + (x-(F.width/2-j))] - input[y*nc + x];
dif = input[y*nc + (x-filterCenter+j)] - pixelD;
float A= 100000000*100000000*(dif*dif) /_2sigmar2;
//weightR = exp(-A)/(2*PI*sigmar*sigmar);
weightR = __expf(-A);//(2*PI*sigmar*sigmar);
norm += mask1D[j] * weightR;
sum += input[y*nc + (x-filterCenter+j)] * mask1D[j] * weightR;
}
}
//}
//}
output[y*nc + x] = sum/norm;
}
else
output[y*nc + x] = pixelD;
}
extern "C" void imageBilateralFilterH(const unsigned short int* input, const unsigned short int* modelo, const uchar* maskB, const uchar* maskDNC, const uchar* foreground, unsigned short int* output, int nl, int nc, float a, float b, float c)
{
// setup execution parameters
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((nc+threads.x-1) / threads.x, (nl+threads.y-1) / threads.y);
// Invoke kernel
hipLaunchKernelGGL(( imageBilateralFilterKernelH), dim3(grid), dim3(threads) , 0, 0, input, modelo, maskB, maskDNC, foreground, output, nl, nc, a, b, c);
}
// Performs joint bilateral filtering on a depth image outside the uncertainty zone (separable-filter approximation, vertical pass)
__global__ void imageBilateralFilterKernelV(const unsigned short int* input, const unsigned short int* modelo, const uchar* maskB, const uchar* maskDNC, const uchar* foreground, unsigned short int* output, int nl, int nc, float a, float b, float c)
{
float sum= 0.0f;
float norm= 0.0f;
float weightR;
int dif;
float sigmar;
int _2sigmar2;
int filterCenter = FILTER_SIZE/2;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int x = blockIdx.x * blockDim.x + threadIdx.x;
if(y >= nl || x >= nc)
return;
unsigned short int pixelD = input[y*nc + x];
/*if (maskDNC[y*nc + x] == 255 && modelo[y*nc + x] > input[y*nc + x]){
output[y*nc + x] = input[y*nc + x];
return;
}*/
if (pixelD == 0){
output[y*nc + x] = pixelD;
return;
}
if (maskB[y*nc + x] == 0 && foreground[y*nc + x] == 0 && maskDNC[y*nc + x] == 0){
// compute the variance as a function of pixel depth (distance-dependent noise)
//sigmar = c + b*input[y*nc + x] + a*input[y*nc + x]*input[y*nc + x];
sigmar = (c + b*pixelD + a*pixelD*pixelD)*100000000;
_2sigmar2 = 2*sigmar*sigmar;
for (int i = 0; i < FILTER_SIZE; i++){
if ((y-filterCenter+i >= 0) && (y-filterCenter+i < nl)){
//for (int j = 0; j < FILTER_SIZE; j++){
if (/*(x-filterCenter+j >= 0) && (x-filterCenter+j < nc) && */input[(y-filterCenter+i)*nc + x] > 0 && foreground[(y-filterCenter+i)*nc + x] == 0 && maskB[(y-filterCenter+i)*nc + x] == 0){ //&& maskDNC[(y-F.height/2+i)*nc + (x-(F.width/2-j))] == 0){
if (maskDNC[(y-filterCenter+i)*nc + x] == 255 /*&& modelo[(y-filterCenter+i)*nc + x] > input[(y-filterCenter+i)*nc + x]*/)
continue;
//dif = modelo[(y-F.height/2+i)*nc + (x-(F.width/2-j))] - input[y*nc + x];
dif = modelo[(y-filterCenter+i)*nc + x] - pixelD;
float A = 100000000*100000000*dif*dif /_2sigmar2;
//float A= ((dif*dif)>>1) /(sigmar*sigmar);
//weightR = exp(-A)/(2*PI*sigmar*sigmar);
weightR = __expf(-A);//(2*PI*sigmar*sigmar);
//weightR = __expf(-((dif*dif) /(2*sigmar*sigmar)));//(2*PI*sigmar*sigmar);
norm += mask1D[i] * weightR;
//norm += __fmul_[rn,rz,ru,rd](mask[i*FILTER_SIZE+j], weightR);
sum += input[(y-filterCenter+i)*nc + x] * mask1D[i] * weightR;
}
//}
}
}
output[y*nc + x] = sum/norm;
}
else if (foreground[y*nc + x] == 255){
// compute the variance as a function of pixel depth (distance-dependent noise)
//sigmar = c + b*input[y*nc + x] + a*input[y*nc + x]*input[y*nc + x];
sigmar = (c + b*pixelD + a*pixelD*pixelD)*100000000;
_2sigmar2 = 2*sigmar*sigmar;
for (int i = 0; i < FILTER_SIZE; i++){
if ((y-filterCenter+i >= 0) && (y-filterCenter+i < nl)){
//for (int j = 0; j < FILTER_SIZE; j++){
if (/*(x-filterCenter+j >= 0) && (x-filterCenter+j < nc) && */input[(y-filterCenter+i)*nc + x] > 0 && foreground[(y-filterCenter+i)*nc + x] == 255/* && maskB[(y-F.height/2+i)*nc + (x-(F.width/2-j))] == 0*/){
//if (maskDNC[(y-filterCenter+i)*nc + x] == 255 /*&& modelo[(y-filterCenter+i)*nc + x] > input[(y-filterCenter+i)*nc + x]*/)
// continue;
//dif = input[(y-F.height/2+i)*nc + (x-(F.width/2-j))] - input[y*nc + x];
dif = input[(y-filterCenter+i)*nc + x] - pixelD;
float A= 100000000*100000000*(dif*dif) /_2sigmar2;
//weightR = exp(-A)/(2*PI*sigmar*sigmar);
weightR = __expf(-A);//(2*PI*sigmar*sigmar);
norm += mask1D[i] * weightR;
sum += input[(y-filterCenter+i)*nc + x] * mask1D[i] * weightR;
}
//}
}
}
output[y*nc + x] = sum/norm;
}
else
output[y*nc + x] = pixelD;
}
extern "C" void imageBilateralFilterV(const unsigned short int* input, const unsigned short int* modelo, const uchar* maskB, const uchar* maskDNC, const uchar* foreground, unsigned short int* output, int nl, int nc, float a, float b, float c)
{
// setup execution parameters
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((nc+threads.x-1) / threads.x, (nl+threads.y-1) / threads.y);
// Invoke kernel
hipLaunchKernelGGL(( imageBilateralFilterKernelV), dim3(grid), dim3(threads) , 0, 0, input, modelo, maskB, maskDNC, foreground, output, nl, nc, a, b, c);
}
// Performs a joint bilateral filter to fill in pixels without depth information that have a minimum number of
// reliable neighbors
__global__ void imageRellenarKernel(const Matrix F, const unsigned short int* input, const uchar* color, const uchar* modeloColor, const uchar* foreground, unsigned short int* output, int nl, int nc, int nch, int sigmar, float porcen, int cMin)
{
float sum= 0.0f;
float norm= 0.0f;
float weightR;
//int dif1, dif2, dif3;
int3 dif;
int cont = 0;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int x = blockIdx.x * blockDim.x + threadIdx.x;
if(y >= nl || x >= nc)
return;
unsigned short int pixelD = input[y*nc + x];
if (pixelD > 0){
output[y*nc + x] = pixelD;
return;
}
uchar3 pixelC = {color[y*nc*nch + x*nch], color[y*nc*nch + x*nch + 1], color[y*nc*nch + x*nch + 2]};
uchar3 pixelM = {modeloColor[y*nc*nch + x*nch], modeloColor[y*nc*nch + x*nch + 1], modeloColor[y*nc*nch + x*nch + 2]};
//int _2sigmar2 = 200*sigmar*sigmar;
int minPixel = 10 * porcen * F.height * F.width;
if (/*input[y*nc + x] == 0 && */foreground[y*nc + x] == 0){
for (int i = 0; i < F.height; i++){
if ((y-F.height/2+i >= 0) && (y-F.height/2+i < nl)){
for (int j = 0; j < F.width; j++){
if ((x-(F.width/2-j) >= 0) && (x-(F.height/2-j) < nc) && input[(y-F.height/2+i)*nc + (x-(F.width/2-j))] > 0 && foreground[(y-F.height/2+i)*nc + (x-(F.width/2-j))] == 0){
unsigned char Cmap = tex2D(CmapTexture, x-(F.width/2-j), y-F.height/2+i);
if (Cmap < 128+cMin)
continue;
dif.x = modeloColor[(y-F.height/2+i)*nc*nch + (x-(F.width/2-j))*nch] - pixelM.x;
dif.y = modeloColor[(y-F.height/2+i)*nc*nch + (x-(F.width/2-j))*nch + 1] - pixelM.y;
dif.z = modeloColor[(y-F.height/2+i)*nc*nch + (x-(F.width/2-j))*nch + 2] - pixelM.z;
//float A= 100*(dif.x*dif.x + dif.y*dif.y + dif.z*dif.z) /_2sigmar2;
//weightR = __expf(-A);//(2*PI*sigmar*sigmar);
weightR = pre_exp[dif.x] * pre_exp[dif.y] * pre_exp[dif.z];
norm += mask[i*F.width+j] * weightR;
sum += input[(y-F.height/2+i)*nc + (x-(F.width/2-j))] * mask[i*F.width+j] * weightR;
cont++;
}
}
}
}
if (10*cont > minPixel)
output[y*nc + x] = sum/norm;
else
output[y*nc + x] = pixelD;
}
else if (/*input[y*nc + x] == 0 && */foreground[y*nc + x] == 255){
for (int i = 0; i < F.height; i++){
if ((y-F.height/2+i >= 0) && (y-F.height/2+i < nl)){
for (int j = 0; j < F.width; j++){
if ((x-(F.width/2-j) >= 0) && (x-(F.height/2-j) < nc) && input[(y-F.height/2+i)*nc + (x-(F.width/2-j))] > 0 && foreground[(y-F.height/2+i)*nc + (x-(F.width/2-j))] == 255){
dif.x = color[(y-F.height/2+i)*nc*nch + (x-(F.width/2-j))*nch] - pixelC.x;
dif.y = color[(y-F.height/2+i)*nc*nch + (x-(F.width/2-j))*nch + 1] - pixelC.y;
dif.z = color[(y-F.height/2+i)*nc*nch + (x-(F.width/2-j))*nch + 2] - pixelC.z;
//float A= 100*(dif.x*dif.x + dif.y*dif.y + dif.z*dif.z) /_2sigmar2;
//weightR = __expf(-A);//(2*PI*sigmar*sigmar);
weightR = pre_exp[dif.x] * pre_exp[dif.y] * pre_exp[dif.z];
norm += mask[i*F.width+j] * weightR;
sum += input[(y-F.height/2+i)*nc + (x-(F.width/2-j))] * mask[i*F.width+j] * weightR;
}
}
}
}
output[y*nc + x] = sum/norm;
}
else
output[y*nc + x] = pixelD;
}
extern "C" void imageRellenar(const Matrix F, const unsigned short int* input, const uchar* color, const uchar* modeloColor, const uchar* foreground, unsigned short int* output, int nl, int nc, int nch, int sigmar, float porcen, int cMin)
{
// setup execution parameters
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((nc+threads.x-1) / threads.x, (nl+threads.y-1) / threads.y);
// Invoke kernel
hipLaunchKernelGGL(( imageRellenarKernel), dim3(grid), dim3(threads) , 0, 0, F, input, color, modeloColor, foreground, output, nl, nc, nch, sigmar, porcen, cMin);
}
// Performs a joint bilateral filter to fill in pixels without depth information that have a minimum number of
// reliable neighbors (separable approximation, horizontal pass)
__global__ void imageRellenarKernelH(const Matrix F, const unsigned short int* input, const uchar* color, const uchar* modeloColor, const uchar* foreground, unsigned short int* output, int nl, int nc, int nch, int sigmar, float porcen, int cMin)
{
float sum= 0.0f;
float norm= 0.0f;
float weightR;
//int dif1, dif2, dif3;
int3 dif;
int cont = 0;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int x = blockIdx.x * blockDim.x + threadIdx.x;
if(y >= nl || x >= nc)
return;
unsigned short int pixelD = input[y*nc + x];
if (pixelD > 0){
output[y*nc + x] = pixelD;
return;
}
uchar3 pixelC = {color[y*nc*nch + x*nch], color[y*nc*nch + x*nch + 1], color[y*nc*nch + x*nch + 2]};
uchar3 pixelM = {modeloColor[y*nc*nch + x*nch], modeloColor[y*nc*nch + x*nch + 1], modeloColor[y*nc*nch + x*nch + 2]};
//int _2sigmar2 = 200*sigmar*sigmar;
int minPixel = 10 * porcen * F.width;
if (/*input[y*nc + x] == 0 && */foreground[y*nc + x] == 0){
//for (int i = 0; i < F.height; i++){
//if ((y-F.height/2+i >= 0) && (y-F.height/2+i < nl)){
for (int j = 0; j < F.width; j++){
if ((x-(F.width/2-j) >= 0) && (x-(F.height/2-j) < nc) && input[y*nc + (x-(F.width/2-j))] > 0 && foreground[y*nc + (x-(F.width/2-j))] == 0){
unsigned char Cmap = tex2D(CmapTexture, x-(F.width/2-j), y);
if (Cmap < 128+cMin)
continue;
dif.x = modeloColor[y*nc*nch + (x-(F.width/2-j))*nch] - pixelM.x;
dif.y = modeloColor[y*nc*nch + (x-(F.width/2-j))*nch + 1] - pixelM.y;
dif.z = modeloColor[y*nc*nch + (x-(F.width/2-j))*nch + 2] - pixelM.z;
//float A= 100*(dif.x*dif.x + dif.y*dif.y + dif.z*dif.z) /_2sigmar2;
//weightR = __expf(-A);//(2*PI*sigmar*sigmar);
weightR = pre_exp[dif.x] * pre_exp[dif.y] * pre_exp[dif.z];
norm += mask1D[j] * weightR;
sum += input[y*nc + (x-(F.width/2-j))] * mask1D[j] * weightR;
cont++;
}
}
//}
//}
if (10*cont > minPixel)
output[y*nc + x] = sum/norm;
else
output[y*nc + x] = pixelD;
}
else {//if (/*input[y*nc + x] == 0 && */foreground[y*nc + x] == 255){
//for (int i = 0; i < F.height; i++){
//if ((y-F.height/2+i >= 0) && (y-F.height/2+i < nl)){
for (int j = 0; j < F.width; j++){
if ((x-(F.width/2-j) >= 0) && (x-(F.height/2-j) < nc) && input[y*nc + (x-(F.width/2-j))] > 0 && foreground[y*nc + (x-(F.width/2-j))] == 255){
dif.x = color[y*nc*nch + (x-(F.width/2-j))*nch] - pixelC.x;
dif.y = color[y*nc*nch + (x-(F.width/2-j))*nch + 1] - pixelC.y;
dif.z = color[y*nc*nch + (x-(F.width/2-j))*nch + 2] - pixelC.z;
//float A= 100*(dif.x*dif.x + dif.y*dif.y + dif.z*dif.z) /_2sigmar2;
//weightR = __expf(-A);//(2*PI*sigmar*sigmar);
weightR = pre_exp[dif.x] * pre_exp[dif.y] * pre_exp[dif.z];
norm += mask1D[j] * weightR;
sum += input[y*nc + (x-(F.width/2-j))] * mask1D[j] * weightR;
}
}
//}
//}
output[y*nc + x] = sum/norm;
}
//else
// output[y*nc + x] = pixelD;
}
extern "C" void imageRellenarH(const Matrix F, const unsigned short int* input, const uchar* color, const uchar* modeloColor, const uchar* foreground, unsigned short int* output, int nl, int nc, int nch, int sigmar, float porcen, int cMin)
{
// setup execution parameters
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((nc+threads.x-1) / threads.x, (nl+threads.y-1) / threads.y);
// Invoke kernel
hipLaunchKernelGGL(( imageRellenarKernelH), dim3(grid), dim3(threads) , 0, 0, F, input, color, modeloColor, foreground, output, nl, nc, nch, sigmar, porcen, cMin);
}
// Performs a joint bilateral filter to fill in pixels without depth information that have a minimum number of
// reliable neighbors (separable approximation, vertical pass)
__global__ void imageRellenarKernelV(const Matrix F, const unsigned short int* input, const uchar* color, const uchar* modeloColor, const uchar* foreground, unsigned short int* output, int nl, int nc, int nch, int sigmar, float porcen, int cMin)
{
float sum= 0.0f;
float norm= 0.0f;
float weightR;
//int dif1, dif2, dif3;
int3 dif;
int cont = 0;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int x = blockIdx.x * blockDim.x + threadIdx.x;
if(y >= nl || x >= nc)
return;
unsigned short int pixelD = input[y*nc + x];
if (pixelD > 0){
output[y*nc + x] = pixelD;
return;
}
uchar3 pixelC = {color[y*nc*nch + x*nch], color[y*nc*nch + x*nch + 1], color[y*nc*nch + x*nch + 2]};
uchar3 pixelM = {modeloColor[y*nc*nch + x*nch], modeloColor[y*nc*nch + x*nch + 1], modeloColor[y*nc*nch + x*nch + 2]};
//int _2sigmar2 = 200*sigmar*sigmar;
int minPixel = 10 * porcen * F.height;
if (/*input[y*nc + x] == 0 && */foreground[y*nc + x] == 0){
for (int i = 0; i < F.height; i++){
if ((y-F.height/2+i >= 0) && (y-F.height/2+i < nl)){
//for (int j = 0; j < F.width; j++){
if (/*(x-(F.width/2-j) >= 0) && (x-(F.height/2-j) < nc) &&*/ input[(y-F.height/2+i)*nc + x] > 0 && foreground[(y-F.height/2+i)*nc + x] == 0){
unsigned char Cmap = tex2D(CmapTexture, x, y-F.height/2+i);
if (Cmap < 128+cMin)
continue;
dif.x = modeloColor[(y-F.height/2+i)*nc*nch + x*nch] - pixelM.x;
dif.y = modeloColor[(y-F.height/2+i)*nc*nch + x*nch + 1] - pixelM.y;
dif.z = modeloColor[(y-F.height/2+i)*nc*nch + x*nch + 2] - pixelM.z;
//float A= 100*(dif.x*dif.x + dif.y*dif.y + dif.z*dif.z) /_2sigmar2;
//weightR = __expf(-A);//(2*PI*sigmar*sigmar);
weightR = pre_exp[dif.x] * pre_exp[dif.y] * pre_exp[dif.z];
norm += mask1D[i] * weightR;
sum += input[(y-F.height/2+i)*nc + x] * mask1D[i] * weightR;
cont++;
}
//}
}
}
if (10*cont > minPixel)
output[y*nc + x] = sum/norm;
else
output[y*nc + x] = pixelD;
}
else{ //if (/*input[y*nc + x] == 0 && */foreground[y*nc + x] == 255){
for (int i = 0; i < F.height; i++){
if ((y-F.height/2+i >= 0) && (y-F.height/2+i < nl)){
//for (int j = 0; j < F.width; j++){
if (/*(x-(F.width/2-j) >= 0) && (x-(F.height/2-j) < nc) && */input[(y-F.height/2+i)*nc + x] > 0 && foreground[(y-F.height/2+i)*nc + x] == 255){
dif.x = color[(y-F.height/2+i)*nc*nch + x*nch] - pixelC.x;
dif.y = color[(y-F.height/2+i)*nc*nch + x*nch + 1] - pixelC.y;
dif.z = color[(y-F.height/2+i)*nc*nch + x*nch + 2] - pixelC.z;
//float A= 100*(dif.x*dif.x + dif.y*dif.y + dif.z*dif.z) /_2sigmar2;
//weightR = __expf(-A);//(2*PI*sigmar*sigmar);
weightR = pre_exp[dif.x] * pre_exp[dif.y] * pre_exp[dif.z];
norm += mask1D[i] * weightR;
sum += input[(y-F.height/2+i)*nc + x] * mask1D[i] * weightR;
}
//}
}
}
output[y*nc + x] = sum/norm;
}
//else
// output[y*nc + x] = pixelD;
}
extern "C" void imageRellenarV(const Matrix F, const unsigned short int* input, const uchar* color, const uchar* modeloColor, const uchar* foreground, unsigned short int* output, int nl, int nc, int nch, int sigmar, float porcen, int cMin)
{
// setup execution parameters
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((nc+threads.x-1) / threads.x, (nl+threads.y-1) / threads.y);
// Invoke kernel
hipLaunchKernelGGL(( imageRellenarKernelV), dim3(grid), dim3(threads) , 0, 0, F, input, color, modeloColor, foreground, output, nl, nc, nch, sigmar, porcen, cMin);
}
| fe4571d3f0fe9b109b70102bfb67d411f7719370.cu | #include "BilateralFilter.h"
#include "generalcuda.h"
#include <texture_types.h>
#include <texture_fetch_functions.h>
__constant__ float mask[FILTER_SIZE * FILTER_SIZE];
__constant__ float mask1D[FILTER_SIZE];
__constant__ float pre_exp[256];
__constant__ float pre_sigmar[2048];
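// Host-precomputed lookup tables (filled via setPre_calculation / setPre_sigmaR): pre_exp caches range-kernel weights indexed by per-channel color difference, pre_sigmar caches the depth-dependent noise sigma indexed by depth/3.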
texture<unsigned char, cudaTextureType2D, cudaReadModeElementType> CmapTexture;
extern "C" void setDistanceMask(float *h_Kernel, size_t mem_size){
CUDA_SAFE_CALL (cudaMemcpyToSymbol(mask, h_Kernel, mem_size));
}
extern "C" void setDistanceMask1D(float *h_Kernel, size_t mem_size){
CUDA_SAFE_CALL (cudaMemcpyToSymbol(mask1D, h_Kernel, mem_size));
}
extern "C" void setPre_calculation(float *h_Kernel, size_t mem_size){
CUDA_SAFE_CALL (cudaMemcpyToSymbol(pre_exp, h_Kernel, mem_size));
}
extern "C" void setPre_sigmaR(float *h_Kernel, size_t mem_size){
CUDA_SAFE_CALL (cudaMemcpyToSymbol(pre_sigmar, h_Kernel, mem_size));
}
extern "C" void setTextureRellenado(DetectedParameters const *parameters, cudaArray **CmapArray)
{
CmapTexture.addressMode[0] = cudaAddressModeBorder;
CmapTexture.addressMode[1] = cudaAddressModeBorder;
CmapTexture.filterMode = cudaFilterModePoint;
CmapTexture.normalized = false;
CUDA_SAFE_CALL (cudaBindTextureToArray(CmapTexture, *CmapArray));
}
// Performs joint bilateral filtering on a depth image outside the uncertainty zone
__global__ void imageBilateralFilterKernel(const unsigned short int* input, const unsigned short int* modelo, const uchar* maskB, const uchar* maskDNC, const uchar* foreground, unsigned short int* output, int nl, int nc, float a, float b, float c)
{
float sum= 0.0f;
float norm= 0.0f;
float weightR;
int dif;
float sigmar;
int _2sigmar2;
int filterCenter = FILTER_SIZE/2;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int x = blockIdx.x * blockDim.x + threadIdx.x;
if(y >= nl || x >= nc)
return;
unsigned short int pixelD = input[y*nc + x];
//unsigned short int pixelD = modelo[y*nc + x];
/*if (maskDNC[y*nc + x] == 255 && modelo[y*nc + x] > input[y*nc + x]){
output[y*nc + x] = input[y*nc + x];
return;
}*/
if (pixelD == 0){
output[y*nc + x] = pixelD;
return;
}
if (maskB[y*nc + x] == 0 && foreground[y*nc + x] == 0 && maskDNC[y*nc + x] == 0){
// compute the variance as a function of pixel depth (distance-dependent noise)
//sigmar = c + b*input[y*nc + x] + a*input[y*nc + x]*input[y*nc + x];
//sigmar = (c + b*pixelD + a*pixelD*pixelD)*100000000;
sigmar = pre_sigmar[pixelD/3]*100000000;
_2sigmar2 = 2*sigmar*sigmar;
for (int i = 0; i < FILTER_SIZE; i++){
if ((y-filterCenter+i >= 0) && (y-filterCenter+i < nl)){
for (int j = 0; j < FILTER_SIZE; j++){
if ((x-filterCenter+j >= 0) && (x-filterCenter+j < nc) && input[(y-filterCenter+i)*nc + (x-filterCenter+j)] > 0 && foreground[(y-filterCenter+i)*nc + (x-filterCenter+j)] == 0 && maskB[(y-filterCenter+i)*nc + (x-filterCenter+j)] == 0){ //&& maskDNC[(y-F.height/2+i)*nc + (x-(F.width/2-j))] == 0){
if (maskDNC[(y-filterCenter+i)*nc + (x-filterCenter+j)] == 255 /*&& modelo[(y-filterCenter+i)*nc + (x-filterCenter+j)] > input[(y-filterCenter+i)*nc + (x-filterCenter+j)]*/)
continue;
//dif = modelo[(y-F.height/2+i)*nc + (x-(F.width/2-j))] - input[y*nc + x];
dif = modelo[(y-filterCenter+i)*nc + (x-filterCenter+j)] - pixelD;
float A = 100000000*100000000*dif*dif /_2sigmar2;
A=-A;
//float A= ((dif*dif)>>1) /(sigmar*sigmar);
//weightR = exp(-A)/(2*PI*sigmar*sigmar);
weightR = __expf(A);//(2*PI*sigmar*sigmar);
//weightR = 1+A/*+(A*A/2)*//*+(A*A*A/6)*//*+(A*A*A*A/24)*//*+(A*A*A*A*A/120)*//*+(A*A*A*A*A*A/720)*/;
//weightR = __expf(-((dif*dif) /(2*sigmar*sigmar)));//(2*PI*sigmar*sigmar);
norm += mask[i*FILTER_SIZE+j] * weightR;
//norm += __fmul_[rn,rz,ru,rd](mask[i*FILTER_SIZE+j], weightR);
sum += input[(y-filterCenter+i)*nc + (x-filterCenter+j)] * mask[i*FILTER_SIZE+j] * weightR;
}
}
}
}
output[y*nc + x] = sum/norm;
}
else if (foreground[y*nc + x] == 255){
// compute the variance as a function of pixel depth (distance-dependent noise)
//sigmar = c + b*input[y*nc + x] + a*input[y*nc + x]*input[y*nc + x];
//sigmar = (c + b*pixelD + a*pixelD*pixelD)*100000000;
sigmar = pre_sigmar[pixelD/3]*100000000;
_2sigmar2 = 2*sigmar*sigmar;
for (int i = 0; i < FILTER_SIZE; i++){
if ((y-filterCenter+i >= 0) && (y-filterCenter+i < nl)){
for (int j = 0; j < FILTER_SIZE; j++){
if ((x-filterCenter+j >= 0) && (x-filterCenter+j < nc) && input[(y-filterCenter+i)*nc + (x-filterCenter+j)] > 0 && foreground[(y-filterCenter+i)*nc + (x-filterCenter+j)] == 255 /*&& maskB[(y-filterCenter+i)*nc + (x-filterCenter+j)] == 0*/){
//dif = input[(y-F.height/2+i)*nc + (x-(F.width/2-j))] - input[y*nc + x];
dif = input[(y-filterCenter+i)*nc + (x-filterCenter+j)] - pixelD;
float A= 100000000*100000000*(dif*dif) /_2sigmar2;
A=-A;
//weightR = exp(-A)/(2*PI*sigmar*sigmar);
weightR = __expf(A);//(2*PI*sigmar*sigmar);
//weightR = 1+A/*+(A*A/2)*//*+(A*A*A/6)*//*+(A*A*A*A/24)*//*+(A*A*A*A*A/120)*//*+(A*A*A*A*A*A/720)*/;
norm += mask[i*FILTER_SIZE+j] * weightR;
sum += input[(y-filterCenter+i)*nc + (x-filterCenter+j)] * mask[i*FILTER_SIZE+j] * weightR;
}
}
}
}
output[y*nc + x] = sum/norm;
}
else
output[y*nc + x] = pixelD;
}
extern "C" void imageBilateralFilter(const unsigned short int* input, const unsigned short int* modelo, const uchar* maskB, const uchar* maskDNC, const uchar* foreground, unsigned short int* output, int nl, int nc, float a, float b, float c)
{
// setup execution parameters
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((nc+threads.x-1) / threads.x, (nl+threads.y-1) / threads.y);
// Invoke kernel
imageBilateralFilterKernel<<< grid, threads >>>(input, modelo, maskB, maskDNC, foreground, output, nl, nc, a, b, c);
}
// Performs joint bilateral filtering on a depth image outside the uncertainty zone (separable-filter approximation, horizontal pass)
__global__ void imageBilateralFilterKernelH(const unsigned short int* input, const unsigned short int* modelo, const uchar* maskB, const uchar* maskDNC, const uchar* foreground, unsigned short int* output, int nl, int nc, float a, float b, float c)
{
float sum= 0.0f;
float norm= 0.0f;
float weightR;
int dif;
float sigmar;
int _2sigmar2;
int filterCenter = FILTER_SIZE/2;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int x = blockIdx.x * blockDim.x + threadIdx.x;
if(y >= nl || x >= nc)
return;
unsigned short int pixelD = input[y*nc + x];
/*if (maskDNC[y*nc + x] == 255 && modelo[y*nc + x] > input[y*nc + x]){
output[y*nc + x] = input[y*nc + x];
return;
}*/
if (pixelD == 0){
output[y*nc + x] = pixelD;
return;
}
if (maskB[y*nc + x] == 0 && foreground[y*nc + x] == 0 && maskDNC[y*nc + x] == 0){
// compute the variance as a function of pixel depth (distance-dependent noise)
//sigmar = c + b*input[y*nc + x] + a*input[y*nc + x]*input[y*nc + x];
sigmar = (c + b*pixelD + a*pixelD*pixelD)*100000000;
_2sigmar2 = 2*sigmar*sigmar;
//for (int i = 0; i < FILTER_SIZE; i++){
//if ((y-filterCenter+i >= 0) && (y-filterCenter+i < nl)){
for (int j = 0; j < FILTER_SIZE; j++){
if ((x-filterCenter+j >= 0) && (x-filterCenter+j < nc) && input[y*nc + (x-filterCenter+j)] > 0 && foreground[y*nc + (x-filterCenter+j)] == 0 && maskB[y*nc + (x-filterCenter+j)] == 0){ //&& maskDNC[(y-F.height/2+i)*nc + (x-(F.width/2-j))] == 0){
if (maskDNC[y*nc + (x-filterCenter+j)] == 255 /*&& modelo[y*nc + (x-filterCenter+j)] > input[y*nc + (x-filterCenter+j)]*/)
continue;
//dif = modelo[(y-F.height/2+i)*nc + (x-(F.width/2-j))] - input[y*nc + x];
dif = modelo[y*nc + (x-filterCenter+j)] - pixelD;
float A = 100000000*100000000*dif*dif /_2sigmar2;
//float A= ((dif*dif)>>1) /(sigmar*sigmar);
//weightR = exp(-A)/(2*PI*sigmar*sigmar);
weightR = __expf(-A);//(2*PI*sigmar*sigmar);
//weightR = __expf(-((dif*dif) /(2*sigmar*sigmar)));//(2*PI*sigmar*sigmar);
norm += mask1D[j] * weightR;
//norm += __fmul_[rn,rz,ru,rd](mask[i*FILTER_SIZE+j], weightR);
sum += input[y*nc + (x-filterCenter+j)] * mask1D[j] * weightR;
}
}
//}
//}
output[y*nc + x] = sum/norm;
}
else if (foreground[y*nc + x] == 255){
// compute the variance as a function of pixel depth (distance-dependent noise)
//sigmar = c + b*input[y*nc + x] + a*input[y*nc + x]*input[y*nc + x];
sigmar = (c + b*pixelD + a*pixelD*pixelD)*100000000;
_2sigmar2 = 2*sigmar*sigmar;
//for (int i = 0; i < FILTER_SIZE; i++){
//if ((y-filterCenter+i >= 0) && (y-filterCenter+i < nl)){
for (int j = 0; j < FILTER_SIZE; j++){
if ((x-filterCenter+j >= 0) && (x-filterCenter+j < nc) && input[y*nc + (x-filterCenter+j)] > 0 && foreground[y*nc + (x-filterCenter+j)] == 255/* && maskB[(y-F.height/2+i)*nc + (x-(F.width/2-j))] == 0*/){
//if (maskDNC[y*nc + (x-filterCenter+j)] == 255 /*&& modelo[y*nc + (x-filterCenter+j)] > input[y*nc + (x-filterCenter+j)]*/)
// continue;
//dif = input[(y-F.height/2+i)*nc + (x-(F.width/2-j))] - input[y*nc + x];
dif = input[y*nc + (x-filterCenter+j)] - pixelD;
float A= 100000000*100000000*(dif*dif) /_2sigmar2;
//weightR = exp(-A)/(2*PI*sigmar*sigmar);
weightR = __expf(-A);//(2*PI*sigmar*sigmar);
norm += mask1D[j] * weightR;
sum += input[y*nc + (x-filterCenter+j)] * mask1D[j] * weightR;
}
}
//}
//}
output[y*nc + x] = sum/norm;
}
else
output[y*nc + x] = pixelD;
}
extern "C" void imageBilateralFilterH(const unsigned short int* input, const unsigned short int* modelo, const uchar* maskB, const uchar* maskDNC, const uchar* foreground, unsigned short int* output, int nl, int nc, float a, float b, float c)
{
// setup execution parameters
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((nc+threads.x-1) / threads.x, (nl+threads.y-1) / threads.y);
// Invoke kernel
imageBilateralFilterKernelH<<< grid, threads >>>(input, modelo, maskB, maskDNC, foreground, output, nl, nc, a, b, c);
}
// Realiza el filtrado bilateral conjunto sobre una imagen de depth fuera de la zona de incertidumbre (aproximación filtro separable vertical)
__global__ void imageBilateralFilterKernelV(const unsigned short int* input, const unsigned short int* modelo, const uchar* maskB, const uchar* maskDNC, const uchar* foreground, unsigned short int* output, int nl, int nc, float a, float b, float c)
{
float sum= 0.0f;
float norm= 0.0f;
float weightR;
int dif;
float sigmar;
int _2sigmar2;
int filterCenter = FILTER_SIZE/2;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int x = blockIdx.x * blockDim.x + threadIdx.x;
if(y >= nl || x >= nc)
return;
unsigned short int pixelD = input[y*nc + x];
/*if (maskDNC[y*nc + x] == 255 && modelo[y*nc + x] > input[y*nc + x]){
output[y*nc + x] = input[y*nc + x];
return;
}*/
if (pixelD == 0){
output[y*nc + x] = pixelD;
return;
}
if (maskB[y*nc + x] == 0 && foreground[y*nc + x] == 0 && maskDNC[y*nc + x] == 0){
// compute the variance as a function of pixel depth (distance-dependent noise)
//sigmar = c + b*input[y*nc + x] + a*input[y*nc + x]*input[y*nc + x];
sigmar = (c + b*pixelD + a*pixelD*pixelD)*100000000;
_2sigmar2 = 2*sigmar*sigmar;
for (int i = 0; i < FILTER_SIZE; i++){
if ((y-filterCenter+i >= 0) && (y-filterCenter+i < nl)){
//for (int j = 0; j < FILTER_SIZE; j++){
if (/*(x-filterCenter+j >= 0) && (x-filterCenter+j < nc) && */input[(y-filterCenter+i)*nc + x] > 0 && foreground[(y-filterCenter+i)*nc + x] == 0 && maskB[(y-filterCenter+i)*nc + x] == 0){ //&& maskDNC[(y-F.height/2+i)*nc + (x-(F.width/2-j))] == 0){
if (maskDNC[(y-filterCenter+i)*nc + x] == 255 /*&& modelo[(y-filterCenter+i)*nc + x] > input[(y-filterCenter+i)*nc + x]*/)
continue;
//dif = modelo[(y-F.height/2+i)*nc + (x-(F.width/2-j))] - input[y*nc + x];
dif = modelo[(y-filterCenter+i)*nc + x] - pixelD;
float A = 100000000*100000000*dif*dif /_2sigmar2;
//float A= ((dif*dif)>>1) /(sigmar*sigmar);
//weightR = exp(-A)/(2*PI*sigmar*sigmar);
weightR = __expf(-A);//(2*PI*sigmar*sigmar);
//weightR = __expf(-((dif*dif) /(2*sigmar*sigmar)));//(2*PI*sigmar*sigmar);
norm += mask1D[i] * weightR;
//norm += __fmul_[rn,rz,ru,rd](mask[i*FILTER_SIZE+j], weightR);
sum += input[(y-filterCenter+i)*nc + x] * mask1D[i] * weightR;
}
//}
}
}
output[y*nc + x] = sum/norm;
}
else if (foreground[y*nc + x] == 255){
// compute the variance as a function of pixel depth (distance-dependent noise)
//sigmar = c + b*input[y*nc + x] + a*input[y*nc + x]*input[y*nc + x];
sigmar = (c + b*pixelD + a*pixelD*pixelD)*100000000;
_2sigmar2 = 2*sigmar*sigmar;
for (int i = 0; i < FILTER_SIZE; i++){
if ((y-filterCenter+i >= 0) && (y-filterCenter+i < nl)){
//for (int j = 0; j < FILTER_SIZE; j++){
if (/*(x-filterCenter+j >= 0) && (x-filterCenter+j < nc) && */input[(y-filterCenter+i)*nc + x] > 0 && foreground[(y-filterCenter+i)*nc + x] == 255/* && maskB[(y-F.height/2+i)*nc + (x-(F.width/2-j))] == 0*/){
//if (maskDNC[(y-filterCenter+i)*nc + x] == 255 /*&& modelo[(y-filterCenter+i)*nc + x] > input[(y-filterCenter+i)*nc + x]*/)
// continue;
//dif = input[(y-F.height/2+i)*nc + (x-(F.width/2-j))] - input[y*nc + x];
dif = input[(y-filterCenter+i)*nc + x] - pixelD;
float A= 100000000*100000000*(dif*dif) /_2sigmar2;
//weightR = exp(-A)/(2*PI*sigmar*sigmar);
weightR = __expf(-A);//(2*PI*sigmar*sigmar);
norm += mask1D[i] * weightR;
sum += input[(y-filterCenter+i)*nc + x] * mask1D[i] * weightR;
}
//}
}
}
output[y*nc + x] = sum/norm;
}
else
output[y*nc + x] = pixelD;
}
extern "C" void imageBilateralFilterV(const unsigned short int* input, const unsigned short int* modelo, const uchar* maskB, const uchar* maskDNC, const uchar* foreground, unsigned short int* output, int nl, int nc, float a, float b, float c)
{
// setup execution parameters
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((nc+threads.x-1) / threads.x, (nl+threads.y-1) / threads.y);
// Invoke kernel
imageBilateralFilterKernelV<<< grid, threads >>>(input, modelo, maskB, maskDNC, foreground, output, nl, nc, a, b, c);
}
// Performs a joint bilateral filter to fill in pixels without depth information that have a minimum number of
// reliable neighbors
__global__ void imageRellenarKernel(const Matrix F, const unsigned short int* input, const uchar* color, const uchar* modeloColor, const uchar* foreground, unsigned short int* output, int nl, int nc, int nch, int sigmar, float porcen, int cMin)
{
float sum= 0.0f;
float norm= 0.0f;
float weightR;
//int dif1, dif2, dif3;
int3 dif;
int cont = 0;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int x = blockIdx.x * blockDim.x + threadIdx.x;
if(y >= nl || x >= nc)
return;
unsigned short int pixelD = input[y*nc + x];
if (pixelD > 0){
output[y*nc + x] = pixelD;
return;
}
uchar3 pixelC = {color[y*nc*nch + x*nch], color[y*nc*nch + x*nch + 1], color[y*nc*nch + x*nch + 2]};
uchar3 pixelM = {modeloColor[y*nc*nch + x*nch], modeloColor[y*nc*nch + x*nch + 1], modeloColor[y*nc*nch + x*nch + 2]};
//int _2sigmar2 = 200*sigmar*sigmar;
int minPixel = 10 * porcen * F.height * F.width;
if (/*input[y*nc + x] == 0 && */foreground[y*nc + x] == 0){
for (int i = 0; i < F.height; i++){
if ((y-F.height/2+i >= 0) && (y-F.height/2+i < nl)){
for (int j = 0; j < F.width; j++){
if ((x-(F.width/2-j) >= 0) && (x-(F.height/2-j) < nc) && input[(y-F.height/2+i)*nc + (x-(F.width/2-j))] > 0 && foreground[(y-F.height/2+i)*nc + (x-(F.width/2-j))] == 0){
unsigned char Cmap = tex2D(CmapTexture, x-(F.width/2-j), y-F.height/2+i);
if (Cmap < 128+cMin)
continue;
dif.x = modeloColor[(y-F.height/2+i)*nc*nch + (x-(F.width/2-j))*nch] - pixelM.x;
dif.y = modeloColor[(y-F.height/2+i)*nc*nch + (x-(F.width/2-j))*nch + 1] - pixelM.y;
dif.z = modeloColor[(y-F.height/2+i)*nc*nch + (x-(F.width/2-j))*nch + 2] - pixelM.z;
//float A= 100*(dif.x*dif.x + dif.y*dif.y + dif.z*dif.z) /_2sigmar2;
//weightR = __expf(-A);//(2*PI*sigmar*sigmar);
weightR = pre_exp[dif.x] * pre_exp[dif.y] * pre_exp[dif.z];
norm += mask[i*F.width+j] * weightR;
sum += input[(y-F.height/2+i)*nc + (x-(F.width/2-j))] * mask[i*F.width+j] * weightR;
cont++;
}
}
}
}
if (10*cont > minPixel)
output[y*nc + x] = sum/norm;
else
output[y*nc + x] = pixelD;
}
else if (/*input[y*nc + x] == 0 && */foreground[y*nc + x] == 255){
for (int i = 0; i < F.height; i++){
if ((y-F.height/2+i >= 0) && (y-F.height/2+i < nl)){
for (int j = 0; j < F.width; j++){
if ((x-(F.width/2-j) >= 0) && (x-(F.height/2-j) < nc) && input[(y-F.height/2+i)*nc + (x-(F.width/2-j))] > 0 && foreground[(y-F.height/2+i)*nc + (x-(F.width/2-j))] == 255){
dif.x = color[(y-F.height/2+i)*nc*nch + (x-(F.width/2-j))*nch] - pixelC.x;
dif.y = color[(y-F.height/2+i)*nc*nch + (x-(F.width/2-j))*nch + 1] - pixelC.y;
dif.z = color[(y-F.height/2+i)*nc*nch + (x-(F.width/2-j))*nch + 2] - pixelC.z;
//float A= 100*(dif.x*dif.x + dif.y*dif.y + dif.z*dif.z) /_2sigmar2;
//weightR = __expf(-A);//(2*PI*sigmar*sigmar);
weightR = pre_exp[dif.x] * pre_exp[dif.y] * pre_exp[dif.z];
norm += mask[i*F.width+j] * weightR;
sum += input[(y-F.height/2+i)*nc + (x-(F.width/2-j))] * mask[i*F.width+j] * weightR;
}
}
}
}
output[y*nc + x] = sum/norm;
}
else
output[y*nc + x] = pixelD;
}
extern "C" void imageRellenar(const Matrix F, const unsigned short int* input, const uchar* color, const uchar* modeloColor, const uchar* foreground, unsigned short int* output, int nl, int nc, int nch, int sigmar, float porcen, int cMin)
{
// setup execution parameters
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((nc+threads.x-1) / threads.x, (nl+threads.y-1) / threads.y);
// Invoke kernel
imageRellenarKernel<<< grid, threads >>>(F, input, color, modeloColor, foreground, output, nl, nc, nch, sigmar, porcen, cMin);
}
// Performs a joint bilateral filter to fill in pixels with no depth information that have a minimum number of
// reliable neighbors (separable horizontal approximation)
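//
// Splitting the 2D window into a horizontal pass followed by a vertical pass
// reduces the per-pixel cost from O(width*height) to O(width + height)
// neighbor visits; it is only an approximation, because the bilateral range
// weights do not factor exactly across the two passes.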
__global__ void imageRellenarKernelH(const Matrix F, const unsigned short int* input, const uchar* color, const uchar* modeloColor, const uchar* foreground, unsigned short int* output, int nl, int nc, int nch, int sigmar, float porcen, int cMin)
{
float sum= 0.0f;
float norm= 0.0f;
float weightR;
//int dif1, dif2, dif3;
int3 dif;
int cont = 0;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int x = blockIdx.x * blockDim.x + threadIdx.x;
if(y >= nl || x >= nc)
return;
unsigned short int pixelD = input[y*nc + x];
if (pixelD > 0){
output[y*nc + x] = pixelD;
return;
}
uchar3 pixelC = {color[y*nc*nch + x*nch], color[y*nc*nch + x*nch + 1], color[y*nc*nch + x*nch + 2]};
uchar3 pixelM = {modeloColor[y*nc*nch + x*nch], modeloColor[y*nc*nch + x*nch + 1], modeloColor[y*nc*nch + x*nch + 2]};
//int _2sigmar2 = 200*sigmar*sigmar;
int minPixel = 10 * porcen * F.width;
if (/*input[y*nc + x] == 0 && */foreground[y*nc + x] == 0){
//for (int i = 0; i < F.height; i++){
//if ((y-F.height/2+i >= 0) && (y-F.height/2+i < nl)){
for (int j = 0; j < F.width; j++){
			if ((x-(F.width/2-j) >= 0) && (x-(F.width/2-j) < nc) && input[y*nc + (x-(F.width/2-j))] > 0 && foreground[y*nc + (x-(F.width/2-j))] == 0){
unsigned char Cmap = tex2D(CmapTexture, x-(F.width/2-j), y);
if (Cmap < 128+cMin)
continue;
dif.x = modeloColor[y*nc*nch + (x-(F.width/2-j))*nch] - pixelM.x;
dif.y = modeloColor[y*nc*nch + (x-(F.width/2-j))*nch + 1] - pixelM.y;
dif.z = modeloColor[y*nc*nch + (x-(F.width/2-j))*nch + 2] - pixelM.z;
//float A= 100*(dif.x*dif.x + dif.y*dif.y + dif.z*dif.z) /_2sigmar2;
//weightR = __expf(-A);//(2*PI*sigmar*sigmar);
weightR = pre_exp[dif.x] * pre_exp[dif.y] * pre_exp[dif.z];
norm += mask1D[j] * weightR;
sum += input[y*nc + (x-(F.width/2-j))] * mask1D[j] * weightR;
cont++;
}
}
//}
//}
if (10*cont > minPixel)
output[y*nc + x] = sum/norm;
else
output[y*nc + x] = pixelD;
}
else {//if (/*input[y*nc + x] == 0 && */foreground[y*nc + x] == 255){
//for (int i = 0; i < F.height; i++){
//if ((y-F.height/2+i >= 0) && (y-F.height/2+i < nl)){
for (int j = 0; j < F.width; j++){
			if ((x-(F.width/2-j) >= 0) && (x-(F.width/2-j) < nc) && input[y*nc + (x-(F.width/2-j))] > 0 && foreground[y*nc + (x-(F.width/2-j))] == 255){
dif.x = color[y*nc*nch + (x-(F.width/2-j))*nch] - pixelC.x;
dif.y = color[y*nc*nch + (x-(F.width/2-j))*nch + 1] - pixelC.y;
dif.z = color[y*nc*nch + (x-(F.width/2-j))*nch + 2] - pixelC.z;
//float A= 100*(dif.x*dif.x + dif.y*dif.y + dif.z*dif.z) /_2sigmar2;
//weightR = __expf(-A);//(2*PI*sigmar*sigmar);
weightR = pre_exp[dif.x] * pre_exp[dif.y] * pre_exp[dif.z];
norm += mask1D[j] * weightR;
sum += input[y*nc + (x-(F.width/2-j))] * mask1D[j] * weightR;
}
}
//}
//}
		output[y*nc + x] = (norm > 0.0f) ? (unsigned short int)(sum/norm) : pixelD; // guard against an empty neighborhood (norm == 0)
}
//else
// output[y*nc + x] = pixelD;
}
extern "C" void imageRellenarH(const Matrix F, const unsigned short int* input, const uchar* color, const uchar* modeloColor, const uchar* foreground, unsigned short int* output, int nl, int nc, int nch, int sigmar, float porcen, int cMin)
{
// setup execution parameters
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((nc+threads.x-1) / threads.x, (nl+threads.y-1) / threads.y);
// Invoke kernel
imageRellenarKernelH<<< grid, threads >>>(F, input, color, modeloColor, foreground, output, nl, nc, nch, sigmar, porcen, cMin);
}
// Performs a joint bilateral filter to fill in pixels with no depth information that have a minimum number of
// reliable neighbors (separable vertical approximation)
__global__ void imageRellenarKernelV(const Matrix F, const unsigned short int* input, const uchar* color, const uchar* modeloColor, const uchar* foreground, unsigned short int* output, int nl, int nc, int nch, int sigmar, float porcen, int cMin)
{
float sum= 0.0f;
float norm= 0.0f;
float weightR;
//int dif1, dif2, dif3;
int3 dif;
int cont = 0;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int x = blockIdx.x * blockDim.x + threadIdx.x;
if(y >= nl || x >= nc)
return;
unsigned short int pixelD = input[y*nc + x];
if (pixelD > 0){
output[y*nc + x] = pixelD;
return;
}
uchar3 pixelC = {color[y*nc*nch + x*nch], color[y*nc*nch + x*nch + 1], color[y*nc*nch + x*nch + 2]};
uchar3 pixelM = {modeloColor[y*nc*nch + x*nch], modeloColor[y*nc*nch + x*nch + 1], modeloColor[y*nc*nch + x*nch + 2]};
//int _2sigmar2 = 200*sigmar*sigmar;
int minPixel = 10 * porcen * F.height;
if (/*input[y*nc + x] == 0 && */foreground[y*nc + x] == 0){
for (int i = 0; i < F.height; i++){
if ((y-F.height/2+i >= 0) && (y-F.height/2+i < nl)){
//for (int j = 0; j < F.width; j++){
if (/*(x-(F.width/2-j) >= 0) && (x-(F.height/2-j) < nc) &&*/ input[(y-F.height/2+i)*nc + x] > 0 && foreground[(y-F.height/2+i)*nc + x] == 0){
unsigned char Cmap = tex2D(CmapTexture, x, y-F.height/2+i);
if (Cmap < 128+cMin)
continue;
dif.x = modeloColor[(y-F.height/2+i)*nc*nch + x*nch] - pixelM.x;
dif.y = modeloColor[(y-F.height/2+i)*nc*nch + x*nch + 1] - pixelM.y;
dif.z = modeloColor[(y-F.height/2+i)*nc*nch + x*nch + 2] - pixelM.z;
//float A= 100*(dif.x*dif.x + dif.y*dif.y + dif.z*dif.z) /_2sigmar2;
//weightR = __expf(-A);//(2*PI*sigmar*sigmar);
weightR = pre_exp[dif.x] * pre_exp[dif.y] * pre_exp[dif.z];
norm += mask1D[i] * weightR;
sum += input[(y-F.height/2+i)*nc + x] * mask1D[i] * weightR;
cont++;
}
//}
}
}
if (10*cont > minPixel)
output[y*nc + x] = sum/norm;
else
output[y*nc + x] = pixelD;
}
else{ //if (/*input[y*nc + x] == 0 && */foreground[y*nc + x] == 255){
for (int i = 0; i < F.height; i++){
if ((y-F.height/2+i >= 0) && (y-F.height/2+i < nl)){
//for (int j = 0; j < F.width; j++){
if (/*(x-(F.width/2-j) >= 0) && (x-(F.height/2-j) < nc) && */input[(y-F.height/2+i)*nc + x] > 0 && foreground[(y-F.height/2+i)*nc + x] == 255){
dif.x = color[(y-F.height/2+i)*nc*nch + x*nch] - pixelC.x;
dif.y = color[(y-F.height/2+i)*nc*nch + x*nch + 1] - pixelC.y;
dif.z = color[(y-F.height/2+i)*nc*nch + x*nch + 2] - pixelC.z;
//float A= 100*(dif.x*dif.x + dif.y*dif.y + dif.z*dif.z) /_2sigmar2;
//weightR = __expf(-A);//(2*PI*sigmar*sigmar);
weightR = pre_exp[dif.x] * pre_exp[dif.y] * pre_exp[dif.z];
norm += mask1D[i] * weightR;
sum += input[(y-F.height/2+i)*nc + x] * mask1D[i] * weightR;
}
//}
}
}
		output[y*nc + x] = (norm > 0.0f) ? (unsigned short int)(sum/norm) : pixelD; // guard against an empty neighborhood (norm == 0)
}
//else
// output[y*nc + x] = pixelD;
}
extern "C" void imageRellenarV(const Matrix F, const unsigned short int* input, const uchar* color, const uchar* modeloColor, const uchar* foreground, unsigned short int* output, int nl, int nc, int nch, int sigmar, float porcen, int cMin)
{
// setup execution parameters
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((nc+threads.x-1) / threads.x, (nl+threads.y-1) / threads.y);
// Invoke kernel
imageRellenarKernelV<<< grid, threads >>>(F, input, color, modeloColor, foreground, output, nl, nc, nch, sigmar, porcen, cMin);
}
|
2e7cc119a6732b5169183fb6c53f83e928772f2c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <stdio.h>
#define BUF_SIZ 1000000
__global__ void addKernel(int *c , const int *a, const int *b){
int i = threadIdx.x;
c[i] = a[i]+ b[i];
}
__global__ void GetKernel(int *b)
{
int i = threadIdx.x;
b[i] = b[i] * 10;
}
int main(void)
{
int a[BUF_SIZ];
int b[BUF_SIZ];
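	// Note: a and b are ~4 MB each; placing them on the stack can overflow the
	// default stack limit, so heap storage (e.g. std::vector<int>) is safer
	// for large BUF_SIZ.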
int *ary1=0;
// int *ary2=0;
// int *ary3=0;
for(int i=0;i<BUF_SIZ;i++)
{
a[i] = i;
}
hipMalloc((void**)&ary1 , BUF_SIZ*sizeof(int));
// hipMalloc((void**)&ary2 , BUF_SIZ*sizeof(int));
// hipMalloc((void**)&ary3 , BUF_SIZ*sizeof(int));
// hipMemcpy(ary2, a, BUF_SIZ*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(ary1, a, BUF_SIZ*sizeof(int),hipMemcpyHostToDevice);
/*
for(int i=0;i<100000;i++)
{
ary3[i] = ary1[i] + ary2[i];
}
*/
printf("addkernel start\n");
// addKernel<<<1,3>>>(ary3,ary1,ary2);
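	// One block of 3 threads: only b[0..2] are multiplied by 10; all remaining
	// elements come back from the device with their original values.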
hipLaunchKernelGGL(( GetKernel), dim3(1),dim3(3), 0, 0, ary1);
printf("addkernel end\n");
// hipMemcpy(b, ary3 ,BUF_SIZ*sizeof(int),hipMemcpyDeviceToHost);
hipMemcpy(b, ary1 ,BUF_SIZ*sizeof(int),hipMemcpyDeviceToHost);
for(int i =0; i < BUF_SIZ; i++)
{
if(i/500 ==0)
{
printf("%d ",b[i]);
}
}
hipFree(ary1);
// hipFree(ary2);
// hipFree(ary3);
return 0;
}
| 2e7cc119a6732b5169183fb6c53f83e928772f2c.cu | #include <cstdio>
#include <stdio.h>
#define BUF_SIZ 1000000
__global__ void addKernel(int *c , const int *a, const int *b){
int i = threadIdx.x;
c[i] = a[i]+ b[i];
}
__global__ void GetKernel(int *b)
{
int i = threadIdx.x;
b[i] = b[i] * 10;
}
int main(void)
{
int a[BUF_SIZ];
int b[BUF_SIZ];
int *ary1=0;
// int *ary2=0;
// int *ary3=0;
for(int i=0;i<BUF_SIZ;i++)
{
a[i] = i;
}
cudaMalloc((void**)&ary1 , BUF_SIZ*sizeof(int));
// cudaMalloc((void**)&ary2 , BUF_SIZ*sizeof(int));
// cudaMalloc((void**)&ary3 , BUF_SIZ*sizeof(int));
// cudaMemcpy(ary2, a, BUF_SIZ*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(ary1, a, BUF_SIZ*sizeof(int),cudaMemcpyHostToDevice);
/*
for(int i=0;i<100000;i++)
{
ary3[i] = ary1[i] + ary2[i];
}
*/
printf("addkernel start\n");
// addKernel<<<1,3>>>(ary3,ary1,ary2);
GetKernel<<<1,3>>>(ary1);
printf("addkernel end\n");
// cudaMemcpy(b, ary3 ,BUF_SIZ*sizeof(int),cudaMemcpyDeviceToHost);
cudaMemcpy(b, ary1 ,BUF_SIZ*sizeof(int),cudaMemcpyDeviceToHost);
for(int i =0; i < BUF_SIZ; i++)
{
if(i/500 ==0)
{
printf("%d ",b[i]);
}
}
cudaFree(ary1);
// cudaFree(ary2);
// cudaFree(ary3);
return 0;
}
|
3cbcb1f8990841c6a06e993dc742fbd695df9ba0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define BOOST_TEST_DYN_LINK
#define BOOST_TEST_MODULE TEST_HOW_CUFFT_WORKS
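// Workaround: nvcc 9+ removed __CUDACC_VER__, but some Boost headers still
// reference it, so define a stand-in when compiling with CUDA 9 or newer.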
#ifdef __CUDACC_VER_MAJOR__
#if __CUDACC_VER_MAJOR__ >= 9
#define __CUDACC_VER__ 90000
#endif
#endif
#include "boost/test/unit_test.hpp"
#include <numeric>
#include <vector>
#ifndef FC_TRACE
#define FC_TRACE false
#endif
#include "hipfft.h"
#include "cufft_test.cuh"
#include "test_utils.hpp"
#include "image_stack_utils.h"
#include "traits.hpp"
#include "book.h"
namespace fourierconvolution {
typedef boost::multi_array<hipfftComplex,3> frequ_stack;
__global__ void scale(hipfftComplex* _array, size_t _size, float _scale){
size_t tid = blockDim.x * blockIdx.x + threadIdx.x;
hipfftComplex el;
if(tid<_size){
el = _array[tid];
_array[tid].x = el.x*_scale;
_array[tid].y = el.y*_scale;
}
}
void inplace_fft_ifft(image_stack& _stack){
const size_t img_size = _stack.num_elements();
std::vector<size_t> shape(_stack.shape(),_stack.shape() + image_stack::dimensionality);
std::vector<size_t> shape_for_cufft(shape);
shape_for_cufft[row_major::x] = (shape[row_major::x]/2) + 1;
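    // The R2C transform stores only the non-redundant half of the spectrum
    // along the innermost axis: floor(x/2)+1 complex values per row (the
    // other half is implied by Hermitian symmetry).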
const size_t size_for_cufft = std::accumulate(shape_for_cufft.begin(), shape_for_cufft.end(),1,std::multiplies<size_t>());
hipfftComplex* d_stack = 0;
HANDLE_ERROR( hipMalloc( (void**)&(d_stack), size_for_cufft*sizeof(hipfftComplex) ) );
HANDLE_ERROR( hipMemset( d_stack, 0, size_for_cufft*sizeof(hipfftComplex) ));
//transform input data to cufft/fftw
frequ_stack cufft_compliant(shape_for_cufft);
float* stack_begin = _stack.data();
float* cufft_begin = reinterpret_cast<float*>(cufft_compliant.data());
for(size_t z = 0;z<shape[row_major::in_z];++z)
for(size_t y = 0;y<shape[row_major::in_y];++y){
size_t cufft_line_offset = (z*shape_for_cufft[row_major::in_y]*shape_for_cufft[row_major::in_x])+ (y*shape_for_cufft[row_major::in_x]);
cufft_begin = reinterpret_cast<float*>(&cufft_compliant.data()[cufft_line_offset]);
size_t stack_line_offset = (z*shape[row_major::in_y]*shape[row_major::in_x])+ (y*shape[row_major::in_x]);
stack_begin = &_stack.data()[stack_line_offset];
std::copy(stack_begin,stack_begin + shape[row_major::in_x],cufft_begin);
}
HANDLE_ERROR( hipMemcpy( d_stack, cufft_compliant.data(), size_for_cufft*sizeof(hipfftComplex) , hipMemcpyHostToDevice ) );
//FORWARD
hipfftHandle fftPlanFwd;
CUFFT_ERROR(hipfftPlan3d(&fftPlanFwd, shape[row_major::z], shape[row_major::y], shape[row_major::x], HIPFFT_R2C));
if(CUDART_VERSION < 6050)
CUFFT_ERROR(cufftSetCompatibilityMode(fftPlanFwd,HIPFFT_COMPATIBILITY_FFTW_PADDING));
CUFFT_ERROR(hipfftExecR2C(fftPlanFwd, (hipfftReal*)d_stack, (hipfftComplex *)d_stack));
CUFFT_ERROR(hipfftDestroy(fftPlanFwd));
//apply scale
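    // cuFFT transforms are unnormalized (ifft(fft(x)) == N*x), so scale the
    // frequency-domain data by 1/N to recover the original magnitudes.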
const float scale_ = 1.f/float(img_size);
unsigned threads = 32;
unsigned blocks = (size_for_cufft + threads -1) /threads;
hipLaunchKernelGGL(( scale), dim3(blocks),dim3(threads), 0, 0, d_stack,size_for_cufft,scale_);
//BACKWARD
hipfftHandle fftPlanInv;
CUFFT_ERROR(hipfftPlan3d(&fftPlanInv, shape[row_major::z], shape[row_major::y], shape[row_major::x], HIPFFT_C2R));
if(CUDART_VERSION < 6050)
CUFFT_ERROR(cufftSetCompatibilityMode(fftPlanInv,HIPFFT_COMPATIBILITY_FFTW_PADDING));
CUFFT_ERROR(hipfftExecC2R(fftPlanInv, (hipfftComplex*)d_stack, (hipfftReal *)d_stack));
CUFFT_ERROR(hipfftDestroy(fftPlanInv) );
hipfftComplex zero;zero.x = 0;zero.y = 0;
std::fill(cufft_compliant.data(),cufft_compliant.data()+cufft_compliant.num_elements(),zero);
HANDLE_ERROR( hipMemcpy( cufft_compliant.data(), d_stack , size_for_cufft*sizeof(hipfftComplex) , hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipFree( d_stack));
for(size_t z = 0;z<shape[row_major::in_z];++z)
for(size_t y = 0;y<shape[row_major::in_y];++y){
size_t cufft_line_offset = (z*shape_for_cufft[row_major::in_y]*shape_for_cufft[row_major::in_x])+ (y*shape_for_cufft[row_major::in_x]);
cufft_begin = reinterpret_cast<float*>(&cufft_compliant.data()[cufft_line_offset]);
size_t stack_line_offset = (z*shape[row_major::in_y]*shape[row_major::in_x])+ (y*shape[row_major::in_x]);
stack_begin = &_stack.data()[stack_line_offset];
std::copy(cufft_begin,cufft_begin + shape[row_major::in_x],stack_begin);
}
return;
}
void outofplace_fft_ifft(const image_stack& _input, image_stack& _output){
std::vector<size_t> shape(_input.shape(),_input.shape() + 3);
const size_t stack_size = _input.num_elements();
if(_output.num_elements()!=stack_size)
_output.resize(shape);
std::fill(_output.data(),_output.data()+stack_size,0);
std::vector<size_t> shape_for_cufft(shape);
shape_for_cufft[row_major::x] = (shape[row_major::x]/2) + 1;
size_t size_for_cufft = std::accumulate(shape_for_cufft.begin(), shape_for_cufft.end(),1,std::multiplies<size_t>());
hipfftComplex* d_complex = 0;
hipfftReal* d_real = 0;
HANDLE_ERROR( hipMalloc( (void**)&(d_complex), size_for_cufft*sizeof(hipfftComplex) ) );
HANDLE_ERROR( hipMemset( d_complex, 0, size_for_cufft*sizeof(hipfftComplex) ));
HANDLE_ERROR( hipMalloc( (void**)&(d_real), stack_size*sizeof(hipfftComplex) ) );
HANDLE_ERROR( hipMemcpy( d_real, _input.data(), stack_size*sizeof(float) , hipMemcpyHostToDevice ) );
//FORWARD
hipfftHandle fftPlanFwd;
CUFFT_ERROR(hipfftPlan3d(&fftPlanFwd, shape[row_major::z], shape[row_major::y], shape[row_major::x], HIPFFT_R2C));
if(CUDART_VERSION < 6050)
CUFFT_ERROR(cufftSetCompatibilityMode(fftPlanFwd,HIPFFT_COMPATIBILITY_FFTW_PADDING));
CUFFT_ERROR(hipfftExecR2C(fftPlanFwd, d_real, d_complex));
//apply scale
const float scale_ = 1.f/float(stack_size);
unsigned threads = 32;
unsigned blocks = (size_for_cufft + threads -1) /threads;
hipLaunchKernelGGL(( scale), dim3(blocks),dim3(threads), 0, 0, d_complex,size_for_cufft,scale_);
//BACKWARD
hipfftHandle fftPlanInv;
CUFFT_ERROR(hipfftPlan3d(&fftPlanInv, shape[row_major::z], shape[row_major::y], shape[row_major::x], HIPFFT_C2R));
if(CUDART_VERSION < 6050)
CUFFT_ERROR(cufftSetCompatibilityMode(fftPlanInv,HIPFFT_COMPATIBILITY_FFTW_PADDING));
CUFFT_ERROR(hipfftExecC2R(fftPlanInv, d_complex, d_real));
std::fill(_output.data(),_output.data()+stack_size,0);
HANDLE_ERROR( hipMemcpy( _output.data(), d_real , stack_size*sizeof(float) , hipMemcpyDeviceToHost ) );
( hipfftDestroy(fftPlanInv) );HANDLE_ERROR_KERNEL;
( hipfftDestroy(fftPlanFwd) );HANDLE_ERROR_KERNEL;
HANDLE_ERROR( hipFree( d_real));
HANDLE_ERROR( hipFree( d_complex));
}
};
namespace fc = fourierconvolution;
BOOST_AUTO_TEST_SUITE(inplace)
BOOST_AUTO_TEST_CASE(of_prime_shape) {
std::vector<size_t> shape(3,17);
shape[fc::row_major::z] = 13;
shape[fc::row_major::x] = 19;
fc::image_stack stack(shape);
for(size_t i = 0;i<stack.num_elements();++i)
stack.data()[i] = i;
fc::image_stack received(stack);
fc::inplace_fft_ifft(received);
double my_l2norm = l2norm(stack,received);
const double expected = 1e-1;
const bool result = my_l2norm<expected;
if(!result && FC_TRACE){
std::cout << boost::unit_test::framework::current_test_case().p_name << "\n";
std::cout << "expected:\n";
fc::print_stack(stack);
std::cout << "\n\nreceived:\n";
fc::print_stack(received);
}
BOOST_TEST_MESSAGE("inplace shape(x,y,z)=" << shape[fc::row_major::x]<< ", " << shape[fc::row_major::y]<< ", " << shape[fc::row_major::z] << "\tl2norm = " << my_l2norm);
BOOST_REQUIRE_MESSAGE(result,"l2norm = "<< my_l2norm <<" not smaller than " << expected);
}
BOOST_AUTO_TEST_CASE(of_prime_shape_symmetric) {
std::vector<size_t> shape(3,17);
fc::image_stack stack(shape);
for(size_t i = 0;i<stack.num_elements();++i)
stack.data()[i] = i;
fc::image_stack received(stack);
fc::inplace_fft_ifft(received);
double my_l2norm = l2norm(stack,received);
const double expected = 1e-1;
const bool result = my_l2norm<expected;
if(!result && FC_TRACE){
std::cout << boost::unit_test::framework::current_test_case().p_name << "\n";
std::cout << "expected:\n";
fc::print_stack(stack);
std::cout << "\n\nreceived:\n";
fc::print_stack(received);
}
BOOST_TEST_MESSAGE("inplace shape(x,y,z)=" << shape[fc::row_major::x]<< ", " << shape[fc::row_major::y]<< ", " << shape[fc::row_major::z] << "\tl2norm = " << my_l2norm);
BOOST_REQUIRE_MESSAGE(result,"l2norm = "<< my_l2norm <<" not smaller than " << expected);
}
BOOST_AUTO_TEST_CASE(power_of_2) {
std::vector<size_t> shape(3,16);
shape[fc::row_major::z] = 32;
shape[fc::row_major::x] = 8;
fc::image_stack stack(shape);
for(size_t i = 0;i<stack.num_elements();++i)
stack.data()[i] = i;
fc::image_stack received(stack);
fc::inplace_fft_ifft(received);
double my_l2norm = l2norm(stack,received);
const double expected = 1e-1;
const bool result = my_l2norm<expected;
if(!result && FC_TRACE){
std::cout << boost::unit_test::framework::current_test_case().p_name << "\n";
std::cout << "expected:\n";
fc::print_stack(stack);
std::cout << "\n\nreceived:\n";
fc::print_stack(received);
}
BOOST_TEST_MESSAGE("inplace shape(x,y,z)=" << shape[fc::row_major::x]<< ", " << shape[fc::row_major::y]<< ", " << shape[fc::row_major::z] << "\tl2norm = " << my_l2norm);
BOOST_REQUIRE_MESSAGE(result,"l2norm = "<< my_l2norm <<" not smaller than " << expected);
}
BOOST_AUTO_TEST_CASE(power_of_3) {
std::vector<size_t> shape(3,27);
shape[fc::row_major::z] = 9;
shape[fc::row_major::x] = 3;
fc::image_stack stack(shape);
for(size_t i = 0;i<stack.num_elements();++i)
stack.data()[i] = i;
fc::image_stack received(stack);
fc::inplace_fft_ifft(received);
double my_l2norm = l2norm(stack,received);
const double expected = 1e-1;
const bool result = my_l2norm<expected;
if(!result && FC_TRACE){
std::cout << boost::unit_test::framework::current_test_case().p_name << "\n";
std::cout << "expected:\n";
fc::print_stack(stack);
std::cout << "\n\nreceived:\n";
fc::print_stack(received);
}
BOOST_TEST_MESSAGE("inplace shape(x,y,z)=" << shape[fc::row_major::x]<< ", " << shape[fc::row_major::y]<< ", " << shape[fc::row_major::z] << "\tl2norm = " << my_l2norm);
BOOST_REQUIRE_MESSAGE(result,"l2norm = "<< my_l2norm <<" not smaller than " << expected);
}
BOOST_AUTO_TEST_CASE(power_of_5) {
std::vector<size_t> shape(3,25);
shape[fc::row_major::z] = 5;
shape[fc::row_major::x] = 125;
fc::image_stack stack(shape);
for(size_t i = 0;i<stack.num_elements();++i)
stack.data()[i] = i;
fc::image_stack received(stack);
fc::inplace_fft_ifft(received);
double my_l2norm = l2norm(stack,received);
const double expected = 1e-1;
const bool result = my_l2norm<expected;
if(!result && FC_TRACE){
std::cout << boost::unit_test::framework::current_test_case().p_name << "\n";
std::cout << "expected:\n";
fc::print_stack(stack);
std::cout << "\n\nreceived:\n";
fc::print_stack(received);
}
BOOST_TEST_MESSAGE("inplace shape(x,y,z)=" << shape[fc::row_major::x]<< ", " << shape[fc::row_major::y]<< ", " << shape[fc::row_major::z] << "\tl2norm = " << my_l2norm);
BOOST_REQUIRE_MESSAGE(result,"l2norm = "<< my_l2norm <<" not smaller than " << expected);
}
BOOST_AUTO_TEST_CASE(power_of_7) {
std::vector<size_t> shape(3,::pow(7,2));
shape[fc::row_major::z] = 7;
shape[fc::row_major::x] = ::pow(7,3);
fc::image_stack stack(shape);
for(size_t i = 0;i<stack.num_elements();++i)
stack.data()[i] = i;
fc::image_stack received(stack);
fc::inplace_fft_ifft(received);
double my_l2norm = l2norm(stack,received);
const double expected = 1e-1;
const bool result = my_l2norm<expected;
if(!result && FC_TRACE){
std::cout << boost::unit_test::framework::current_test_case().p_name << "\n";
std::cout << "expected:\n";
fc::print_stack(stack);
std::cout << "\n\nreceived:\n";
fc::print_stack(received);
}
BOOST_TEST_MESSAGE("inplace shape(x,y,z)=" << shape[fc::row_major::x]<< ", " << shape[fc::row_major::y]<< ", " << shape[fc::row_major::z] << "\tl2norm = " << my_l2norm);
BOOST_REQUIRE_MESSAGE(result,"l2norm = "<< my_l2norm <<" not smaller than " << expected);
}
BOOST_AUTO_TEST_CASE(cube_128_shape) {
std::vector<size_t> shape(3,128);
fc::image_stack stack(shape);
for(size_t i = 0;i<stack.num_elements();++i)
stack.data()[i] = i;
fc::image_stack received(stack);
fc::inplace_fft_ifft(received);
double my_l2norm = l2norm(stack,received);
const double expected = 1e-4;
const bool result = my_l2norm<expected;
if(!result && FC_TRACE){
std::cout << boost::unit_test::framework::current_test_case().p_name << "\n";
std::cout << "expected:\n";
fc::print_stack(stack);
std::cout << "\n\nreceived:\n";
fc::print_stack(received);
}
BOOST_TEST_MESSAGE("inplace shape(x,y,z)=" << shape[fc::row_major::x]<< ", " << shape[fc::row_major::y]<< ", " << shape[fc::row_major::z] << "\tl2norm = " << my_l2norm);
BOOST_REQUIRE_MESSAGE(result,"l2norm = "<< my_l2norm <<" not smaller than " << expected);
}
BOOST_AUTO_TEST_SUITE_END()
BOOST_AUTO_TEST_SUITE(outofplace)
BOOST_AUTO_TEST_CASE(of_prime_shape) {
std::vector<size_t> shape(3,17);
shape[fc::row_major::z] = 13;
shape[fc::row_major::x] = 19;
fc::image_stack stack(shape);
fc::image_stack received(shape);
for(size_t i = 0;i<stack.num_elements();++i)
stack.data()[i] = i;
size_t img_size = std::accumulate(shape.begin(), shape.end(),1,std::multiplies<size_t>());
BOOST_REQUIRE(img_size > 32);
fc::outofplace_fft_ifft(stack, received);
double my_l2norm = l2norm(stack,received);
const double expected = 1e-1;
const bool result = my_l2norm<expected;
if(!result && FC_TRACE){
std::cout << "expected:\n";
fc::print_stack(stack);
std::cout << "\n\nreceived:\n";
fc::print_stack(received);
}
BOOST_TEST_MESSAGE("outofplace shape(x,y,z)=" << shape[fc::row_major::x]<< ", " << shape[fc::row_major::y]<< ", " << shape[fc::row_major::z] << "\tl2norm = " << my_l2norm);
BOOST_REQUIRE_MESSAGE(result,"l2norm = "<< my_l2norm <<" not smaller than " << expected);
}
BOOST_AUTO_TEST_CASE(of_prime_shape_symmetric) {
std::vector<size_t> shape(3,17);
fc::image_stack stack(shape);
fc::image_stack received(shape);
for(size_t i = 0;i<stack.num_elements();++i)
stack.data()[i] = i;
size_t img_size = std::accumulate(shape.begin(), shape.end(),1,std::multiplies<size_t>());
BOOST_REQUIRE(img_size > 32);
fc::outofplace_fft_ifft(stack, received);
double my_l2norm = l2norm(stack,received);
const double expected = 1e-1;
const bool result = my_l2norm<expected;
if(!result && FC_TRACE){
std::cout << "expected:\n";
fc::print_stack(stack);
std::cout << "\n\nreceived:\n";
fc::print_stack(received);
}
BOOST_TEST_MESSAGE("outofplace shape(x,y,z)=" << shape[fc::row_major::x]<< ", " << shape[fc::row_major::y]<< ", " << shape[fc::row_major::z] << "\tl2norm = " << my_l2norm);
BOOST_REQUIRE_MESSAGE(result,"l2norm = "<< my_l2norm <<" not smaller than " << expected);
}
BOOST_AUTO_TEST_CASE(power_of_2) {
std::vector<size_t> shape(3,16);
shape[fc::row_major::z] = 32;
shape[fc::row_major::x] = 8;
fc::image_stack stack(shape);
fc::image_stack received(shape);
for(size_t i = 0;i<stack.num_elements();++i)
stack.data()[i] = i;
size_t img_size = std::accumulate(shape.begin(), shape.end(),1,std::multiplies<size_t>());
BOOST_REQUIRE(img_size > 32);
fc::outofplace_fft_ifft(stack, received);
double my_l2norm = l2norm(stack,received);
const double expected = 1e-4;
const bool result = my_l2norm<expected;
if(!result && FC_TRACE){
std::cout << "expected:\n";
fc::print_stack(stack);
std::cout << "\n\nreceived:\n";
fc::print_stack(received);
std::cout << "\nl2norm = " << my_l2norm << "\n";
}
BOOST_TEST_MESSAGE("outofplace shape(x,y,z)=" << shape[fc::row_major::x]<< ", " << shape[fc::row_major::y]<< ", " << shape[fc::row_major::z] << "\tl2norm = " << my_l2norm);
BOOST_REQUIRE_MESSAGE(result,"l2norm = "<< my_l2norm <<" not smaller than " << expected);
}
BOOST_AUTO_TEST_CASE(power_of_3) {
std::vector<size_t> shape(3,::pow(3,3));
shape[fc::row_major::z] = 9;
shape[fc::row_major::x] = 3;
fc::image_stack stack(shape);
fc::image_stack received(shape);
for(size_t i = 0;i<stack.num_elements();++i)
stack.data()[i] = i;
size_t img_size = std::accumulate(shape.begin(), shape.end(),1,std::multiplies<size_t>());
BOOST_REQUIRE(img_size > 32);
fc::outofplace_fft_ifft(stack, received);
double my_l2norm = l2norm(stack,received); const double expected = 1e-4;
const bool result = my_l2norm<expected;
if(!result && FC_TRACE){
std::cout << "expected:\n";
fc::print_stack(stack);
std::cout << "\n\nreceived:\n";
fc::print_stack(received);
std::cout << "\nl2norm = " << my_l2norm << "\n";
}
BOOST_TEST_MESSAGE("outofplace shape(x,y,z)=" << shape[fc::row_major::x]<< ", " << shape[fc::row_major::y]<< ", " << shape[fc::row_major::z] << "\tl2norm = " << my_l2norm);
BOOST_REQUIRE_MESSAGE(result,"l2norm = "<< my_l2norm <<" not smaller than " << expected);
}
BOOST_AUTO_TEST_CASE(power_of_5) {
std::vector<size_t> shape(3,::pow(5,2));
shape[fc::row_major::z] = 5;
shape[fc::row_major::x] = 125;
fc::image_stack stack(shape);
fc::image_stack received(shape);
for(size_t i = 0;i<stack.num_elements();++i)
stack.data()[i] = i;
size_t img_size = std::accumulate(shape.begin(), shape.end(),1,std::multiplies<size_t>());
BOOST_REQUIRE(img_size > 32);
fc::outofplace_fft_ifft(stack, received);
double my_l2norm = l2norm(stack,received); const double expected = 1e-4;
const bool result = my_l2norm<expected;
if(!result && FC_TRACE){
std::cout << "expected:\n";
fc::print_stack(stack);
std::cout << "\n\nreceived:\n";
fc::print_stack(received);
std::cout << "\nl2norm = " << my_l2norm << "\n";
}
BOOST_TEST_MESSAGE("outofplace shape(x,y,z)=" << shape[fc::row_major::x]<< ", " << shape[fc::row_major::y]<< ", " << shape[fc::row_major::z] << "\tl2norm = " << my_l2norm);
BOOST_REQUIRE_MESSAGE(result,"l2norm = "<< my_l2norm <<" not smaller than " << expected);
}
BOOST_AUTO_TEST_CASE(power_of_7) {
std::vector<size_t> shape(3,14);
shape[fc::row_major::z] = 7;
shape[fc::row_major::x] = ::pow(7,3);
fc::image_stack stack(shape);
fc::image_stack received(shape);
for(size_t i = 0;i<stack.num_elements();++i)
stack.data()[i] = i;
size_t img_size = std::accumulate(shape.begin(), shape.end(),1,std::multiplies<size_t>());
BOOST_REQUIRE(img_size > 32);
fc::outofplace_fft_ifft(stack, received);
double my_l2norm = l2norm(stack,received);
const double expected = 1e-3;
const bool result = my_l2norm<expected;
BOOST_TEST_MESSAGE("outofplace shape(x,y,z)=" << shape[fc::row_major::x]<< ", " << shape[fc::row_major::y]<< ", " << shape[fc::row_major::z] << "\tl2norm = " << my_l2norm);
BOOST_REQUIRE_MESSAGE(result,"l2norm = "<< my_l2norm <<" not smaller than " << expected);
}
BOOST_AUTO_TEST_CASE(cube_128_shape) {
std::vector<size_t> shape(3,128);
fc::image_stack stack(shape);
fc::image_stack received(shape);
for(size_t i = 0;i<stack.num_elements();++i)
stack.data()[i] = i;
size_t img_size = std::accumulate(shape.begin(), shape.end(),1,std::multiplies<size_t>());
BOOST_REQUIRE(img_size > 32);
fc::outofplace_fft_ifft(stack, received);
double my_l2norm = l2norm(stack,received);
const double expected = 1e-3;
const bool result = my_l2norm<expected;
BOOST_TEST_MESSAGE("outofplace shape(x,y,z)=" << shape[fc::row_major::x]<< ", " << shape[fc::row_major::y]<< ", " << shape[fc::row_major::z] << "\tl2norm = " << my_l2norm);
BOOST_REQUIRE_MESSAGE(result,"l2norm = "<< my_l2norm <<" not smaller than " << expected);
}
BOOST_AUTO_TEST_SUITE_END()
BOOST_AUTO_TEST_SUITE(maweigert)
BOOST_AUTO_TEST_CASE(inplace_c2c_of_prime_shape) {
std::vector<size_t> shape(3,0);
shape[fc::row_major::x] = 13;
shape[fc::row_major::y] = 17;
shape[fc::row_major::z] = 19;
fc::frequ_stack stack(shape);
for(size_t i = 0;i<stack.num_elements();++i){
stack.data()[i].x = i;
stack.data()[i].y = i;
}
fc::frequ_stack received = stack;
size_t img_size = std::accumulate(shape.begin(), shape.end(),1,std::multiplies<size_t>());
BOOST_REQUIRE(img_size > 32);
cufft_c2c_ptr(stack.data(),shape[fc::row_major::x],shape[fc::row_major::y],shape[fc::row_major::z]);
double diff = 0;
for(size_t i = 0;i<stack.num_elements();++i){
double xtemp = stack.data()[i].x-received.data()[i].x;
double ytemp = stack.data()[i].y-received.data()[i].y;
diff += xtemp*xtemp;
diff += ytemp*ytemp;
}
double my_l2norm = std::sqrt(diff)/img_size;
const double expected = 1e-3;
const bool result = my_l2norm<expected;
BOOST_TEST_MESSAGE("maweigert shape(x,y,z)=" << shape[fc::row_major::x]<< ", " << shape[fc::row_major::y]<< ", " << shape[fc::row_major::z] << "\tl2norm = " << my_l2norm);
BOOST_REQUIRE_MESSAGE(result,"l2norm = "<< my_l2norm <<" not smaller than " << expected);
}
BOOST_AUTO_TEST_CASE(inplace_c2c_of_prime_shape_reversed) {
std::vector<size_t> shape(3,0);
shape[fc::row_major::x] = 13;
shape[fc::row_major::y] = 17;
shape[fc::row_major::z] = 19;
fc::frequ_stack stack(shape);
for(size_t i = 0;i<stack.num_elements();++i){
stack.data()[i].x = i;
stack.data()[i].y = i;
}
fc::frequ_stack received = stack;
size_t img_size = std::accumulate(shape.begin(), shape.end(),1,std::multiplies<size_t>());
BOOST_REQUIRE(img_size > 32);
cufft_c2c_ptr(stack.data(),shape[fc::row_major::z],shape[fc::row_major::y],shape[fc::row_major::x]);
double diff = 0;
for(size_t i = 0;i<stack.num_elements();++i){
double xtemp = stack.data()[i].x-received.data()[i].x;
double ytemp = stack.data()[i].y-received.data()[i].y;
diff += xtemp*xtemp;
diff += ytemp*ytemp;
}
double my_l2norm = std::sqrt(diff)/img_size;
const double expected = 1e-3;
const bool result = my_l2norm<expected;
BOOST_TEST_MESSAGE("maweigert shape(x,y,z)=" << shape[fc::row_major::z]<< ", " << shape[fc::row_major::y]<< ", " << shape[fc::row_major::x] << "\tl2norm = " << my_l2norm);
BOOST_REQUIRE_MESSAGE(result,"l2norm = "<< my_l2norm <<" not smaller than " << expected);
}
BOOST_AUTO_TEST_SUITE_END()
| 3cbcb1f8990841c6a06e993dc742fbd695df9ba0.cu | #define BOOST_TEST_DYN_LINK
#define BOOST_TEST_MODULE TEST_HOW_CUFFT_WORKS
#ifdef __CUDACC_VER_MAJOR__
#if __CUDACC_VER_MAJOR__ >= 9
#define __CUDACC_VER__ 90000
#endif
#endif
#include "boost/test/unit_test.hpp"
#include <numeric>
#include <vector>
#ifndef FC_TRACE
#define FC_TRACE false
#endif
#include "cufft.h"
#include "cufft_test.cuh"
#include "test_utils.hpp"
#include "image_stack_utils.h"
#include "traits.hpp"
#include "book.h"
namespace fourierconvolution {
typedef boost::multi_array<cufftComplex,3> frequ_stack;
__global__ void scale(cufftComplex* _array, size_t _size, float _scale){
size_t tid = blockDim.x * blockIdx.x + threadIdx.x;
cufftComplex el;
if(tid<_size){
el = _array[tid];
_array[tid].x = el.x*_scale;
_array[tid].y = el.y*_scale;
}
}
void inplace_fft_ifft(image_stack& _stack){
const size_t img_size = _stack.num_elements();
std::vector<size_t> shape(_stack.shape(),_stack.shape() + image_stack::dimensionality);
std::vector<size_t> shape_for_cufft(shape);
shape_for_cufft[row_major::x] = (shape[row_major::x]/2) + 1;
const size_t size_for_cufft = std::accumulate(shape_for_cufft.begin(), shape_for_cufft.end(),1,std::multiplies<size_t>());
cufftComplex* d_stack = 0;
HANDLE_ERROR( cudaMalloc( (void**)&(d_stack), size_for_cufft*sizeof(cufftComplex) ) );
HANDLE_ERROR( cudaMemset( d_stack, 0, size_for_cufft*sizeof(cufftComplex) ));
//transform input data to cufft/fftw
frequ_stack cufft_compliant(shape_for_cufft);
float* stack_begin = _stack.data();
float* cufft_begin = reinterpret_cast<float*>(cufft_compliant.data());
for(size_t z = 0;z<shape[row_major::in_z];++z)
for(size_t y = 0;y<shape[row_major::in_y];++y){
size_t cufft_line_offset = (z*shape_for_cufft[row_major::in_y]*shape_for_cufft[row_major::in_x])+ (y*shape_for_cufft[row_major::in_x]);
cufft_begin = reinterpret_cast<float*>(&cufft_compliant.data()[cufft_line_offset]);
size_t stack_line_offset = (z*shape[row_major::in_y]*shape[row_major::in_x])+ (y*shape[row_major::in_x]);
stack_begin = &_stack.data()[stack_line_offset];
std::copy(stack_begin,stack_begin + shape[row_major::in_x],cufft_begin);
}
HANDLE_ERROR( cudaMemcpy( d_stack, cufft_compliant.data(), size_for_cufft*sizeof(cufftComplex) , cudaMemcpyHostToDevice ) );
//FORWARD
cufftHandle fftPlanFwd;
CUFFT_ERROR(cufftPlan3d(&fftPlanFwd, shape[row_major::z], shape[row_major::y], shape[row_major::x], CUFFT_R2C));
if(CUDART_VERSION < 6050)
CUFFT_ERROR(cufftSetCompatibilityMode(fftPlanFwd,CUFFT_COMPATIBILITY_FFTW_PADDING));
CUFFT_ERROR(cufftExecR2C(fftPlanFwd, (cufftReal*)d_stack, (cufftComplex *)d_stack));
CUFFT_ERROR(cufftDestroy(fftPlanFwd));
//apply scale
const float scale_ = 1.f/float(img_size);
unsigned threads = 32;
unsigned blocks = (size_for_cufft + threads -1) /threads;
scale<<<blocks,threads>>>(d_stack,size_for_cufft,scale_);
//BACKWARD
cufftHandle fftPlanInv;
CUFFT_ERROR(cufftPlan3d(&fftPlanInv, shape[row_major::z], shape[row_major::y], shape[row_major::x], CUFFT_C2R));
if(CUDART_VERSION < 6050)
CUFFT_ERROR(cufftSetCompatibilityMode(fftPlanInv,CUFFT_COMPATIBILITY_FFTW_PADDING));
CUFFT_ERROR(cufftExecC2R(fftPlanInv, (cufftComplex*)d_stack, (cufftReal *)d_stack));
CUFFT_ERROR(cufftDestroy(fftPlanInv) );
cufftComplex zero;zero.x = 0;zero.y = 0;
std::fill(cufft_compliant.data(),cufft_compliant.data()+cufft_compliant.num_elements(),zero);
HANDLE_ERROR( cudaMemcpy( cufft_compliant.data(), d_stack , size_for_cufft*sizeof(cufftComplex) , cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaFree( d_stack));
for(size_t z = 0;z<shape[row_major::in_z];++z)
for(size_t y = 0;y<shape[row_major::in_y];++y){
size_t cufft_line_offset = (z*shape_for_cufft[row_major::in_y]*shape_for_cufft[row_major::in_x])+ (y*shape_for_cufft[row_major::in_x]);
cufft_begin = reinterpret_cast<float*>(&cufft_compliant.data()[cufft_line_offset]);
size_t stack_line_offset = (z*shape[row_major::in_y]*shape[row_major::in_x])+ (y*shape[row_major::in_x]);
stack_begin = &_stack.data()[stack_line_offset];
std::copy(cufft_begin,cufft_begin + shape[row_major::in_x],stack_begin);
}
return;
}
void outofplace_fft_ifft(const image_stack& _input, image_stack& _output){
std::vector<size_t> shape(_input.shape(),_input.shape() + 3);
const size_t stack_size = _input.num_elements();
if(_output.num_elements()!=stack_size)
_output.resize(shape);
std::fill(_output.data(),_output.data()+stack_size,0);
std::vector<size_t> shape_for_cufft(shape);
shape_for_cufft[row_major::x] = (shape[row_major::x]/2) + 1;
size_t size_for_cufft = std::accumulate(shape_for_cufft.begin(), shape_for_cufft.end(),1,std::multiplies<size_t>());
cufftComplex* d_complex = 0;
cufftReal* d_real = 0;
HANDLE_ERROR( cudaMalloc( (void**)&(d_complex), size_for_cufft*sizeof(cufftComplex) ) );
HANDLE_ERROR( cudaMemset( d_complex, 0, size_for_cufft*sizeof(cufftComplex) ));
HANDLE_ERROR( cudaMalloc( (void**)&(d_real), stack_size*sizeof(cufftComplex) ) );
HANDLE_ERROR( cudaMemcpy( d_real, _input.data(), stack_size*sizeof(float) , cudaMemcpyHostToDevice ) );
//FORWARD
cufftHandle fftPlanFwd;
CUFFT_ERROR(cufftPlan3d(&fftPlanFwd, shape[row_major::z], shape[row_major::y], shape[row_major::x], CUFFT_R2C));
if(CUDART_VERSION < 6050)
CUFFT_ERROR(cufftSetCompatibilityMode(fftPlanFwd,CUFFT_COMPATIBILITY_FFTW_PADDING));
CUFFT_ERROR(cufftExecR2C(fftPlanFwd, d_real, d_complex));
//apply scale
const float scale_ = 1.f/float(stack_size);
unsigned threads = 32;
unsigned blocks = (size_for_cufft + threads -1) /threads;
scale<<<blocks,threads>>>(d_complex,size_for_cufft,scale_);
//BACKWARD
cufftHandle fftPlanInv;
CUFFT_ERROR(cufftPlan3d(&fftPlanInv, shape[row_major::z], shape[row_major::y], shape[row_major::x], CUFFT_C2R));
if(CUDART_VERSION < 6050)
CUFFT_ERROR(cufftSetCompatibilityMode(fftPlanInv,CUFFT_COMPATIBILITY_FFTW_PADDING));
CUFFT_ERROR(cufftExecC2R(fftPlanInv, d_complex, d_real));
std::fill(_output.data(),_output.data()+stack_size,0);
HANDLE_ERROR( cudaMemcpy( _output.data(), d_real , stack_size*sizeof(float) , cudaMemcpyDeviceToHost ) );
( cufftDestroy(fftPlanInv) );HANDLE_ERROR_KERNEL;
( cufftDestroy(fftPlanFwd) );HANDLE_ERROR_KERNEL;
HANDLE_ERROR( cudaFree( d_real));
HANDLE_ERROR( cudaFree( d_complex));
}
};
namespace fc = fourierconvolution;
BOOST_AUTO_TEST_SUITE(inplace)
BOOST_AUTO_TEST_CASE(of_prime_shape) {
std::vector<size_t> shape(3,17);
shape[fc::row_major::z] = 13;
shape[fc::row_major::x] = 19;
fc::image_stack stack(shape);
for(size_t i = 0;i<stack.num_elements();++i)
stack.data()[i] = i;
fc::image_stack received(stack);
fc::inplace_fft_ifft(received);
double my_l2norm = l2norm(stack,received);
const double expected = 1e-1;
const bool result = my_l2norm<expected;
if(!result && FC_TRACE){
std::cout << boost::unit_test::framework::current_test_case().p_name << "\n";
std::cout << "expected:\n";
fc::print_stack(stack);
std::cout << "\n\nreceived:\n";
fc::print_stack(received);
}
BOOST_TEST_MESSAGE("inplace shape(x,y,z)=" << shape[fc::row_major::x]<< ", " << shape[fc::row_major::y]<< ", " << shape[fc::row_major::z] << "\tl2norm = " << my_l2norm);
BOOST_REQUIRE_MESSAGE(result,"l2norm = "<< my_l2norm <<" not smaller than " << expected);
}
BOOST_AUTO_TEST_CASE(of_prime_shape_symmetric) {
std::vector<size_t> shape(3,17);
fc::image_stack stack(shape);
for(size_t i = 0;i<stack.num_elements();++i)
stack.data()[i] = i;
fc::image_stack received(stack);
fc::inplace_fft_ifft(received);
double my_l2norm = l2norm(stack,received);
const double expected = 1e-1;
const bool result = my_l2norm<expected;
if(!result && FC_TRACE){
std::cout << boost::unit_test::framework::current_test_case().p_name << "\n";
std::cout << "expected:\n";
fc::print_stack(stack);
std::cout << "\n\nreceived:\n";
fc::print_stack(received);
}
BOOST_TEST_MESSAGE("inplace shape(x,y,z)=" << shape[fc::row_major::x]<< ", " << shape[fc::row_major::y]<< ", " << shape[fc::row_major::z] << "\tl2norm = " << my_l2norm);
BOOST_REQUIRE_MESSAGE(result,"l2norm = "<< my_l2norm <<" not smaller than " << expected);
}
BOOST_AUTO_TEST_CASE(power_of_2) {
std::vector<size_t> shape(3,16);
shape[fc::row_major::z] = 32;
shape[fc::row_major::x] = 8;
fc::image_stack stack(shape);
for(size_t i = 0;i<stack.num_elements();++i)
stack.data()[i] = i;
fc::image_stack received(stack);
fc::inplace_fft_ifft(received);
double my_l2norm = l2norm(stack,received);
const double expected = 1e-1;
const bool result = my_l2norm<expected;
if(!result && FC_TRACE){
std::cout << boost::unit_test::framework::current_test_case().p_name << "\n";
std::cout << "expected:\n";
fc::print_stack(stack);
std::cout << "\n\nreceived:\n";
fc::print_stack(received);
}
BOOST_TEST_MESSAGE("inplace shape(x,y,z)=" << shape[fc::row_major::x]<< ", " << shape[fc::row_major::y]<< ", " << shape[fc::row_major::z] << "\tl2norm = " << my_l2norm);
BOOST_REQUIRE_MESSAGE(result,"l2norm = "<< my_l2norm <<" not smaller than " << expected);
}
BOOST_AUTO_TEST_CASE(power_of_3) {
std::vector<size_t> shape(3,27);
shape[fc::row_major::z] = 9;
shape[fc::row_major::x] = 3;
fc::image_stack stack(shape);
for(size_t i = 0;i<stack.num_elements();++i)
stack.data()[i] = i;
fc::image_stack received(stack);
fc::inplace_fft_ifft(received);
double my_l2norm = l2norm(stack,received);
const double expected = 1e-1;
const bool result = my_l2norm<expected;
if(!result && FC_TRACE){
std::cout << boost::unit_test::framework::current_test_case().p_name << "\n";
std::cout << "expected:\n";
fc::print_stack(stack);
std::cout << "\n\nreceived:\n";
fc::print_stack(received);
}
BOOST_TEST_MESSAGE("inplace shape(x,y,z)=" << shape[fc::row_major::x]<< ", " << shape[fc::row_major::y]<< ", " << shape[fc::row_major::z] << "\tl2norm = " << my_l2norm);
BOOST_REQUIRE_MESSAGE(result,"l2norm = "<< my_l2norm <<" not smaller than " << expected);
}
BOOST_AUTO_TEST_CASE(power_of_5) {
std::vector<size_t> shape(3,25);
shape[fc::row_major::z] = 5;
shape[fc::row_major::x] = 125;
fc::image_stack stack(shape);
for(size_t i = 0;i<stack.num_elements();++i)
stack.data()[i] = i;
fc::image_stack received(stack);
fc::inplace_fft_ifft(received);
double my_l2norm = l2norm(stack,received);
const double expected = 1e-1;
const bool result = my_l2norm<expected;
if(!result && FC_TRACE){
std::cout << boost::unit_test::framework::current_test_case().p_name << "\n";
std::cout << "expected:\n";
fc::print_stack(stack);
std::cout << "\n\nreceived:\n";
fc::print_stack(received);
}
BOOST_TEST_MESSAGE("inplace shape(x,y,z)=" << shape[fc::row_major::x]<< ", " << shape[fc::row_major::y]<< ", " << shape[fc::row_major::z] << "\tl2norm = " << my_l2norm);
BOOST_REQUIRE_MESSAGE(result,"l2norm = "<< my_l2norm <<" not smaller than " << expected);
}
BOOST_AUTO_TEST_CASE(power_of_7) {
std::vector<size_t> shape(3,std::pow(7,2));
shape[fc::row_major::z] = 7;
shape[fc::row_major::x] = std::pow(7,3);
fc::image_stack stack(shape);
for(size_t i = 0;i<stack.num_elements();++i)
stack.data()[i] = i;
fc::image_stack received(stack);
fc::inplace_fft_ifft(received);
double my_l2norm = l2norm(stack,received);
const double expected = 1e-1;
const bool result = my_l2norm<expected;
if(!result && FC_TRACE){
std::cout << boost::unit_test::framework::current_test_case().p_name << "\n";
std::cout << "expected:\n";
fc::print_stack(stack);
std::cout << "\n\nreceived:\n";
fc::print_stack(received);
}
BOOST_TEST_MESSAGE("inplace shape(x,y,z)=" << shape[fc::row_major::x]<< ", " << shape[fc::row_major::y]<< ", " << shape[fc::row_major::z] << "\tl2norm = " << my_l2norm);
BOOST_REQUIRE_MESSAGE(result,"l2norm = "<< my_l2norm <<" not smaller than " << expected);
}
BOOST_AUTO_TEST_CASE(cube_128_shape) {
std::vector<size_t> shape(3,128);
fc::image_stack stack(shape);
for(size_t i = 0;i<stack.num_elements();++i)
stack.data()[i] = i;
fc::image_stack received(stack);
fc::inplace_fft_ifft(received);
double my_l2norm = l2norm(stack,received);
const double expected = 1e-4;
const bool result = my_l2norm<expected;
if(!result && FC_TRACE){
std::cout << boost::unit_test::framework::current_test_case().p_name << "\n";
std::cout << "expected:\n";
fc::print_stack(stack);
std::cout << "\n\nreceived:\n";
fc::print_stack(received);
}
BOOST_TEST_MESSAGE("inplace shape(x,y,z)=" << shape[fc::row_major::x]<< ", " << shape[fc::row_major::y]<< ", " << shape[fc::row_major::z] << "\tl2norm = " << my_l2norm);
BOOST_REQUIRE_MESSAGE(result,"l2norm = "<< my_l2norm <<" not smaller than " << expected);
}
BOOST_AUTO_TEST_SUITE_END()
BOOST_AUTO_TEST_SUITE(outofplace)
BOOST_AUTO_TEST_CASE(of_prime_shape) {
std::vector<size_t> shape(3,17);
shape[fc::row_major::z] = 13;
shape[fc::row_major::x] = 19;
fc::image_stack stack(shape);
fc::image_stack received(shape);
for(size_t i = 0;i<stack.num_elements();++i)
stack.data()[i] = i;
size_t img_size = std::accumulate(shape.begin(), shape.end(),1,std::multiplies<size_t>());
BOOST_REQUIRE(img_size > 32);
fc::outofplace_fft_ifft(stack, received);
double my_l2norm = l2norm(stack,received);
const double expected = 1e-1;
const bool result = my_l2norm<expected;
if(!result && FC_TRACE){
std::cout << "expected:\n";
fc::print_stack(stack);
std::cout << "\n\nreceived:\n";
fc::print_stack(received);
}
BOOST_TEST_MESSAGE("outofplace shape(x,y,z)=" << shape[fc::row_major::x]<< ", " << shape[fc::row_major::y]<< ", " << shape[fc::row_major::z] << "\tl2norm = " << my_l2norm);
BOOST_REQUIRE_MESSAGE(result,"l2norm = "<< my_l2norm <<" not smaller than " << expected);
}
BOOST_AUTO_TEST_CASE(of_prime_shape_symmetric) {
std::vector<size_t> shape(3,17);
fc::image_stack stack(shape);
fc::image_stack received(shape);
for(size_t i = 0;i<stack.num_elements();++i)
stack.data()[i] = i;
size_t img_size = std::accumulate(shape.begin(), shape.end(),1,std::multiplies<size_t>());
BOOST_REQUIRE(img_size > 32);
fc::outofplace_fft_ifft(stack, received);
double my_l2norm = l2norm(stack,received);
const double expected = 1e-1;
const bool result = my_l2norm<expected;
if(!result && FC_TRACE){
std::cout << "expected:\n";
fc::print_stack(stack);
std::cout << "\n\nreceived:\n";
fc::print_stack(received);
}
BOOST_TEST_MESSAGE("outofplace shape(x,y,z)=" << shape[fc::row_major::x]<< ", " << shape[fc::row_major::y]<< ", " << shape[fc::row_major::z] << "\tl2norm = " << my_l2norm);
BOOST_REQUIRE_MESSAGE(result,"l2norm = "<< my_l2norm <<" not smaller than " << expected);
}
BOOST_AUTO_TEST_CASE(power_of_2) {
std::vector<size_t> shape(3,16);
shape[fc::row_major::z] = 32;
shape[fc::row_major::x] = 8;
fc::image_stack stack(shape);
fc::image_stack received(shape);
for(size_t i = 0;i<stack.num_elements();++i)
stack.data()[i] = i;
size_t img_size = std::accumulate(shape.begin(), shape.end(),1,std::multiplies<size_t>());
BOOST_REQUIRE(img_size > 32);
fc::outofplace_fft_ifft(stack, received);
double my_l2norm = l2norm(stack,received);
const double expected = 1e-4;
const bool result = my_l2norm<expected;
if(!result && FC_TRACE){
std::cout << "expected:\n";
fc::print_stack(stack);
std::cout << "\n\nreceived:\n";
fc::print_stack(received);
std::cout << "\nl2norm = " << my_l2norm << "\n";
}
BOOST_TEST_MESSAGE("outofplace shape(x,y,z)=" << shape[fc::row_major::x]<< ", " << shape[fc::row_major::y]<< ", " << shape[fc::row_major::z] << "\tl2norm = " << my_l2norm);
BOOST_REQUIRE_MESSAGE(result,"l2norm = "<< my_l2norm <<" not smaller than " << expected);
}
BOOST_AUTO_TEST_CASE(power_of_3) {
std::vector<size_t> shape(3,std::pow(3,3));
shape[fc::row_major::z] = 9;
shape[fc::row_major::x] = 3;
fc::image_stack stack(shape);
fc::image_stack received(shape);
for(size_t i = 0;i<stack.num_elements();++i)
stack.data()[i] = i;
size_t img_size = std::accumulate(shape.begin(), shape.end(),1,std::multiplies<size_t>());
BOOST_REQUIRE(img_size > 32);
fc::outofplace_fft_ifft(stack, received);
double my_l2norm = l2norm(stack,received); const double expected = 1e-4;
const bool result = my_l2norm<expected;
if(!result && FC_TRACE){
std::cout << "expected:\n";
fc::print_stack(stack);
std::cout << "\n\nreceived:\n";
fc::print_stack(received);
std::cout << "\nl2norm = " << my_l2norm << "\n";
}
BOOST_TEST_MESSAGE("outofplace shape(x,y,z)=" << shape[fc::row_major::x]<< ", " << shape[fc::row_major::y]<< ", " << shape[fc::row_major::z] << "\tl2norm = " << my_l2norm);
BOOST_REQUIRE_MESSAGE(result,"l2norm = "<< my_l2norm <<" not smaller than " << expected);
}
BOOST_AUTO_TEST_CASE(power_of_5) {
std::vector<size_t> shape(3,std::pow(5,2));
shape[fc::row_major::z] = 5;
shape[fc::row_major::x] = 125;
fc::image_stack stack(shape);
fc::image_stack received(shape);
for(size_t i = 0;i<stack.num_elements();++i)
stack.data()[i] = i;
size_t img_size = std::accumulate(shape.begin(), shape.end(),1,std::multiplies<size_t>());
BOOST_REQUIRE(img_size > 32);
fc::outofplace_fft_ifft(stack, received);
double my_l2norm = l2norm(stack,received); const double expected = 1e-4;
const bool result = my_l2norm<expected;
if(!result && FC_TRACE){
std::cout << "expected:\n";
fc::print_stack(stack);
std::cout << "\n\nreceived:\n";
fc::print_stack(received);
std::cout << "\nl2norm = " << my_l2norm << "\n";
}
BOOST_TEST_MESSAGE("outofplace shape(x,y,z)=" << shape[fc::row_major::x]<< ", " << shape[fc::row_major::y]<< ", " << shape[fc::row_major::z] << "\tl2norm = " << my_l2norm);
BOOST_REQUIRE_MESSAGE(result,"l2norm = "<< my_l2norm <<" not smaller than " << expected);
}
BOOST_AUTO_TEST_CASE(power_of_7) {
std::vector<size_t> shape(3,14);
shape[fc::row_major::z] = 7;
shape[fc::row_major::x] = std::pow(7,3);
fc::image_stack stack(shape);
fc::image_stack received(shape);
for(size_t i = 0;i<stack.num_elements();++i)
stack.data()[i] = i;
size_t img_size = std::accumulate(shape.begin(), shape.end(),1,std::multiplies<size_t>());
BOOST_REQUIRE(img_size > 32);
fc::outofplace_fft_ifft(stack, received);
double my_l2norm = l2norm(stack,received);
const double expected = 1e-3;
const bool result = my_l2norm<expected;
BOOST_TEST_MESSAGE("outofplace shape(x,y,z)=" << shape[fc::row_major::x]<< ", " << shape[fc::row_major::y]<< ", " << shape[fc::row_major::z] << "\tl2norm = " << my_l2norm);
BOOST_REQUIRE_MESSAGE(result,"l2norm = "<< my_l2norm <<" not smaller than " << expected);
}
BOOST_AUTO_TEST_CASE(cube_128_shape) {
std::vector<size_t> shape(3,128);
fc::image_stack stack(shape);
fc::image_stack received(shape);
for(size_t i = 0;i<stack.num_elements();++i)
stack.data()[i] = i;
size_t img_size = std::accumulate(shape.begin(), shape.end(),1,std::multiplies<size_t>());
BOOST_REQUIRE(img_size > 32);
fc::outofplace_fft_ifft(stack, received);
double my_l2norm = l2norm(stack,received);
const double expected = 1e-3;
const bool result = my_l2norm<expected;
BOOST_TEST_MESSAGE("outofplace shape(x,y,z)=" << shape[fc::row_major::x]<< ", " << shape[fc::row_major::y]<< ", " << shape[fc::row_major::z] << "\tl2norm = " << my_l2norm);
BOOST_REQUIRE_MESSAGE(result,"l2norm = "<< my_l2norm <<" not smaller than " << expected);
}
BOOST_AUTO_TEST_SUITE_END()
BOOST_AUTO_TEST_SUITE(maweigert)
BOOST_AUTO_TEST_CASE(inplace_c2c_of_prime_shape) {
std::vector<size_t> shape(3,0);
shape[fc::row_major::x] = 13;
shape[fc::row_major::y] = 17;
shape[fc::row_major::z] = 19;
fc::frequ_stack stack(shape);
for(size_t i = 0;i<stack.num_elements();++i){
stack.data()[i].x = i;
stack.data()[i].y = i;
}
fc::frequ_stack received = stack;
size_t img_size = std::accumulate(shape.begin(), shape.end(),1,std::multiplies<size_t>());
BOOST_REQUIRE(img_size > 32);
cufft_c2c_ptr(stack.data(),shape[fc::row_major::x],shape[fc::row_major::y],shape[fc::row_major::z]);
double diff = 0;
for(size_t i = 0;i<stack.num_elements();++i){
double xtemp = stack.data()[i].x-received.data()[i].x;
double ytemp = stack.data()[i].y-received.data()[i].y;
diff += xtemp*xtemp;
diff += ytemp*ytemp;
}
double my_l2norm = std::sqrt(diff)/img_size;
const double expected = 1e-3;
const bool result = my_l2norm<expected;
BOOST_TEST_MESSAGE("maweigert shape(x,y,z)=" << shape[fc::row_major::x]<< ", " << shape[fc::row_major::y]<< ", " << shape[fc::row_major::z] << "\tl2norm = " << my_l2norm);
BOOST_REQUIRE_MESSAGE(result,"l2norm = "<< my_l2norm <<" not smaller than " << expected);
}
BOOST_AUTO_TEST_CASE(inplace_c2c_of_prime_shape_reversed) {
std::vector<size_t> shape(3,0);
shape[fc::row_major::x] = 13;
shape[fc::row_major::y] = 17;
shape[fc::row_major::z] = 19;
fc::frequ_stack stack(shape);
for(size_t i = 0;i<stack.num_elements();++i){
stack.data()[i].x = i;
stack.data()[i].y = i;
}
fc::frequ_stack received = stack;
size_t img_size = std::accumulate(shape.begin(), shape.end(),1,std::multiplies<size_t>());
BOOST_REQUIRE(img_size > 32);
cufft_c2c_ptr(stack.data(),shape[fc::row_major::z],shape[fc::row_major::y],shape[fc::row_major::x]);
double diff = 0;
for(size_t i = 0;i<stack.num_elements();++i){
double xtemp = stack.data()[i].x-received.data()[i].x;
double ytemp = stack.data()[i].y-received.data()[i].y;
diff += xtemp*xtemp;
diff += ytemp*ytemp;
}
double my_l2norm = std::sqrt(diff)/img_size;
const double expected = 1e-3;
const bool result = my_l2norm<expected;
BOOST_TEST_MESSAGE("maweigert shape(x,y,z)=" << shape[fc::row_major::z]<< ", " << shape[fc::row_major::y]<< ", " << shape[fc::row_major::x] << "\tl2norm = " << my_l2norm);
BOOST_REQUIRE_MESSAGE(result,"l2norm = "<< my_l2norm <<" not smaller than " << expected);
}
BOOST_AUTO_TEST_SUITE_END()
|
033126df5720bfdcdc95d3acf678587b6b3fbc9a.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/NativeFunctions.h>
#include <ATen/SparseTensorUtils.h>
#include <ATen/native/sparse/SparseTensorMath.h>
#include <ATen/native/sparse/hip/SparseHIPApplyUtils.cuh>
#include <ATen/native/sparse/hip/SparseHIPBlas.cuh>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/HIPUtils.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/WrapDimUtilsMulti.h>
#include <ATen/ExpandUtils.h>
#include <THH/THHTensorMathPointwise.cuh>
#include <THH/THHThrustAllocator.cuh>
#include <thrust/device_ptr.h>
#include <thrust/sequence.h>
#include <thrust/binary_search.h>
#include <thrust/sort.h>
#include <thrust/system/hip/execution_policy.h>
#include <bitset>
#include <hipsparse.h>
#include <hip/hip_runtime_api.h>
#define I_INFO(tensor) cuda::detail::getTensorInfo<int64_t, uint64_t>(tensor)
#define V_INFO(tensor) cuda::detail::getTensorInfo<scalar_t, uint64_t>(tensor)
namespace at { namespace native {
using namespace at::sparse;
using at::cuda::detail::TensorInfo;
using at::cuda::detail::getTensorInfo;
// --------------------------------------------------------------------
// Utility functions
// --------------------------------------------------------------------
namespace {
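// Converts coalesced COO row indices into CSR row pointers: csr[i] is the
// offset of the first nonzero of row i, so csr[i+1] - csr[i] is the number of
// nonzeros in row i and csr[dim] == nnz.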
IntTensor _to_csr_int(const LongTensor& rowIndices, int64_t dim, int64_t nnz) {
IntTensor csr = at::empty({dim+1}, CUDA(kInt));
IntTensor rowIndicesInt = at::empty({rowIndices.size(0)}, CUDA(kInt));
rowIndicesInt.copy_(rowIndices);
sparse::cuda::Xcoo2csr(rowIndicesInt.data_ptr<int32_t>(), nnz, dim, csr.data_ptr<int32_t>());
return csr;
}
}
// NB: Deleted spaddcmul (aka addcmul_, but not actually wired up), spaddcdiv (not
// wired at all)
template <typename scalar_t>
void s_addmm_out_sparse_dense_cuda_worker(int64_t nnz, int64_t m, int64_t n, int64_t k, Tensor& r_, Scalar beta, const Tensor& t, Scalar alpha, LongTensor& indices, Tensor& values, const Tensor& dense) {
scalar_t cast_beta = beta.to<scalar_t>();
scalar_t cast_alpha = alpha.to<scalar_t>();
LongTensor rowIndices = indices.select(0, 0);
LongTensor colIndices = indices.select(0, 1);
IntTensor csr = _to_csr_int(rowIndices, m, nnz);
IntTensor colIndicesInt = at::empty({colIndices.size(0)}, indices.options().dtype(kInt));
colIndicesInt.copy_(colIndices);
Tensor r__;
if (cast_beta == 0) {
r_.zero_();
} else if (cast_beta == 1) {
if (!is_same_tensor(t, r_)) {
r_.copy_(t);
}
} else {
at::mul_out(r_, t, scalar_to_tensor(beta));
}
if(r_.stride(0) == 1 && r_.stride(1) == r_.size(0)) {
r__ = r_;
} else {
// TODO: how... strange
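    // cusparse csrmm2 produces a column-major dense result; when r_ is not
    // already column-major (checked above via its strides), compute into a
    // column-major temporary r__ and copy it back into r_ at the end.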
r__ = r_.transpose(0, 1).clone(at::MemoryFormat::Contiguous);
r__.transpose_(0, 1);
}
if (nnz > 0) {
Tensor dense_;
char transpose_dense;
if(dense.stride(0) == 1 && dense.stride(1) == dense.size(0)) {
transpose_dense = 'n';
dense_ = dense;
} else if(dense.stride(1) == 1 && dense.stride(0) != dense.size(1)) {
transpose_dense = 't';
dense_ = dense;
} else {
transpose_dense = 't';
dense_ = dense.contiguous();
}
sparse::cuda::csrmm2(
'n',
transpose_dense,
m,
n,
k,
nnz,
cast_alpha,
values.data_ptr<scalar_t>(),
csr.data_ptr<int32_t>(),
colIndicesInt.data_ptr<int32_t>(),
dense_.data_ptr<scalar_t>(),
(transpose_dense == 'n' ? dense_.stride(1) : dense_.stride(0)),
cast_beta,
r__.data_ptr<scalar_t>(),
r__.stride(1));
}
r_.copy_(r__);
}
// --------------------------------------------------------------------
// addmm(Tensor, SparseTensor, Tensor, Scalar, Scalar) [broadcasts]
// --------------------------------------------------------------------
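// Computes r_ = beta * t + alpha * (sparse_ @ dense): the COO indices are
// split into row/column parts, rows are converted to CSR, and the product is
// evaluated by cusparse csrmm2 in the worker above.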
Tensor& s_addmm_out_sparse_dense_cuda(Tensor& r_, const Tensor& t, const SparseTensor& sparse_, const Tensor& dense, Scalar beta, Scalar alpha) {
TORCH_CHECK(t.is_cuda(), "addmm: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "addmm: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(sparse_.is_cuda(), "addmm: expected 'mat1' to be CUDA, but got CPU");
TORCH_CHECK(dense.is_cuda(), "addmm: expected 'mat2' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({sparse_, r_, t, dense}));
TORCH_CHECK(dense.dim() == 2, "addmm: 2D tensor expected, got ", dense.dim(), "D tensor");
TORCH_CHECK(sparse_.sparse_dim() == 2, "addmm: expected first two dims to be sparse (indices has size 2 at first dim), but got ", sparse_.sparse_dim(), " sparse dims");
// no need to check dense_dim because dense_dim + sparse_dim = dim
// mxk * kxn = mxn
int64_t m = sparse_.size(0);
int64_t k = sparse_.size(1);
int64_t n = dense.size(1);
TORCH_CHECK(t.size(0) == m,
"addmm: Argument #1 (t): Expected dim 0 size ", m, ", got ", t.size(0));
TORCH_CHECK(t.size(1) == n,
"addmm: Argument #1 (t): Expected dim 1 size ", n, ", got ", t.size(1));
TORCH_CHECK(dense.size(0) == k,
"addmm: Argument #3 (dense): Expected dim 0 size ", k, ", got ", dense.size(0));
r_.resize_({m, n});
SparseTensor sparse = sparse_.coalesce();
int64_t nnz = sparse._nnz();
LongTensor indices = sparse._indices();
Tensor values = sparse._values();
// No half support, so we don't have to use CUDATypeConversion
AT_DISPATCH_FLOATING_TYPES(
values.scalar_type(), "addmm_sparse_cuda", [&] {
s_addmm_out_sparse_dense_cuda_worker<scalar_t>(nnz, m, n, k, r_, beta, t, alpha, indices, values, dense);
}
);
return r_;
}
Tensor& addmm_out_sparse_dense_cuda(
Tensor& result,
const Tensor& self,
const SparseTensor& mat1,
const Tensor& mat2,
Scalar beta,
Scalar alpha
) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm_out");
return s_addmm_out_sparse_dense_cuda(result, b_self, mat1, mat2, beta, alpha);
}
Tensor s_addmm_sparse_dense_cuda(
const Tensor& t,
const SparseTensor& sparse,
const Tensor& dense,
Scalar beta,
Scalar alpha
) {
Tensor r = at::empty({0}, t.options());
s_addmm_out_sparse_dense_cuda(r, t, sparse, dense, beta, alpha);
return r;
}
Tensor addmm_sparse_dense_cuda(
const Tensor& self,
const SparseTensor& mat1,
const Tensor& mat2,
Scalar beta,
Scalar alpha
) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm_out");
return s_addmm_sparse_dense_cuda(b_self, mat1, mat2, beta, alpha);
}
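// Broadcasting note for the wrappers above: expand_size lets a smaller `self`
// (e.g. a {n} bias) broadcast to {m, n}, matching torch.addmm semantics.
// Illustrative sketch (names are hypothetical):
//
//   // at::Tensor bias = at::zeros({n}, dense_options);   // hypothetical options
//   // at::Tensor out  = at::addmm(bias, mat1, mat2);     // bias broadcast to {m, n}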
Tensor& s_addmm_sparse_dense_cuda_(
Tensor& t,
const SparseTensor& sparse,
const Tensor& dense,
Scalar beta,
Scalar alpha
) {
return s_addmm_out_sparse_dense_cuda(t, t, sparse, dense, beta, alpha);
}
// NB: Purposely no broadcasting version of addmm inplace
// Deleted sspaddmm (sparse, dense) -> sparse
// --------------------------------------------------------------------
// hspmm(SparseTensor mat1, Tensor mat2)
// --------------------------------------------------------------------
SparseTensor& hspmm_out_sparse_cuda(SparseTensor& r_, const SparseTensor& sparse_, const Tensor& dense/* , Scalar alpha */) {
TORCH_CHECK(sparse_.is_cuda(), "hspmm: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "hspmm: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(dense.is_cuda(), "hspmm: expected 'mat2' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({r_, sparse_, dense}));
TORCH_CHECK(sparse_.sparse_dim() == 2,
"hspmm: Argument #2: 2D tensor expected, got ", sparse_.sparse_dim(), "D tensor");
TORCH_CHECK(sparse_.dense_dim() == 0,
"hspmm: Argument #2: scalar values expected, got ", sparse_.dense_dim(), "D values");
TORCH_CHECK(dense.dim() == 2,
"hspmm: Argument #3: 2D tensor expected, got ", dense.dim(), "D tensor");
int64_t m = sparse_.size(0);
int64_t k = sparse_.size(1);
int64_t n = dense.size(1);
TORCH_CHECK(dense.size(0) == k,
"hspmm: Argument #3: Expected dim 0 size ", k, ", got ", dense.size(0));
get_sparse_impl(r_)->resize_and_clear_(1, 1, {m, n});
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
SparseTensor sparse = sparse_.coalesce();
int64_t nnz = sparse._nnz();
LongTensor indices = at::empty({1, nnz}, CUDA(kLong));
// create values in column-major format to avoid copying in spaddmm
Tensor values = at::empty({n, nnz}, dense.options());
values.transpose_(0, 1);
// Clone `sparse` before mutating: thrust::sequence below overwrites row 0 of
// its indices in place, and without the clone that storage would alias the
// coalesced input tensor.
SparseTensor newSparse = sparse.clone();
LongTensor spIndices = newSparse._indices();
LongTensor dstIndices = spIndices.select(0, 0);
// Save destination indices to output hybrid tensor
indices.copy_(dstIndices);
// Replace destination indices with 0, 1, 2, 3, ... and compute output values
// tensor with sparse * dense multiplication
thrust::device_ptr<int64_t> indicesIter(dstIndices.data_ptr<int64_t>());
thrust::sequence(policy, indicesIter, indicesIter + nnz);
std::vector<int64_t> new_size = get_sparse_impl(newSparse)->sizes().vec();
new_size[0] = nnz;
get_sparse_impl(newSparse)->raw_resize_(get_sparse_impl(newSparse)->sparse_dim(), get_sparse_impl(newSparse)->dense_dim(), new_size);
s_addmm_out_sparse_dense_cuda(values, values, newSparse, dense, 0, /*alpha*/ 1);
get_sparse_impl(r_)->set_indices_and_values_unsafe(indices, values);
return r_;
}
SparseTensor hspmm_sparse_cuda(const SparseTensor& sparse, const Tensor& dense) {
SparseTensor r = at::empty({0}, sparse.options());
hspmm_out_sparse_cuda(r, sparse, dense);
return r;
}
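// hspmm above returns a "hybrid" sparse result: one sparse dimension and one
// dense dimension, i.e. indices of shape {1, nnz} and values of shape {nnz, n}.
// Rows of mat1 with no nonzeros simply do not appear in the output, which is
// what keeps the result sparse in dim 0.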
// --------------------------------------------------------------------
// add(Tensor, SparseTensor, Scalar)
// formerly known as spcadd
// --------------------------------------------------------------------
Tensor& add_out_dense_sparse_cuda(Tensor& r_, const Tensor& dense, const SparseTensor& sparse, at::Scalar value) {
TORCH_CHECK(dense.is_cuda(), "add: expected 'self' to be a CUDA tensor, but got a CPU tensor");
TORCH_CHECK(sparse.is_cuda(), "add: expected 'other' to be a CUDA tensor, but got a CPU tensor");
TORCH_CHECK(r_.is_cuda(), "add: expected 'out' to be a CUDA tensor, but got a CPU tensor");
TORCH_CHECK(cuda::check_device({sparse, r_, dense}));
TORCH_CHECK(dense.sizes().equals(sparse.sizes()), "add: expected 'self' and 'other' to have same size, but self has size ",
dense.sizes(), " while other has size ", sparse.sizes(), " (FYI: dense-sparse addition does not currently support broadcasting)");
const int64_t nnz = sparse._nnz();
if (nnz == 0) {
r_.resize_as_(dense);
r_.copy_(dense);
return r_;
}
auto commonDtype = at::result_type(dense, sparse);
TORCH_CHECK(canCast(commonDtype, r_.scalar_type()), "Can't convert result type ", commonDtype, " to output ", r_.scalar_type());
Tensor r = r_;
if (r_.scalar_type() != commonDtype) {
r = at::empty_like(dense, r_.options().dtype(commonDtype));
}
Tensor dense_buffer = dense.to(commonDtype);
Tensor values = sparse._values().to(commonDtype);
if (is_same_tensor(r, dense_buffer)) {
TORCH_CHECK(r_.is_contiguous(), "add: CUDA dense-sparse addition with a non-contiguous output tensor does not work; shout if you need it (see https://github.com/pytorch/pytorch/issues/1521 )");
} else {
r.resize_as_(dense);
r.copy_(dense_buffer);
}
LongTensor indices = sparse._indices();
int64_t nDim = dense.dim();
int64_t nDimI = sparse.sparse_dim();
if (values.numel() == 0) {
return r_;
}
if (sparse.is_coalesced()) {
// TODO benchmark to decide whether to remove this special case
const dim3 block = cuda::getApplyBlock();
dim3 grid;
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);
if (sparse.dense_dim() == 0) {
TORCH_CHECK(cuda::getApplyGrid(nnz, grid, curDevice), "add: Argument #0: tensor too large or too many dimensions");
AT_DISPATCH_ALL_TYPES_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_dense_sparse_cuda", [&] {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "add_out_dense_sparse_cuda", [&] {
hipLaunchKernelGGL(( apply::sparseElementwiseKernelScalar<TensorCAddOp<scalar_t>, uint64_t, scalar_t>)
, dim3(grid), dim3(block), 0, stream,
TensorCAddOp<scalar_t>(value.to<scalar_t>()),
V_INFO(r), I_INFO(indices), V_INFO(values),
static_cast<uint64_t>(nnz));
});
});
} else {
TORCH_CHECK(cuda::getApplyGrid(nnz * block.x, grid, curDevice), "add: Argument #0: tensor too large or too many dimensions");
// sparseElementwiseKernel needs values to be contiguous too
values = values.contiguous();
AT_DISPATCH_ALL_TYPES_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_dense_sparse_cuda", [&] {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "add_out_dense_sparse_cuda", [&] {
hipLaunchKernelGGL(( apply::sparseElementwiseKernel<TensorCAddOp<scalar_t>, uint64_t, scalar_t>)
, dim3(grid), dim3(block), 0, stream,
TensorCAddOp<scalar_t>(value.to<scalar_t>()),
V_INFO(r), I_INFO(indices), V_INFO(values),
static_cast<uint64_t>(nnz));
});
});
}
} else {
LongTensor indices1D = flatten_indices(indices, sparse.sizes(), 0);
// FIXME: at some point we can wrap the scale into indexAdd
// NB: Purposely not inplace!
AT_DISPATCH_ALL_TYPES_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_dense_sparse_cuda", [&] {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "add_out_dense_sparse_cuda", [&] {
if (value.to<scalar_t>() != static_cast<scalar_t>(1)) {
values = values.mul(value);
}
});
});
int64_t view_rows = 1;
int64_t view_columns = 1;
for (int i = 0; i < nDimI; i++) {
view_rows *= r.size(i);
}
for (int i = nDimI; i < nDim; i++) {
view_columns *= r.size(i);
}
Tensor r_view = r.view({view_rows, view_columns});
values = values.reshape({nnz, view_columns});
r_view.index_add_(0, indices1D, values);
}
THCudaCheck(hipGetLastError());
r_.copy_(r);
return r_;
}
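// Summary of the two paths above: a coalesced input is scattered with a
// dedicated elementwise kernel, while an uncoalesced input flattens its
// indices to 1-D and uses index_add_, which accumulates duplicate coordinates
// correctly. Both are equivalent (as a sketch) to:
//
//   // r = dense + value * sparse.to_dense();   // conceptually; no dense temp is built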
// --------------------------------------------------------------------
// add(SparseTensor, SparseTensor, Scalar) [broadcasts]
// --------------------------------------------------------------------
Tensor& add_out_dense_sparse_cuda(Tensor& r, const Tensor& dense, const SparseTensor& sparse_, Scalar value);
SparseTensor& add_out_sparse_cuda(SparseTensor& r_, const SparseTensor& t, const SparseTensor& src, Scalar value) {
if (!t.is_sparse()) {
return add_out_dense_sparse_cuda(r_, t, src, value);
}
  // If `t` were dense we would have taken the dense-sparse path above, so a
  // dense `src` here can only be the unsupported add(sparse, dense) ordering.
TORCH_CHECK(src.is_sparse(), "add(sparse, dense) is not supported. Use add(dense, sparse) instead.");
TORCH_CHECK(t.is_cuda(), "add: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(src.is_cuda(), "add: expected 'other' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "add: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({r_, t, src}));
auto commonDtype = at::result_type(t, src);
TORCH_CHECK(canCast(commonDtype, r_.scalar_type()), "Can't convert result type ", commonDtype, " to output ", r_.scalar_type());
TORCH_CHECK(t.sizes().equals(src.sizes()), "add: expected 'self' and 'other' to have same size, but ", t.sizes(), " != ", src.sizes());
if (src._nnz() == 0) {
return copy_sparse_to_sparse_(r_, t);
}
if (t._nnz() == 0) {
return mul_out_sparse_scalar(r_, src, value);
}
TORCH_CHECK(is_same_density(t, src), "add: expected 'self' and 'other' to have same density, but 'self' has ", t.sparse_dim(), " sparse dimensions while 'other' has ", src.sparse_dim(), " sparse dimensions");
// We deliberately choose to simply concat the indices and values tensors
// rather than merging them. This removes the need to synchronously fetch nnz
// at the end of the operation, at the cost of having a non-coalesced result.
// This trade-off is preferable for the common use-case of gradient accumulation.
LongTensor t_indices_ = t._indices();
LongTensor s_indices_ = src._indices();
Tensor t_values_ = t._values().to(commonDtype);
Tensor s_values_ = src._values().to(commonDtype);
AT_DISPATCH_ALL_TYPES_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_sparse_cuda", [&] {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "add_out_sparse_cuda", [&] {
if (value.to<scalar_t>() != static_cast<scalar_t>(1)) {
s_values_ = s_values_.mul(value);
}
});
});
LongTensor r_indices_ = at::cat({t_indices_, s_indices_}, 1);
Tensor r_values_ = at::cat({t_values_, s_values_}, 0);
if (r_.scalar_type() != commonDtype) {
SparseTensor promoted = at::empty({0}, r_.options().dtype(commonDtype));
promoted.resize_as_(src);
alias_into_sparse(promoted, r_indices_, r_values_);
// performs the addition under the common dtype.
promoted = promoted.coalesce();
r_values_ = promoted._values().to(r_.scalar_type());
r_indices_ = promoted._indices();
} else {
r_.resize_as_(src);
}
alias_into_sparse(r_, r_indices_, r_values_);
// Prevent unbounded growth of nnz
// TODO: Improved heuristic on when to coalesce or remove need to coalesce
if (r_._nnz() > r_.numel()) {
auto c = r_.coalesce();
alias_into_sparse(r_, c._indices(), c._values());
}
return r_;
}
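// Concatenation example (illustrative): adding coalesced inputs with nnz = 3
// and nnz = 2 yields an uncoalesced result with nnz = 5; duplicates are merged
// only when nnz would exceed numel() (see above) or when a later op coalesces.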
// --------------------------------------------------------------------
// mul(SparseTensor, SparseTensor) [broadcasts]
// --------------------------------------------------------------------
SparseTensor& mul_out_sparse_cuda(SparseTensor& r_, const SparseTensor& t_, const SparseTensor& src_) {
if (src_.dim() == 0) {
return mul_out_sparse_zerodim(r_, t_, src_);
} else if (t_.dim() == 0) {
return mul_out_sparse_zerodim(r_, src_, t_);
}
TORCH_CHECK(t_.is_cuda(), "mul: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(src_.is_cuda(), "mul: expected 'other' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "mul: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({r_, t_, src_}));
TORCH_CHECK(t_.sizes().equals(src_.sizes()), "mul: expected 'self' and 'other' to have same size, but ", t_.sizes(), " != ", src_.sizes());
SparseTensor t = t_.coalesce();
SparseTensor src = src_.coalesce();
if (src_._nnz() == 0 || t_._nnz() == 0) {
r_.resize_as_(src_);
return r_.zero_();
}
// saving those because they can be overwritten when doing in-place operations
int64_t t_nnz = t._nnz(), s_nnz = src._nnz();
int64_t max_nnz = ::min(t_nnz, s_nnz); // multiply by zero is zero, and can be dropped
int64_t sparse_dim = src.sparse_dim();
auto commonDtype = at::result_type(t, src);
TORCH_CHECK(canCast(commonDtype, r_.scalar_type()), "Can't convert result type ", commonDtype, " to output ", r_.scalar_type());
LongTensor t_indices_ = t._indices().contiguous();
Tensor t_values_ = t._values().to(commonDtype);
LongTensor s_indices_ = src._indices().contiguous();
Tensor s_values_ = src._values().to(commonDtype);
LongTensor r_indices_ = at::empty({sparse_dim, max_nnz}, t_indices_.options());
r_.resize_as_(src);
Tensor r_values_ = new_values_with_size_of(t_values_, max_nnz).zero_();
int64_t valueSize = t_values_.stride(0);
const dim3 block = dim3(::min(static_cast<int64_t>(cuda::getApplyBlock().x), valueSize));
dim3 grid;
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);
TORCH_CHECK(cuda::getApplyGrid(valueSize, grid, curDevice), "mul: Argument #0: tensor too large or too many dimensions");
LongTensor resultNnz = at::empty({1}, CUDA(kLong));
AT_DISPATCH_ALL_TYPES_AND(
at::ScalarType::Half, commonDtype, "mul_out_sparse_cuda", [&] {
hipLaunchKernelGGL(( apply::valueSparseIntersectionKernel<TensorMulOp<scalar_t>, uint64_t, scalar_t>)
, dim3(grid), dim3(block), 0, stream,
TensorMulOp<scalar_t>(),
I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_),
V_INFO(r_values_), V_INFO(t_values_), V_INFO(s_values_),
static_cast<uint64_t>(t_nnz), static_cast<uint64_t>(s_nnz));
THCudaCheck(hipGetLastError());
hipLaunchKernelGGL(( apply::indexSparseIntersectionKernel<uint64_t, scalar_t>)
, dim3(1), dim3(1), 0, stream,
I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_),
// reinterpret_cast shenanigans, because we don't actually have
// unsigned tensors...
static_cast<uint64_t>(t_nnz), static_cast<uint64_t>(s_nnz), reinterpret_cast<uint64_t*>(resultNnz.data_ptr()));
THCudaCheck(hipGetLastError());
});
r_values_ = r_values_.to(r_.scalar_type());
get_sparse_impl(r_)->set_indices_and_values_unsafe(r_indices_, r_values_);
  // sync: copying the device-side count into a CPU tensor blocks until the
  // preceding kernels finish, so the nnz read below is valid on the host.
LongTensor cpu_resultNnz = at::empty({1}, CPU(kLong));
cpu_resultNnz.copy_(resultNnz);
get_sparse_impl(r_)->set_nnz_and_narrow(cpu_resultNnz.accessor<int64_t, 1>()[0]);
return r_._coalesced_(true);
}
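// Intersection bound (illustrative): only coordinates present in both inputs
// can be nonzero in the product, so max_nnz = min(t_nnz, s_nnz) is a safe
// upper bound; set_nnz_and_narrow then trims to the actual match count
// computed on device.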
// --------------------------------------------------------------------
// sparse.sum() backward
//
// see NOTE [ sparse.sum() backward ]
// --------------------------------------------------------------------
template <typename scalar_t>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(512)
#endif
__global__ void _sparse_sum_backward_cuda_kernel(
int64_t total_threads,
const TensorInfo<int64_t, int64_t> grad_indices_ti,
const TensorInfo<int64_t, int64_t> input_indices_ti,
const TensorInfo<int64_t, int64_t> input_indices_pos_ti,
const TensorInfo<scalar_t, int64_t> grad_values_expand_ti,
TensorInfo<scalar_t, int64_t> grad_input_values_ti
) {
const int64_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= total_threads) return;
const int64_t j = input_indices_pos_ti.data[i];
bool has_match = false;
if (grad_indices_ti.data[j] == input_indices_ti.data[i]) {
has_match = true;
}
int64_t grad_input_values_stride0 = grad_input_values_ti.strides[0];
int64_t out_start = i * grad_input_values_stride0;
int64_t out_end = (i + 1) * grad_input_values_stride0;
int64_t in_start = j * grad_values_expand_ti.strides[0];
if (has_match) {
for (int64_t out_i = out_start, in_i = in_start; out_i < out_end; out_i++, in_i++) {
grad_input_values_ti.data[out_i] = grad_values_expand_ti.data[in_i];
}
}
else {
for (int64_t out_i = out_start; out_i < out_end; out_i++) {
grad_input_values_ti.data[out_i] = scalar_t(0);
}
}
}
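// Matching scheme for the kernel above: thread i handles input nonzero i;
// j = input_indices_pos[i] is the lower-bound position of that flattened index
// within grad's sorted 1-D indices, so equality means grad carries a gradient
// for this coordinate and the values row is copied; otherwise it is
// zero-filled. This assumes every projected input coordinate has an in-range
// lower bound, which holds when grad comes from sparse.sum over input's pattern.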
Tensor _sparse_sum_backward_cuda(const Tensor& grad_, const SparseTensor& input_, IntArrayRef dims_to_sum) {
TORCH_CHECK(grad_.is_cuda(), "_sparse_sum_backward_cuda: expected 'grad_' to be CUDA tensor, but got CPU tensor");
TORCH_CHECK(input_.is_cuda(), "_sparse_sum_backward_cuda: expected 'input_' to be CUDA tensor, but got CPU tensor");
auto input = input_.coalesce();
const int64_t input_dim = input.dim();
auto dims_to_sum_b = dim_list_to_bitset(dims_to_sum, input_dim);
auto dims_to_sum_v = dims_to_sum.vec();
maybe_wrap_dims(dims_to_sum_v, input_dim);
LongTensor input_indices = input._indices();
Tensor input_values = input._values();
IntArrayRef input_sizes = input.sizes();
const int64_t input_sparse_dim = input.sparse_dim();
const int64_t input_dense_dim = input.dense_dim();
const int64_t input_nnz = input._nnz();
int64_t sparse_dims_to_sum_size = 0;
auto sparse_dims_to_keep_v = std::vector<int64_t>();
auto dense_dims_to_sum_v = std::vector<int64_t>();
for (int64_t d = 0; d < input_dim; d++) {
if (dims_to_sum_b[d]) {
      if (d < input_sparse_dim) sparse_dims_to_sum_size++;
else dense_dims_to_sum_v.emplace_back(d + 1 - input_sparse_dim);
}
else {
if (d < input_sparse_dim) sparse_dims_to_keep_v.emplace_back(d);
}
}
const bool sum_all_sparse_dim = (input_sparse_dim == sparse_dims_to_sum_size);
const bool sum_dense_dim = (dense_dims_to_sum_v.size() > 0);
const bool sum_sparse_dim = (sparse_dims_to_sum_size > 0);
if (sum_all_sparse_dim) {
TORCH_CHECK(!grad_.is_sparse(), "_sparse_sum_backward_cuda: expected grad Tensor to be dense since all sparse dims are summed");
auto grad_input_values = grad_;
auto expand_size = input_values.sizes().vec();
if (sum_dense_dim) {
auto dense_expand_size = std::vector<int64_t>(expand_size);
dense_expand_size.erase(dense_expand_size.begin()); // remove nnz dim
for (auto d : dense_dims_to_sum_v) grad_input_values = grad_input_values.unsqueeze(d - 1); // -1 since grad has no nnz dim
grad_input_values = grad_input_values.expand(dense_expand_size);
}
grad_input_values = grad_input_values.expand(expand_size).clone(at::MemoryFormat::Contiguous);
return at::_sparse_coo_tensor_with_dims_and_tensors(input_sparse_dim, input_dense_dim, input_sizes, input_indices.clone(at::MemoryFormat::Contiguous), grad_input_values, input.options().dtype(grad_.dtype())); // convert to grad dtype
}
else {
TORCH_CHECK(grad_.is_sparse(), "_sparse_sum_backward_cuda: expected grad_ Tensor to be sparse, but got dense");
auto grad = grad_.coalesce();
LongTensor grad_indices = grad._indices();
Tensor grad_values = grad._values();
const int64_t grad_sparse_dim = grad.sparse_dim();
const int64_t grad_nnz = grad._nnz();
Tensor grad_values_expand = grad_values;
if (sum_dense_dim) {
auto expand_size = input_values.sizes().vec();
if (sum_sparse_dim) expand_size[0] = grad_values.size(0); // update nnz
for (auto d : dense_dims_to_sum_v) grad_values_expand = grad_values_expand.unsqueeze(d);
grad_values_expand = grad_values_expand.expand(expand_size).clone(at::MemoryFormat::Contiguous);
}
Tensor grad_input_values;
if (!sum_sparse_dim) {
grad_input_values = grad_values_expand;
}
else {
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
typedef thrust::device_ptr<int64_t> thrust_ptr;
grad_input_values = at::empty_like(input_values, grad_values.options(), LEGACY_CONTIGUOUS_MEMORY_FORMAT);
AT_ASSERT(grad_input_values.is_cuda());
// get 1D indices
auto grad_sparse_dim_to_keep_v = std::vector<int64_t>(grad_sparse_dim);
std::iota(grad_sparse_dim_to_keep_v.begin(), grad_sparse_dim_to_keep_v.end(), 0);
auto grad_indices_1D = flatten_indices_by_dims(grad_indices, grad.sizes(), grad_sparse_dim_to_keep_v); // flatten indices on all sparse_dim of grad, output indices is coalesced and sorted
auto input_indices_1D = flatten_indices_by_dims(input_indices, input_sizes, sparse_dims_to_keep_v);
thrust_ptr grad_indices_iter(grad_indices_1D.data_ptr<int64_t>());
thrust_ptr input_indices_iter(input_indices_1D.data_ptr<int64_t>());
// store lower_bound of input indices at grad indices
LongTensor input_indices_pos = at::empty_like(input_indices_1D, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
thrust_ptr input_indices_pos_iter(input_indices_pos.data_ptr<int64_t>());
thrust::lower_bound(policy,
grad_indices_iter, grad_indices_iter + grad_nnz,
input_indices_iter, input_indices_iter + input_nnz,
input_indices_pos_iter);
// config to run cuda kernel
int64_t total_threads = input_nnz;
const dim3 block = dim3(::min(static_cast<int64_t>(cuda::getApplyBlock().x), total_threads));
dim3 grid;
TORCH_CHECK(cuda::getApplyGrid(total_threads, grid, curDevice), "_sparse_sum_backward_cuda: input too large or too many dimensions");
auto grad_indices_ti = getTensorInfo<int64_t, int64_t>(grad_indices_1D);
auto input_indices_ti = getTensorInfo<int64_t, int64_t>(input_indices_1D);
auto input_indices_pos_ti = getTensorInfo<int64_t, int64_t>(input_indices_pos);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_values.scalar_type(), "_sparse_sum_backward_cuda", [&] {
auto grad_values_expand_ti = getTensorInfo<scalar_t, int64_t>(grad_values_expand);
auto grad_input_values_ti = getTensorInfo<scalar_t, int64_t>(grad_input_values);
hipLaunchKernelGGL(( _sparse_sum_backward_cuda_kernel<scalar_t>), dim3(grid), dim3(block), 0, stream,
total_threads,
grad_indices_ti,
input_indices_ti,
input_indices_pos_ti,
grad_values_expand_ti,
grad_input_values_ti
);
});
}
return at::_sparse_coo_tensor_with_dims_and_tensors(input_sparse_dim, input_dense_dim, input_sizes, input_indices.clone(at::MemoryFormat::Contiguous), grad_input_values, grad.options());
}
}
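// Autograd sketch for the function above (hedged; exact dispatch lives in the
// derivatives definitions): for y = at::_sparse_sum(x, dims), the backward pass
// rebuilds a gradient with exactly x's sparsity pattern:
//
//   // at::Tensor gx = at::_sparse_sum_backward(gy, x, dims);  // same indices as x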
Tensor bmm_sparse_cuda(const SparseTensor& self, const Tensor& mat2) {
Tensor result = at::empty({self.size(0), mat2.size(2), self.size(1)}, mat2.options(), at::MemoryFormat::Contiguous);
return _bmm_out_sparse_cuda(result, self, mat2, false);
}
Tensor _bmm_sparse_cuda(const SparseTensor& self, const Tensor& mat2, bool deterministic) {
Tensor result = at::empty({self.size(0), mat2.size(2), self.size(1)}, mat2.options(), at::MemoryFormat::Contiguous);
return _bmm_out_sparse_cuda(result, self, mat2, deterministic);
}
#if !(defined(__HIP_PLATFORM_HCC__) || defined(_WIN32) || defined(_WIN64))
__global__ void search_end_matrix_indices_cuda_kernel(
int64_t* mat_el_end_indices,
int64_t num_matrices,
const TensorInfo<int64_t, int64_t> indices_1D_ti,
const int64_t num_elements
){
const int64_t target_mat_num = blockIdx.x * blockDim.x + threadIdx.x;
if (target_mat_num >= num_matrices) return;
const int64_t* indices_1D = indices_1D_ti.data;
const int64_t indices_1D_stride = indices_1D_ti.strides[0];
int64_t start_idx = 0;
int64_t end_idx = num_elements - 1;
int64_t mid_idx = (start_idx + end_idx) >> 1;
int64_t mid_val = indices_1D[mid_idx*indices_1D_stride];
bool found;
while (
start_idx <= end_idx
) {
bool trim_right = mid_val > target_mat_num;
int64_t mid_idx_minus_1 = mid_idx - 1;
int64_t mid_idx_plus_1 = mid_idx + 1;
end_idx = trim_right ? mid_idx_minus_1 : end_idx;
start_idx = trim_right ? start_idx : mid_idx_plus_1;
mid_idx = (start_idx + end_idx) >> 1;
mid_val = indices_1D[mid_idx*indices_1D_stride];
}
found = (mid_val == target_mat_num)
&& (
(mid_idx == (num_elements-1))
|| (indices_1D[(mid_idx+1)*indices_1D_stride] != target_mat_num)
);
mat_el_end_indices[target_mat_num] = found ? mid_idx : -1;
}
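// Worked example (illustrative): for indices_1D = [0, 0, 1, 1, 1, 3] and
// num_matrices = 4, the kernel writes [1, 4, -1, 5]; -1 marks a matrix with
// no nonzero elements.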
// Search through a 1D tensor of sorted sparse matrix
// indices to find the end index for each matrix
void search_end_matrix_indices(int64_t* mat_el_end_indices, int64_t num_matrices, const LongTensor& indices_1D) {
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);
auto indices_1D_ti = getTensorInfo<int64_t, int64_t>(indices_1D);
int64_t grid_size = (num_matrices / 64)+1;
int64_t block_size = 64;
int64_t num_elements = indices_1D.size(0);
hipLaunchKernelGGL(( search_end_matrix_indices_cuda_kernel), dim3(grid_size), dim3(block_size), 0, stream,
mat_el_end_indices,
num_matrices,
indices_1D_ti,
num_elements
);
hipDeviceSynchronize();
}
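// The hipDeviceSynchronize() above guarantees the kernel's writes on `stream`
// are complete before the caller reads mat_el_end_indices back with a blocking
// copy on the default stream.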
hipDataType getTensorCudaDataType(Tensor self) {
hipDataType cuda_data_type;
switch (self.scalar_type()) {
case ScalarType::Float:
cuda_data_type = HIP_R_32F;
break;
case ScalarType::Double:
cuda_data_type = HIP_R_64F;
break;
default:
TORCH_CHECK(false, "Tensor types must be either float32 or float64");
break;
}
return cuda_data_type;
}
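// Only float32/float64 reach this helper: _bmm_out_sparse_cuda dispatches with
// AT_DISPATCH_FLOATING_TYPES below, so the Float/Double cases above are
// exhaustive for supported inputs.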
#endif
Tensor& bmm_out_sparse_cuda(Tensor& result, const SparseTensor& self, const Tensor& mat2) {
return _bmm_out_sparse_cuda(result, self, mat2, false);
}
Tensor& _bmm_out_sparse_cuda(Tensor& result, const SparseTensor& self, const Tensor& mat2, bool deterministic) {
#if defined __HIP_PLATFORM_HCC__
TORCH_CHECK(false, "bmm sparse-dense is not supported on HIP");
#elif defined(_WIN32) || defined(_WIN64)
TORCH_CHECK(false, "bmm sparse-dense CUDA is not supported on Windows");
#elif defined(CUDART_VERSION) && (CUDART_VERSION >= 10010)
TORCH_CHECK(!mat2.is_sparse(), "bmm_sparse: Tensor 'mat2' must be dense");
TORCH_CHECK(self.dense_dim() == 0, "bmm_sparse: Tensor 'self' must have 0 dense dims, but has ", self.dense_dim());
TORCH_CHECK(self.sparse_dim() == 3, "bmm_sparse: Tensor 'self' must have 3 sparse dims, but has ", self.sparse_dim());
TORCH_CHECK(mat2.dim() == 3, "bmm_sparse: Tensor 'mat2' must have 3 dims, but has ", mat2.dim());
TORCH_CHECK(self.size(0) == mat2.size(0), "bmm_sparse: 'self.size(0)' and 'mat2.size(0)' must match");
TORCH_CHECK(self.size(2) == mat2.size(1), "bmm_sparse: 'self.size(2)' and 'mat2.size(1)' must match");
int64_t num_matrices = self.size(0);
int64_t dim_i = self.size(1);
int64_t dim_j = self.size(2);
int64_t dim_k = mat2.size(2);
result.resize_({num_matrices, dim_k, dim_i});
if ((self._nnz() == 0) || (dim_j == 0) || (dim_k == 0)) {
result.zero_().transpose_(1, 2);
return result;
}
Tensor tmp_result;
bool need_copy_result;
// If the result tensor is contiguous, we can just write results directly to it.
// Otherwise, we'll need to write results to a temp buffer and then copy.
if (result.is_contiguous()) {
tmp_result = result;
need_copy_result = false;
} else {
tmp_result = at::empty({num_matrices, dim_k, dim_i}, result.options(), at::MemoryFormat::Contiguous);
need_copy_result = true;
}
// Dense matrices have to be contiguous for hipsparseSpMM to work
const Tensor mat2_contig = mat2.contiguous();
auto cusparse_handle = at::cuda::getCurrentCUDASparseHandle();
// First need to coalesce to get all of the first dimension indices
// in order since we'll be sending each matrix into the MM operation
SparseTensor self_coalesced = coalesce_sparse_cuda(self);
int64_t nnz = self_coalesced._nnz();
LongTensor indices = self_coalesced._indices();
Tensor values = self_coalesced._values();
LongTensor indices_dim0 = indices[0];
// Need to convert dim1 and dim2 indices to 32-bit since hipsparseSpMM
// only supports 32-bit indices
Tensor indices_dim1 = indices[1].to(ScalarType::Int);
Tensor indices_dim2 = indices[2].to(ScalarType::Int);
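  // NB: the variable-length array below is a GCC/Clang extension, not standard
  // C++; num_matrices is the (typically small) batch dimension.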
int64_t mat_el_end_indices_host[num_matrices];
int64_t* mat_el_end_indices_device;
hipMalloc(&mat_el_end_indices_device, num_matrices*sizeof(int64_t));
search_end_matrix_indices(mat_el_end_indices_device, num_matrices, indices_dim0);
hipMemcpy(
mat_el_end_indices_host,
mat_el_end_indices_device,
num_matrices*sizeof(int64_t),
hipMemcpyDeviceToHost
);
hipFree(mat_el_end_indices_device);
// Need a pointer to an array to access within a lambda
int64_t* mat_el_end_indices = &mat_el_end_indices_host[0];
Scalar beta = 0;
Scalar alpha = 1;
int64_t mat_el_begin_idx = 0;
size_t workspace_buffer_size = 0;
void* workspace_buffer = nullptr;
deterministic = deterministic || globalContext().deterministic();
hipsparseSpMMAlg_t mm_alg = deterministic ? HIPSPARSE_COOMM_ALG2 : HIPSPARSE_COOMM_ALG1;
// Iterate through each set of 2D matrices within the 3D
// tensor inputs, performing a matrix multiply with each
AT_DISPATCH_FLOATING_TYPES(
values.scalar_type(), "bmm_sparse_cuda", [&] {
scalar_t alpha_val = alpha.to<scalar_t>();
scalar_t beta_val = beta.to<scalar_t>();
uint32_t* row_indices_start_ptr = reinterpret_cast<uint32_t*>(indices_dim1.data_ptr());
uint32_t* col_indices_start_ptr = reinterpret_cast<uint32_t*>(indices_dim2.data_ptr());
scalar_t* values_start_ptr = reinterpret_cast<scalar_t*>(values.data_ptr());
scalar_t* mat2_start_ptr = reinterpret_cast<scalar_t*>(mat2_contig.data_ptr());
scalar_t* result_start_ptr = reinterpret_cast<scalar_t*>(tmp_result.data_ptr());
for (
int64_t cur_mat_num = 0;
(cur_mat_num < num_matrices);
cur_mat_num++
) {
int64_t mat_el_end_idx = mat_el_end_indices[cur_mat_num];
if (mat_el_end_idx != -1) {
mat_el_end_idx++;
// Create tensors to view just the current set of matrices
int64_t sparse_nnz = mat_el_end_idx - mat_el_begin_idx;
hipDataType cuda_data_type = getTensorCudaDataType(mat2_contig);
uint32_t* row_indices_ptr = &row_indices_start_ptr[mat_el_begin_idx];
uint32_t* col_indices_ptr = &col_indices_start_ptr[mat_el_begin_idx];
scalar_t* values_ptr = &values_start_ptr[mat_el_begin_idx];
hipsparseSpMatDescr_t sparse_descr;
TORCH_CUDASPARSE_CHECK(hipsparseCreateCoo(
&sparse_descr,
dim_i,
dim_j,
sparse_nnz,
reinterpret_cast<void*>(row_indices_ptr),
reinterpret_cast<void*>(col_indices_ptr),
reinterpret_cast<void*>(values_ptr),
HIPSPARSE_INDEX_32I,
HIPSPARSE_INDEX_BASE_ZERO,
cuda_data_type
));
scalar_t* mat2_ptr = &mat2_start_ptr[dim_k*dim_j*cur_mat_num];
hipsparseDnMatDescr_t dense_descr;
TORCH_CUDASPARSE_CHECK(hipsparseCreateDnMat(
&dense_descr,
dim_k,
dim_j,
dim_k,
reinterpret_cast<void*>(mat2_ptr),
cuda_data_type,
HIPSPARSE_ORDER_COL
));
scalar_t* result_ptr = &result_start_ptr[dim_i*dim_k*cur_mat_num];
hipsparseDnMatDescr_t result_descr;
TORCH_CUDASPARSE_CHECK(hipsparseCreateDnMat(
&result_descr,
dim_i,
dim_k,
dim_i,
reinterpret_cast<void*>(result_ptr),
cuda_data_type,
HIPSPARSE_ORDER_COL
));
size_t required_workspace_buffer_size = 0;
TORCH_CUDASPARSE_CHECK(hipsparseSpMM_bufferSize(
cusparse_handle,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
HIPSPARSE_OPERATION_TRANSPOSE,
(void*)&alpha_val,
sparse_descr,
dense_descr,
(void*)&beta_val,
result_descr,
cuda_data_type,
mm_alg,
&required_workspace_buffer_size
));
if (required_workspace_buffer_size > workspace_buffer_size) {
if (workspace_buffer != nullptr) {
hipFree(workspace_buffer);
}
workspace_buffer_size = required_workspace_buffer_size;
hipMallocManaged(&workspace_buffer, workspace_buffer_size);
}
TORCH_CUDASPARSE_CHECK(hipsparseSpMM(
cusparse_handle,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
HIPSPARSE_OPERATION_TRANSPOSE,
(void*)&alpha_val,
sparse_descr,
dense_descr,
(void*)&beta_val,
result_descr,
cuda_data_type,
mm_alg,
workspace_buffer
));
TORCH_CUDASPARSE_CHECK(hipsparseDestroySpMat(sparse_descr));
TORCH_CUDASPARSE_CHECK(hipsparseDestroyDnMat(dense_descr));
TORCH_CUDASPARSE_CHECK(hipsparseDestroyDnMat(result_descr));
mat_el_begin_idx = mat_el_end_idx;
} else {
tmp_result[cur_mat_num].zero_();
}
}
}
);
if (need_copy_result) {
result.copy_(tmp_result);
}
// Need to transpose the result matrices since cusparse stores
// them in column-major order in memory
  result.transpose_(1, 2);
if (workspace_buffer != nullptr) {
hipFree(workspace_buffer);
}
#else
TORCH_CHECK(false, "bmm sparse-dense requires CUDA 10.1 or greater");
#endif
return result;
}
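// Batch-MM sketch for the function above (hypothetical usage, not part of this
// file): for a coalesced 3-D sparse `self` of shape {b, i, j} and a dense
// `mat2` of shape {b, j, k}:
//
//   // at::Tensor out = at::bmm(self, mat2);   // dense result, shape {b, i, k}
//
// Each batch slice becomes one hipsparseSpMM call; the buffer is written
// column-major (hence the {b, k, i} allocation) and flipped back with
// transpose_(1, 2).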
}} // namespace at::native
// --------------------------------------------------------------------
// 033126df5720bfdcdc95d3acf678587b6b3fbc9a.cu
// --------------------------------------------------------------------
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/NativeFunctions.h>
#include <ATen/SparseTensorUtils.h>
#include <ATen/native/sparse/SparseTensorMath.h>
#include <ATen/native/sparse/cuda/SparseCUDAApplyUtils.cuh>
#include <ATen/native/sparse/cuda/SparseCUDABlas.cuh>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/CUDAUtils.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/WrapDimUtilsMulti.h>
#include <ATen/ExpandUtils.h>
#include <THC/THCTensorMathPointwise.cuh>
#include <THC/THCThrustAllocator.cuh>
#include <thrust/device_ptr.h>
#include <thrust/sequence.h>
#include <thrust/binary_search.h>
#include <thrust/sort.h>
#include <thrust/system/cuda/execution_policy.h>
#include <bitset>
#include <cusparse.h>
#include <cuda_runtime_api.h>
#define I_INFO(tensor) cuda::detail::getTensorInfo<int64_t, uint64_t>(tensor)
#define V_INFO(tensor) cuda::detail::getTensorInfo<scalar_t, uint64_t>(tensor)
namespace at { namespace native {
using namespace at::sparse;
using at::cuda::detail::TensorInfo;
using at::cuda::detail::getTensorInfo;
// --------------------------------------------------------------------
// Utility functions
// --------------------------------------------------------------------
namespace {
IntTensor _to_csr_int(const LongTensor& rowIndices, int64_t dim, int64_t nnz) {
IntTensor csr = at::empty({dim+1}, CUDA(kInt));
IntTensor rowIndicesInt = at::empty({rowIndices.size(0)}, CUDA(kInt));
rowIndicesInt.copy_(rowIndices);
sparse::cuda::Xcoo2csr(rowIndicesInt.data_ptr<int32_t>(), nnz, dim, csr.data_ptr<int32_t>());
return csr;
}
}
// NB: Deleted spaddcmul (aka addcmul_, but not actually wired up), spaddcdiv (not
// wired at all)
template <typename scalar_t>
void s_addmm_out_sparse_dense_cuda_worker(int64_t nnz, int64_t m, int64_t n, int64_t k, Tensor& r_, Scalar beta, const Tensor& t, Scalar alpha, LongTensor& indices, Tensor& values, const Tensor& dense) {
scalar_t cast_beta = beta.to<scalar_t>();
scalar_t cast_alpha = alpha.to<scalar_t>();
LongTensor rowIndices = indices.select(0, 0);
LongTensor colIndices = indices.select(0, 1);
IntTensor csr = _to_csr_int(rowIndices, m, nnz);
IntTensor colIndicesInt = at::empty({colIndices.size(0)}, indices.options().dtype(kInt));
colIndicesInt.copy_(colIndices);
Tensor r__;
if (cast_beta == 0) {
r_.zero_();
} else if (cast_beta == 1) {
if (!is_same_tensor(t, r_)) {
r_.copy_(t);
}
} else {
at::mul_out(r_, t, scalar_to_tensor(beta));
}
if(r_.stride(0) == 1 && r_.stride(1) == r_.size(0)) {
r__ = r_;
} else {
    // csrmm2 writes its result in column-major order, so stage into a
    // column-major buffer when r_ is not already laid out that way
    // (copied back below).
r__ = r_.transpose(0, 1).clone(at::MemoryFormat::Contiguous);
r__.transpose_(0, 1);
}
if (nnz > 0) {
Tensor dense_;
char transpose_dense;
if(dense.stride(0) == 1 && dense.stride(1) == dense.size(0)) {
transpose_dense = 'n';
dense_ = dense;
} else if(dense.stride(1) == 1 && dense.stride(0) != dense.size(1)) {
transpose_dense = 't';
dense_ = dense;
} else {
transpose_dense = 't';
dense_ = dense.contiguous();
}
sparse::cuda::csrmm2(
'n',
transpose_dense,
m,
n,
k,
nnz,
cast_alpha,
values.data_ptr<scalar_t>(),
csr.data_ptr<int32_t>(),
colIndicesInt.data_ptr<int32_t>(),
dense_.data_ptr<scalar_t>(),
(transpose_dense == 'n' ? dense_.stride(1) : dense_.stride(0)),
cast_beta,
r__.data_ptr<scalar_t>(),
r__.stride(1));
}
r_.copy_(r__);
}
// --------------------------------------------------------------------
// addmm(Tensor, SparseTensor, Tensor, Scalar, Scalar) [broadcasts]
// --------------------------------------------------------------------
Tensor& s_addmm_out_sparse_dense_cuda(Tensor& r_, const Tensor& t, const SparseTensor& sparse_, const Tensor& dense, Scalar beta, Scalar alpha) {
TORCH_CHECK(t.is_cuda(), "addmm: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "addmm: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(sparse_.is_cuda(), "addmm: expected 'mat1' to be CUDA, but got CPU");
TORCH_CHECK(dense.is_cuda(), "addmm: expected 'mat2' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({sparse_, r_, t, dense}));
TORCH_CHECK(dense.dim() == 2, "addmm: 2D tensor expected, got ", dense.dim(), "D tensor");
TORCH_CHECK(sparse_.sparse_dim() == 2, "addmm: expected first two dims to be sparse (indices has size 2 at first dim), but got ", sparse_.sparse_dim(), " sparse dims");
// no need to check dense_dim because dense_dim + sparse_dim = dim
// mxk * kxn = mxn
int64_t m = sparse_.size(0);
int64_t k = sparse_.size(1);
int64_t n = dense.size(1);
TORCH_CHECK(t.size(0) == m,
"addmm: Argument #1 (t): Expected dim 0 size ", m, ", got ", t.size(0));
TORCH_CHECK(t.size(1) == n,
"addmm: Argument #1 (t): Expected dim 1 size ", n, ", got ", t.size(1));
TORCH_CHECK(dense.size(0) == k,
"addmm: Argument #3 (dense): Expected dim 0 size ", k, ", got ", dense.size(0));
r_.resize_({m, n});
SparseTensor sparse = sparse_.coalesce();
int64_t nnz = sparse._nnz();
LongTensor indices = sparse._indices();
Tensor values = sparse._values();
// No half support, so we don't have to use CUDATypeConversion
AT_DISPATCH_FLOATING_TYPES(
values.scalar_type(), "addmm_sparse_cuda", [&] {
s_addmm_out_sparse_dense_cuda_worker<scalar_t>(nnz, m, n, k, r_, beta, t, alpha, indices, values, dense);
}
);
return r_;
}
Tensor& addmm_out_sparse_dense_cuda(
Tensor& result,
const Tensor& self,
const SparseTensor& mat1,
const Tensor& mat2,
Scalar beta,
Scalar alpha
) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm_out");
return s_addmm_out_sparse_dense_cuda(result, b_self, mat1, mat2, beta, alpha);
}
Tensor s_addmm_sparse_dense_cuda(
const Tensor& t,
const SparseTensor& sparse,
const Tensor& dense,
Scalar beta,
Scalar alpha
) {
Tensor r = at::empty({0}, t.options());
s_addmm_out_sparse_dense_cuda(r, t, sparse, dense, beta, alpha);
return r;
}
Tensor addmm_sparse_dense_cuda(
const Tensor& self,
const SparseTensor& mat1,
const Tensor& mat2,
Scalar beta,
Scalar alpha
) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm_out");
return s_addmm_sparse_dense_cuda(b_self, mat1, mat2, beta, alpha);
}
Tensor& s_addmm_sparse_dense_cuda_(
Tensor& t,
const SparseTensor& sparse,
const Tensor& dense,
Scalar beta,
Scalar alpha
) {
return s_addmm_out_sparse_dense_cuda(t, t, sparse, dense, beta, alpha);
}
// NB: Purposely no broadcasting version of addmm inplace
// Deleted sspaddmm (sparse, dense) -> sparse
// --------------------------------------------------------------------
// hspmm(SparseTensor mat1, Tensor mat2)
// --------------------------------------------------------------------
SparseTensor& hspmm_out_sparse_cuda(SparseTensor& r_, const SparseTensor& sparse_, const Tensor& dense/* , Scalar alpha */) {
TORCH_CHECK(sparse_.is_cuda(), "hspmm: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "hspmm: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(dense.is_cuda(), "hspmm: expected 'mat2' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({r_, sparse_, dense}));
TORCH_CHECK(sparse_.sparse_dim() == 2,
"hspmm: Argument #2: 2D tensor expected, got ", sparse_.sparse_dim(), "D tensor");
TORCH_CHECK(sparse_.dense_dim() == 0,
"hspmm: Argument #2: scalar values expected, got ", sparse_.dense_dim(), "D values");
TORCH_CHECK(dense.dim() == 2,
"hspmm: Argument #3: 2D tensor expected, got ", dense.dim(), "D tensor");
int64_t m = sparse_.size(0);
int64_t k = sparse_.size(1);
int64_t n = dense.size(1);
TORCH_CHECK(dense.size(0) == k,
"hspmm: Argument #3: Expected dim 0 size ", k, ", got ", dense.size(0));
get_sparse_impl(r_)->resize_and_clear_(1, 1, {m, n});
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
SparseTensor sparse = sparse_.coalesce();
int64_t nnz = sparse._nnz();
LongTensor indices = at::empty({1, nnz}, CUDA(kLong));
// create values in column-major format to avoid copying in spaddmm
Tensor values = at::empty({n, nnz}, dense.options());
values.transpose_(0, 1);
// Clone `sparse` before mutating: thrust::sequence below overwrites row 0 of
// its indices in place, and without the clone that storage would alias the
// coalesced input tensor.
SparseTensor newSparse = sparse.clone();
LongTensor spIndices = newSparse._indices();
LongTensor dstIndices = spIndices.select(0, 0);
// Save destination indices to output hybrid tensor
indices.copy_(dstIndices);
// Replace destination indices with 0, 1, 2, 3, ... and compute output values
// tensor with sparse * dense multiplication
thrust::device_ptr<int64_t> indicesIter(dstIndices.data_ptr<int64_t>());
thrust::sequence(policy, indicesIter, indicesIter + nnz);
std::vector<int64_t> new_size = get_sparse_impl(newSparse)->sizes().vec();
new_size[0] = nnz;
get_sparse_impl(newSparse)->raw_resize_(get_sparse_impl(newSparse)->sparse_dim(), get_sparse_impl(newSparse)->dense_dim(), new_size);
s_addmm_out_sparse_dense_cuda(values, values, newSparse, dense, 0, /*alpha*/ 1);
get_sparse_impl(r_)->set_indices_and_values_unsafe(indices, values);
return r_;
}
SparseTensor hspmm_sparse_cuda(const SparseTensor& sparse, const Tensor& dense) {
SparseTensor r = at::empty({0}, sparse.options());
hspmm_out_sparse_cuda(r, sparse, dense);
return r;
}
// --------------------------------------------------------------------
// add(Tensor, SparseTensor, Scalar)
// formerly known as spcadd
// --------------------------------------------------------------------
Tensor& add_out_dense_sparse_cuda(Tensor& r_, const Tensor& dense, const SparseTensor& sparse, at::Scalar value) {
TORCH_CHECK(dense.is_cuda(), "add: expected 'self' to be a CUDA tensor, but got a CPU tensor");
TORCH_CHECK(sparse.is_cuda(), "add: expected 'other' to be a CUDA tensor, but got a CPU tensor");
TORCH_CHECK(r_.is_cuda(), "add: expected 'out' to be a CUDA tensor, but got a CPU tensor");
TORCH_CHECK(cuda::check_device({sparse, r_, dense}));
TORCH_CHECK(dense.sizes().equals(sparse.sizes()), "add: expected 'self' and 'other' to have same size, but self has size ",
dense.sizes(), " while other has size ", sparse.sizes(), " (FYI: dense-sparse addition does not currently support broadcasting)");
const int64_t nnz = sparse._nnz();
if (nnz == 0) {
r_.resize_as_(dense);
r_.copy_(dense);
return r_;
}
auto commonDtype = at::result_type(dense, sparse);
TORCH_CHECK(canCast(commonDtype, r_.scalar_type()), "Can't convert result type ", commonDtype, " to output ", r_.scalar_type());
Tensor r = r_;
if (r_.scalar_type() != commonDtype) {
r = at::empty_like(dense, r_.options().dtype(commonDtype));
}
Tensor dense_buffer = dense.to(commonDtype);
Tensor values = sparse._values().to(commonDtype);
if (is_same_tensor(r, dense_buffer)) {
TORCH_CHECK(r_.is_contiguous(), "add: CUDA dense-sparse addition with a non-contiguous output tensor does not work; shout if you need it (see https://github.com/pytorch/pytorch/issues/1521 )");
} else {
r.resize_as_(dense);
r.copy_(dense_buffer);
}
LongTensor indices = sparse._indices();
int64_t nDim = dense.dim();
int64_t nDimI = sparse.sparse_dim();
if (values.numel() == 0) {
return r_;
}
if (sparse.is_coalesced()) {
// TODO benchmark to decide whether to remove this special case
const dim3 block = cuda::getApplyBlock();
dim3 grid;
int curDevice = -1;
cudaGetDevice(&curDevice);
cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
if (sparse.dense_dim() == 0) {
TORCH_CHECK(cuda::getApplyGrid(nnz, grid, curDevice), "add: Argument #0: tensor too large or too many dimensions");
AT_DISPATCH_ALL_TYPES_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_dense_sparse_cuda", [&] {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "add_out_dense_sparse_cuda", [&] {
apply::sparseElementwiseKernelScalar<TensorCAddOp<scalar_t>, uint64_t, scalar_t>
<<<grid, block, 0, stream>>>(
TensorCAddOp<scalar_t>(value.to<scalar_t>()),
V_INFO(r), I_INFO(indices), V_INFO(values),
static_cast<uint64_t>(nnz));
});
});
} else {
TORCH_CHECK(cuda::getApplyGrid(nnz * block.x, grid, curDevice), "add: Argument #0: tensor too large or too many dimensions");
// sparseElementwiseKernel needs values to be contiguous too
values = values.contiguous();
AT_DISPATCH_ALL_TYPES_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_dense_sparse_cuda", [&] {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "add_out_dense_sparse_cuda", [&] {
apply::sparseElementwiseKernel<TensorCAddOp<scalar_t>, uint64_t, scalar_t>
<<<grid, block, 0, stream>>>(
TensorCAddOp<scalar_t>(value.to<scalar_t>()),
V_INFO(r), I_INFO(indices), V_INFO(values),
static_cast<uint64_t>(nnz));
});
});
}
} else {
LongTensor indices1D = flatten_indices(indices, sparse.sizes(), 0);
// FIXME: at some point we can wrap the scale into indexAdd
// NB: Purposely not inplace!
AT_DISPATCH_ALL_TYPES_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_dense_sparse_cuda", [&] {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "add_out_dense_sparse_cuda", [&] {
if (value.to<scalar_t>() != static_cast<scalar_t>(1)) {
values = values.mul(value);
}
});
});
int64_t view_rows = 1;
int64_t view_columns = 1;
for (int i = 0; i < nDimI; i++) {
view_rows *= r.size(i);
}
for (int i = nDimI; i < nDim; i++) {
view_columns *= r.size(i);
}
Tensor r_view = r.view({view_rows, view_columns});
values = values.reshape({nnz, view_columns});
r_view.index_add_(0, indices1D, values);
}
THCudaCheck(cudaGetLastError());
r_.copy_(r);
return r_;
}
// --------------------------------------------------------------------
// add(SparseTensor, SparseTensor, Scalar) [broadcasts]
// --------------------------------------------------------------------
Tensor& add_out_dense_sparse_cuda(Tensor& r, const Tensor& dense, const SparseTensor& sparse_, Scalar value);
SparseTensor& add_out_sparse_cuda(SparseTensor& r_, const SparseTensor& t, const SparseTensor& src, Scalar value) {
if (!t.is_sparse()) {
return add_out_dense_sparse_cuda(r_, t, src, value);
}
  // If `t` were dense we would have taken the dense-sparse path above, so a
  // dense `src` here can only be the unsupported add(sparse, dense) ordering.
TORCH_CHECK(src.is_sparse(), "add(sparse, dense) is not supported. Use add(dense, sparse) instead.");
TORCH_CHECK(t.is_cuda(), "add: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(src.is_cuda(), "add: expected 'other' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "add: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({r_, t, src}));
auto commonDtype = at::result_type(t, src);
TORCH_CHECK(canCast(commonDtype, r_.scalar_type()), "Can't convert result type ", commonDtype, " to output ", r_.scalar_type());
TORCH_CHECK(t.sizes().equals(src.sizes()), "add: expected 'self' and 'other' to have same size, but ", t.sizes(), " != ", src.sizes());
if (src._nnz() == 0) {
return copy_sparse_to_sparse_(r_, t);
}
if (t._nnz() == 0) {
return mul_out_sparse_scalar(r_, src, value);
}
TORCH_CHECK(is_same_density(t, src), "add: expected 'self' and 'other' to have same density, but 'self' has ", t.sparse_dim(), " sparse dimensions while 'other' has ", src.sparse_dim(), " sparse dimensions");
// We deliberately choose to simply concat the indices and values tensors
// rather than merging them. This removes the need to synchronously fetch nnz
// at the end of the operation, at the cost of having a non-coalesced result.
// This trade-off is preferable for the common use-case of gradient accumulation.
LongTensor t_indices_ = t._indices();
LongTensor s_indices_ = src._indices();
Tensor t_values_ = t._values().to(commonDtype);
Tensor s_values_ = src._values().to(commonDtype);
AT_DISPATCH_ALL_TYPES_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_sparse_cuda", [&] {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "add_out_sparse_cuda", [&] {
if (value.to<scalar_t>() != static_cast<scalar_t>(1)) {
s_values_ = s_values_.mul(value);
}
});
});
LongTensor r_indices_ = at::cat({t_indices_, s_indices_}, 1);
Tensor r_values_ = at::cat({t_values_, s_values_}, 0);
if (r_.scalar_type() != commonDtype) {
SparseTensor promoted = at::empty({0}, r_.options().dtype(commonDtype));
promoted.resize_as_(src);
alias_into_sparse(promoted, r_indices_, r_values_);
// performs the addition under the common dtype.
promoted = promoted.coalesce();
r_values_ = promoted._values().to(r_.scalar_type());
r_indices_ = promoted._indices();
} else {
r_.resize_as_(src);
}
alias_into_sparse(r_, r_indices_, r_values_);
// Prevent unbounded growth of nnz
// TODO: Improved heuristic on when to coalesce or remove need to coalesce
if (r_._nnz() > r_.numel()) {
auto c = r_.coalesce();
alias_into_sparse(r_, c._indices(), c._values());
}
return r_;
}
// --------------------------------------------------------------------
// mul(SparseTensor, SparseTensor) [broadcasts]
// --------------------------------------------------------------------
SparseTensor& mul_out_sparse_cuda(SparseTensor& r_, const SparseTensor& t_, const SparseTensor& src_) {
if (src_.dim() == 0) {
return mul_out_sparse_zerodim(r_, t_, src_);
} else if (t_.dim() == 0) {
return mul_out_sparse_zerodim(r_, src_, t_);
}
TORCH_CHECK(t_.is_cuda(), "mul: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(src_.is_cuda(), "mul: expected 'other' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "mul: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({r_, t_, src_}));
TORCH_CHECK(t_.sizes().equals(src_.sizes()), "mul: expected 'self' and 'other' to have same size, but ", t_.sizes(), " != ", src_.sizes());
SparseTensor t = t_.coalesce();
SparseTensor src = src_.coalesce();
if (src_._nnz() == 0 || t_._nnz() == 0) {
r_.resize_as_(src_);
return r_.zero_();
}
// saving those because they can be overwritten when doing in-place operations
int64_t t_nnz = t._nnz(), s_nnz = src._nnz();
int64_t max_nnz = std::min(t_nnz, s_nnz); // multiply by zero is zero, and can be dropped
int64_t sparse_dim = src.sparse_dim();
auto commonDtype = at::result_type(t, src);
TORCH_CHECK(canCast(commonDtype, r_.scalar_type()), "Can't convert result type ", commonDtype, " to output ", r_.scalar_type());
LongTensor t_indices_ = t._indices().contiguous();
Tensor t_values_ = t._values().to(commonDtype);
LongTensor s_indices_ = src._indices().contiguous();
Tensor s_values_ = src._values().to(commonDtype);
LongTensor r_indices_ = at::empty({sparse_dim, max_nnz}, t_indices_.options());
r_.resize_as_(src);
Tensor r_values_ = new_values_with_size_of(t_values_, max_nnz).zero_();
int64_t valueSize = t_values_.stride(0);
const dim3 block = dim3(std::min(static_cast<int64_t>(cuda::getApplyBlock().x), valueSize));
dim3 grid;
int curDevice = -1;
cudaGetDevice(&curDevice);
cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
TORCH_CHECK(cuda::getApplyGrid(valueSize, grid, curDevice), "mul: Argument #0: tensor too large or too many dimensions");
LongTensor resultNnz = at::empty({1}, CUDA(kLong));
AT_DISPATCH_ALL_TYPES_AND(
at::ScalarType::Half, commonDtype, "mul_out_sparse_cuda", [&] {
apply::valueSparseIntersectionKernel<TensorMulOp<scalar_t>, uint64_t, scalar_t>
<<<grid, block, 0, stream>>>(
TensorMulOp<scalar_t>(),
I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_),
V_INFO(r_values_), V_INFO(t_values_), V_INFO(s_values_),
static_cast<uint64_t>(t_nnz), static_cast<uint64_t>(s_nnz));
THCudaCheck(cudaGetLastError());
apply::indexSparseIntersectionKernel<uint64_t, scalar_t>
<<<1, 1, 0, stream>>>(
I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_),
// reinterpret_cast shenanigans, because we don't actually have
// unsigned tensors...
static_cast<uint64_t>(t_nnz), static_cast<uint64_t>(s_nnz), reinterpret_cast<uint64_t*>(resultNnz.data_ptr()));
THCudaCheck(cudaGetLastError());
});
r_values_ = r_values_.to(r_.scalar_type());
get_sparse_impl(r_)->set_indices_and_values_unsafe(r_indices_, r_values_);
  // sync: copying the device-side count into a CPU tensor blocks until the
  // preceding kernels finish, so the nnz read below is valid on the host.
LongTensor cpu_resultNnz = at::empty({1}, CPU(kLong));
cpu_resultNnz.copy_(resultNnz);
get_sparse_impl(r_)->set_nnz_and_narrow(cpu_resultNnz.accessor<int64_t, 1>()[0]);
return r_._coalesced_(true);
}
// --------------------------------------------------------------------
// sparse.sum() backward
//
// see NOTE [ sparse.sum() backward ]
// --------------------------------------------------------------------
template <typename scalar_t>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(512)
#endif
__global__ void _sparse_sum_backward_cuda_kernel(
int64_t total_threads,
const TensorInfo<int64_t, int64_t> grad_indices_ti,
const TensorInfo<int64_t, int64_t> input_indices_ti,
const TensorInfo<int64_t, int64_t> input_indices_pos_ti,
const TensorInfo<scalar_t, int64_t> grad_values_expand_ti,
TensorInfo<scalar_t, int64_t> grad_input_values_ti
) {
const int64_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= total_threads) return;
const int64_t j = input_indices_pos_ti.data[i];
bool has_match = false;
if (grad_indices_ti.data[j] == input_indices_ti.data[i]) {
has_match = true;
}
int64_t grad_input_values_stride0 = grad_input_values_ti.strides[0];
int64_t out_start = i * grad_input_values_stride0;
int64_t out_end = (i + 1) * grad_input_values_stride0;
int64_t in_start = j * grad_values_expand_ti.strides[0];
if (has_match) {
for (int64_t out_i = out_start, in_i = in_start; out_i < out_end; out_i++, in_i++) {
grad_input_values_ti.data[out_i] = grad_values_expand_ti.data[in_i];
}
}
else {
for (int64_t out_i = out_start; out_i < out_end; out_i++) {
grad_input_values_ti.data[out_i] = scalar_t(0);
}
}
}
Tensor _sparse_sum_backward_cuda(const Tensor& grad_, const SparseTensor& input_, IntArrayRef dims_to_sum) {
TORCH_CHECK(grad_.is_cuda(), "_sparse_sum_backward_cuda: expected 'grad_' to be CUDA tensor, but got CPU tensor");
TORCH_CHECK(input_.is_cuda(), "_sparse_sum_backward_cuda: expected 'input_' to be CUDA tensor, but got CPU tensor");
auto input = input_.coalesce();
const int64_t input_dim = input.dim();
auto dims_to_sum_b = dim_list_to_bitset(dims_to_sum, input_dim);
auto dims_to_sum_v = dims_to_sum.vec();
maybe_wrap_dims(dims_to_sum_v, input_dim);
LongTensor input_indices = input._indices();
Tensor input_values = input._values();
IntArrayRef input_sizes = input.sizes();
const int64_t input_sparse_dim = input.sparse_dim();
const int64_t input_dense_dim = input.dense_dim();
const int64_t input_nnz = input._nnz();
int64_t sparse_dims_to_sum_size = 0;
auto sparse_dims_to_keep_v = std::vector<int64_t>();
auto dense_dims_to_sum_v = std::vector<int64_t>();
for (int64_t d = 0; d < input_dim; d++) {
if (dims_to_sum_b[d]) {
      if (d < input_sparse_dim) sparse_dims_to_sum_size++;
else dense_dims_to_sum_v.emplace_back(d + 1 - input_sparse_dim);
}
else {
if (d < input_sparse_dim) sparse_dims_to_keep_v.emplace_back(d);
}
}
const bool sum_all_sparse_dim = (input_sparse_dim == sparse_dims_to_sum_size);
const bool sum_dense_dim = (dense_dims_to_sum_v.size() > 0);
const bool sum_sparse_dim = (sparse_dims_to_sum_size > 0);
if (sum_all_sparse_dim) {
TORCH_CHECK(!grad_.is_sparse(), "_sparse_sum_backward_cuda: expected grad Tensor to be dense since all sparse dims are summed");
auto grad_input_values = grad_;
auto expand_size = input_values.sizes().vec();
if (sum_dense_dim) {
auto dense_expand_size = std::vector<int64_t>(expand_size);
dense_expand_size.erase(dense_expand_size.begin()); // remove nnz dim
for (auto d : dense_dims_to_sum_v) grad_input_values = grad_input_values.unsqueeze(d - 1); // -1 since grad has no nnz dim
grad_input_values = grad_input_values.expand(dense_expand_size);
}
grad_input_values = grad_input_values.expand(expand_size).clone(at::MemoryFormat::Contiguous);
return at::_sparse_coo_tensor_with_dims_and_tensors(input_sparse_dim, input_dense_dim, input_sizes, input_indices.clone(at::MemoryFormat::Contiguous), grad_input_values, input.options().dtype(grad_.dtype())); // convert to grad dtype
}
else {
TORCH_CHECK(grad_.is_sparse(), "_sparse_sum_backward_cuda: expected grad_ Tensor to be sparse, but got dense");
auto grad = grad_.coalesce();
LongTensor grad_indices = grad._indices();
Tensor grad_values = grad._values();
const int64_t grad_sparse_dim = grad.sparse_dim();
const int64_t grad_nnz = grad._nnz();
Tensor grad_values_expand = grad_values;
if (sum_dense_dim) {
auto expand_size = input_values.sizes().vec();
if (sum_sparse_dim) expand_size[0] = grad_values.size(0); // update nnz
for (auto d : dense_dims_to_sum_v) grad_values_expand = grad_values_expand.unsqueeze(d);
grad_values_expand = grad_values_expand.expand(expand_size).clone(at::MemoryFormat::Contiguous);
}
Tensor grad_input_values;
if (!sum_sparse_dim) {
grad_input_values = grad_values_expand;
}
else {
int curDevice = -1;
cudaGetDevice(&curDevice);
cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
typedef thrust::device_ptr<int64_t> thrust_ptr;
grad_input_values = at::empty_like(input_values, grad_values.options(), LEGACY_CONTIGUOUS_MEMORY_FORMAT);
AT_ASSERT(grad_input_values.is_cuda());
// get 1D indices
auto grad_sparse_dim_to_keep_v = std::vector<int64_t>(grad_sparse_dim);
std::iota(grad_sparse_dim_to_keep_v.begin(), grad_sparse_dim_to_keep_v.end(), 0);
      auto grad_indices_1D = flatten_indices_by_dims(grad_indices, grad.sizes(), grad_sparse_dim_to_keep_v); // flatten indices on all sparse dims of grad; output indices are coalesced and sorted
auto input_indices_1D = flatten_indices_by_dims(input_indices, input_sizes, sparse_dims_to_keep_v);
thrust_ptr grad_indices_iter(grad_indices_1D.data_ptr<int64_t>());
thrust_ptr input_indices_iter(input_indices_1D.data_ptr<int64_t>());
// store lower_bound of input indices at grad indices
LongTensor input_indices_pos = at::empty_like(input_indices_1D, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
thrust_ptr input_indices_pos_iter(input_indices_pos.data_ptr<int64_t>());
thrust::lower_bound(policy,
grad_indices_iter, grad_indices_iter + grad_nnz,
input_indices_iter, input_indices_iter + input_nnz,
input_indices_pos_iter);
// config to run cuda kernel
int64_t total_threads = input_nnz;
const dim3 block = dim3(std::min(static_cast<int64_t>(cuda::getApplyBlock().x), total_threads));
dim3 grid;
TORCH_CHECK(cuda::getApplyGrid(total_threads, grid, curDevice), "_sparse_sum_backward_cuda: input too large or too many dimensions");
auto grad_indices_ti = getTensorInfo<int64_t, int64_t>(grad_indices_1D);
auto input_indices_ti = getTensorInfo<int64_t, int64_t>(input_indices_1D);
auto input_indices_pos_ti = getTensorInfo<int64_t, int64_t>(input_indices_pos);
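      // The kernel (tail shown at the top of this section) copies, for each input
      // nnz, the grad slice located via input_indices_pos, or zero-fills the slice
      // when the flattened input index has no counterpart in grad.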
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_values.scalar_type(), "_sparse_sum_backward_cuda", [&] {
auto grad_values_expand_ti = getTensorInfo<scalar_t, int64_t>(grad_values_expand);
auto grad_input_values_ti = getTensorInfo<scalar_t, int64_t>(grad_input_values);
_sparse_sum_backward_cuda_kernel<scalar_t><<<grid, block, 0, stream>>>(
total_threads,
grad_indices_ti,
input_indices_ti,
input_indices_pos_ti,
grad_values_expand_ti,
grad_input_values_ti
);
});
}
return at::_sparse_coo_tensor_with_dims_and_tensors(input_sparse_dim, input_dense_dim, input_sizes, input_indices.clone(at::MemoryFormat::Contiguous), grad_input_values, grad.options());
}
}
Tensor bmm_sparse_cuda(const SparseTensor& self, const Tensor& mat2) {
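  // Allocated in transposed (b, k, i) layout: _bmm_out_sparse_cuda writes
  // column-major cuSPARSE results into it and transposes the view back.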
Tensor result = at::empty({self.size(0), mat2.size(2), self.size(1)}, mat2.options(), at::MemoryFormat::Contiguous);
return _bmm_out_sparse_cuda(result, self, mat2, false);
}
Tensor _bmm_sparse_cuda(const SparseTensor& self, const Tensor& mat2, bool deterministic) {
Tensor result = at::empty({self.size(0), mat2.size(2), self.size(1)}, mat2.options(), at::MemoryFormat::Contiguous);
return _bmm_out_sparse_cuda(result, self, mat2, deterministic);
}
#if !(defined(__HIP_PLATFORM_HCC__) || defined(_WIN32) || defined(_WIN64))
__global__ void search_end_matrix_indices_cuda_kernel(
int64_t* mat_el_end_indices,
int64_t num_matrices,
const TensorInfo<int64_t, int64_t> indices_1D_ti,
const int64_t num_elements
){
const int64_t target_mat_num = blockIdx.x * blockDim.x + threadIdx.x;
if (target_mat_num >= num_matrices) return;
const int64_t* indices_1D = indices_1D_ti.data;
const int64_t indices_1D_stride = indices_1D_ti.strides[0];
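  // Binary search over the sorted 1D indices for the *last* entry equal to
  // target_mat_num; -1 is stored when the matrix has no nonzeros.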
int64_t start_idx = 0;
int64_t end_idx = num_elements - 1;
int64_t mid_idx = (start_idx + end_idx) >> 1;
int64_t mid_val = indices_1D[mid_idx*indices_1D_stride];
bool found;
while (
start_idx <= end_idx
) {
bool trim_right = mid_val > target_mat_num;
int64_t mid_idx_minus_1 = mid_idx - 1;
int64_t mid_idx_plus_1 = mid_idx + 1;
end_idx = trim_right ? mid_idx_minus_1 : end_idx;
start_idx = trim_right ? start_idx : mid_idx_plus_1;
mid_idx = (start_idx + end_idx) >> 1;
mid_val = indices_1D[mid_idx*indices_1D_stride];
}
found = (mid_val == target_mat_num)
&& (
(mid_idx == (num_elements-1))
|| (indices_1D[(mid_idx+1)*indices_1D_stride] != target_mat_num)
);
mat_el_end_indices[target_mat_num] = found ? mid_idx : -1;
}
// Search through a 1D tensor of sorted sparse matrix
// indices to find the end index for each matrix
void search_end_matrix_indices(int64_t* mat_el_end_indices, int64_t num_matrices, const LongTensor& indices_1D) {
int curDevice = -1;
cudaGetDevice(&curDevice);
cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
auto indices_1D_ti = getTensorInfo<int64_t, int64_t>(indices_1D);
int64_t grid_size = (num_matrices / 64)+1;
int64_t block_size = 64;
int64_t num_elements = indices_1D.size(0);
search_end_matrix_indices_cuda_kernel<<<grid_size, block_size, 0, stream>>>(
mat_el_end_indices,
num_matrices,
indices_1D_ti,
num_elements
);
cudaDeviceSynchronize();
}
cudaDataType getTensorCudaDataType(Tensor self) {
cudaDataType cuda_data_type;
switch (self.scalar_type()) {
case ScalarType::Float:
cuda_data_type = CUDA_R_32F;
break;
case ScalarType::Double:
cuda_data_type = CUDA_R_64F;
break;
default:
TORCH_CHECK(false, "Tensor types must be either float32 or float64");
break;
}
return cuda_data_type;
}
#endif
Tensor& bmm_out_sparse_cuda(Tensor& result, const SparseTensor& self, const Tensor& mat2) {
return _bmm_out_sparse_cuda(result, self, mat2, false);
}
Tensor& _bmm_out_sparse_cuda(Tensor& result, const SparseTensor& self, const Tensor& mat2, bool deterministic) {
#if defined __HIP_PLATFORM_HCC__
TORCH_CHECK(false, "bmm sparse-dense is not supported on HIP");
#elif defined(_WIN32) || defined(_WIN64)
TORCH_CHECK(false, "bmm sparse-dense CUDA is not supported on Windows");
#elif defined(CUDART_VERSION) && (CUDART_VERSION >= 10010)
TORCH_CHECK(!mat2.is_sparse(), "bmm_sparse: Tensor 'mat2' must be dense");
TORCH_CHECK(self.dense_dim() == 0, "bmm_sparse: Tensor 'self' must have 0 dense dims, but has ", self.dense_dim());
TORCH_CHECK(self.sparse_dim() == 3, "bmm_sparse: Tensor 'self' must have 3 sparse dims, but has ", self.sparse_dim());
TORCH_CHECK(mat2.dim() == 3, "bmm_sparse: Tensor 'mat2' must have 3 dims, but has ", mat2.dim());
TORCH_CHECK(self.size(0) == mat2.size(0), "bmm_sparse: 'self.size(0)' and 'mat2.size(0)' must match");
TORCH_CHECK(self.size(2) == mat2.size(1), "bmm_sparse: 'self.size(2)' and 'mat2.size(1)' must match");
int64_t num_matrices = self.size(0);
int64_t dim_i = self.size(1);
int64_t dim_j = self.size(2);
int64_t dim_k = mat2.size(2);
result.resize_({num_matrices, dim_k, dim_i});
if ((self._nnz() == 0) || (dim_j == 0) || (dim_k == 0)) {
result.zero_().transpose_(1, 2);
return result;
}
Tensor tmp_result;
bool need_copy_result;
// If the result tensor is contiguous, we can just write results directly to it.
// Otherwise, we'll need to write results to a temp buffer and then copy.
if (result.is_contiguous()) {
tmp_result = result;
need_copy_result = false;
} else {
tmp_result = at::empty({num_matrices, dim_k, dim_i}, result.options(), at::MemoryFormat::Contiguous);
need_copy_result = true;
}
// Dense matrices have to be contiguous for cusparseSpMM to work
const Tensor mat2_contig = mat2.contiguous();
auto cusparse_handle = at::cuda::getCurrentCUDASparseHandle();
// First need to coalesce to get all of the first dimension indices
// in order since we'll be sending each matrix into the MM operation
SparseTensor self_coalesced = coalesce_sparse_cuda(self);
int64_t nnz = self_coalesced._nnz();
LongTensor indices = self_coalesced._indices();
Tensor values = self_coalesced._values();
LongTensor indices_dim0 = indices[0];
// Need to convert dim1 and dim2 indices to 32-bit since cusparseSpMM
// only supports 32-bit indices
Tensor indices_dim1 = indices[1].to(ScalarType::Int);
Tensor indices_dim2 = indices[2].to(ScalarType::Int);
int64_t mat_el_end_indices_host[num_matrices];
int64_t* mat_el_end_indices_device;
cudaMalloc(&mat_el_end_indices_device, num_matrices*sizeof(int64_t));
search_end_matrix_indices(mat_el_end_indices_device, num_matrices, indices_dim0);
cudaMemcpy(
mat_el_end_indices_host,
mat_el_end_indices_device,
num_matrices*sizeof(int64_t),
cudaMemcpyDeviceToHost
);
cudaFree(mat_el_end_indices_device);
// Need a pointer to an array to access within a lambda
int64_t* mat_el_end_indices = &mat_el_end_indices_host[0];
Scalar beta = 0;
Scalar alpha = 1;
int64_t mat_el_begin_idx = 0;
size_t workspace_buffer_size = 0;
void* workspace_buffer = nullptr;
deterministic = deterministic || globalContext().deterministic();
cusparseSpMMAlg_t mm_alg = deterministic ? CUSPARSE_COOMM_ALG2 : CUSPARSE_COOMM_ALG1;
// Iterate through each set of 2D matrices within the 3D
// tensor inputs, performing a matrix multiply with each
AT_DISPATCH_FLOATING_TYPES(
values.scalar_type(), "bmm_sparse_cuda", [&] {
scalar_t alpha_val = alpha.to<scalar_t>();
scalar_t beta_val = beta.to<scalar_t>();
uint32_t* row_indices_start_ptr = reinterpret_cast<uint32_t*>(indices_dim1.data_ptr());
uint32_t* col_indices_start_ptr = reinterpret_cast<uint32_t*>(indices_dim2.data_ptr());
scalar_t* values_start_ptr = reinterpret_cast<scalar_t*>(values.data_ptr());
scalar_t* mat2_start_ptr = reinterpret_cast<scalar_t*>(mat2_contig.data_ptr());
scalar_t* result_start_ptr = reinterpret_cast<scalar_t*>(tmp_result.data_ptr());
for (
int64_t cur_mat_num = 0;
(cur_mat_num < num_matrices);
cur_mat_num++
) {
int64_t mat_el_end_idx = mat_el_end_indices[cur_mat_num];
if (mat_el_end_idx != -1) {
mat_el_end_idx++;
// Create tensors to view just the current set of matrices
int64_t sparse_nnz = mat_el_end_idx - mat_el_begin_idx;
cudaDataType cuda_data_type = getTensorCudaDataType(mat2_contig);
uint32_t* row_indices_ptr = &row_indices_start_ptr[mat_el_begin_idx];
uint32_t* col_indices_ptr = &col_indices_start_ptr[mat_el_begin_idx];
scalar_t* values_ptr = &values_start_ptr[mat_el_begin_idx];
cusparseSpMatDescr_t sparse_descr;
TORCH_CUDASPARSE_CHECK(cusparseCreateCoo(
&sparse_descr,
dim_i,
dim_j,
sparse_nnz,
reinterpret_cast<void*>(row_indices_ptr),
reinterpret_cast<void*>(col_indices_ptr),
reinterpret_cast<void*>(values_ptr),
CUSPARSE_INDEX_32I,
CUSPARSE_INDEX_BASE_ZERO,
cuda_data_type
));
scalar_t* mat2_ptr = &mat2_start_ptr[dim_k*dim_j*cur_mat_num];
cusparseDnMatDescr_t dense_descr;
TORCH_CUDASPARSE_CHECK(cusparseCreateDnMat(
&dense_descr,
dim_k,
dim_j,
dim_k,
reinterpret_cast<void*>(mat2_ptr),
cuda_data_type,
CUSPARSE_ORDER_COL
));
scalar_t* result_ptr = &result_start_ptr[dim_i*dim_k*cur_mat_num];
cusparseDnMatDescr_t result_descr;
TORCH_CUDASPARSE_CHECK(cusparseCreateDnMat(
&result_descr,
dim_i,
dim_k,
dim_i,
reinterpret_cast<void*>(result_ptr),
cuda_data_type,
CUSPARSE_ORDER_COL
));
size_t required_workspace_buffer_size = 0;
TORCH_CUDASPARSE_CHECK(cusparseSpMM_bufferSize(
cusparse_handle,
CUSPARSE_OPERATION_NON_TRANSPOSE,
CUSPARSE_OPERATION_TRANSPOSE,
(void*)&alpha_val,
sparse_descr,
dense_descr,
(void*)&beta_val,
result_descr,
cuda_data_type,
mm_alg,
&required_workspace_buffer_size
));
if (required_workspace_buffer_size > workspace_buffer_size) {
if (workspace_buffer != nullptr) {
cudaFree(workspace_buffer);
}
workspace_buffer_size = required_workspace_buffer_size;
cudaMallocManaged(&workspace_buffer, workspace_buffer_size);
}
TORCH_CUDASPARSE_CHECK(cusparseSpMM(
cusparse_handle,
CUSPARSE_OPERATION_NON_TRANSPOSE,
CUSPARSE_OPERATION_TRANSPOSE,
(void*)&alpha_val,
sparse_descr,
dense_descr,
(void*)&beta_val,
result_descr,
cuda_data_type,
mm_alg,
workspace_buffer
));
TORCH_CUDASPARSE_CHECK(cusparseDestroySpMat(sparse_descr));
TORCH_CUDASPARSE_CHECK(cusparseDestroyDnMat(dense_descr));
TORCH_CUDASPARSE_CHECK(cusparseDestroyDnMat(result_descr));
mat_el_begin_idx = mat_el_end_idx;
} else {
tmp_result[cur_mat_num].zero_();
}
}
}
);
if (need_copy_result) {
result.copy_(tmp_result);
}
// Need to transpose the result matrices since cusparse stores
// them in column-major order in memory
result.transpose_(1,2);
if (workspace_buffer != nullptr) {
cudaFree(workspace_buffer);
}
#else
TORCH_CHECK(false, "bmm sparse-dense requires CUDA 10.1 or greater");
#endif
return result;
}
}} // namespace at::native
|
7798d26756cb191e652e068e9e2dd138741a0c47.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
// CUDA kernel to pause for at least num_cycles cycles
__global__ void sleep(int64_t num_cycles)
{
int64_t cycles = 0;
int64_t start = clock64();
while(cycles < num_cycles) {
cycles = clock64() - start;
}
}
// Returns number of cycles required for requested seconds
extern "C" int64_t get_cycles(int seconds)
{
// Get device frequency in Hz
int64_t Hz;
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
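    // prop.clockRate is reported in kHz, so scale by 1000 to get Hz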
Hz = int64_t(prop.clockRate) * 1000;
// Calculate number of cycles to wait
int64_t num_cycles;
num_cycles = seconds * Hz;
return num_cycles;
}
// Launches a kernel that sleeps for at least num_cycles
extern "C" void sleep_kernel(int64_t num_cycles)
{
    // Launch the kernel with a single thread; the kernel itself busy-waits
int blockSize, gridSize;
blockSize = 1;
gridSize = 1;
// Execute the kernel
hipLaunchKernelGGL(( sleep), dim3(gridSize), dim3(blockSize), 0, 0, num_cycles);
}
// Wait for all pending GPU transactions to end
extern "C" void wait_for_gpu()
{
hipDeviceSynchronize();
}
| 7798d26756cb191e652e068e9e2dd138741a0c47.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
// CUDA kernel to pause for at least num_cycles cycles
__global__ void sleep(int64_t num_cycles)
{
int64_t cycles = 0;
int64_t start = clock64();
while(cycles < num_cycles) {
cycles = clock64() - start;
}
}
// Returns number of cycles required for requested seconds
extern "C" int64_t get_cycles(int seconds)
{
// Get device frequency in Hz
int64_t Hz;
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
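    // prop.clockRate is reported in kHz, so scale by 1000 to get Hz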
Hz = int64_t(prop.clockRate) * 1000;
// Calculate number of cycles to wait
int64_t num_cycles;
num_cycles = seconds * Hz;
return num_cycles;
}
// Launches a kernel that sleeps for at least num_cycles
extern "C" void sleep_kernel(int64_t num_cycles)
{
    // Launch the kernel with a single thread; the kernel itself busy-waits
int blockSize, gridSize;
blockSize = 1;
gridSize = 1;
// Execute the kernel
sleep<<<gridSize, blockSize>>>(num_cycles);
}
// Wait for all pending GPU transactions to end
extern "C" void wait_for_gpu()
{
cudaDeviceSynchronize();
}
|
68469efcd296391de95b54b69804fb28f1681136.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "elementwise.cuh"
#include "math.h"
template <typename T>
__global__ void device_copy_pooler(T* out,
T* in,
size_t hidden_size,
size_t seq_length,
size_t batchsize){
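    // Copy the hidden vector of the first token in each sequence
    // (e.g. BERT's [CLS] token) into a [batchsize, hidden_size] buffer.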
for(size_t i = blockIdx.x * blockDim.x + threadIdx.x ;
i < batchsize * hidden_size; i += gridDim.x * blockDim.x){
size_t num_batch = i / hidden_size;
out[i] = in[num_batch * seq_length * hidden_size + i%hidden_size];
}
__syncthreads();
}
template <typename T>
void copy_pooler(T* &output, T* tensor, global_handle* handle){
output = handle->global_malloc_manage_float.get_new_head_point(
handle->batchsize * handle->hidden_size);
dim3 threads(handle->hidden_size, 1, 1);
dim3 blocks(min(long(65535), handle->batchsize), 1, 1);
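    // NOTE: one thread per hidden unit; assumes hidden_size fits in a
    // single block (<= max threads per block, typically 1024).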
hipLaunchKernelGGL(( device_copy_pooler), dim3(blocks), dim3(threads), 0, handle->cal_stream,
output,
tensor,
handle->hidden_size,
handle->seq_length,
handle->batchsize);
}
template
void copy_pooler<float>(float* &output, float* tensor, global_handle* handle);
template <typename T>
__global__ void gelu (T* tensor, size_t max_num) {
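    // Exact GELU via the Gaussian CDF: x * 0.5 * (1 + erf(x / sqrt(2)))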
for(size_t i = blockIdx.x * blockDim.x + threadIdx.x ; i < max_num; i += gridDim.x * blockDim.x)
tensor[i] = tensor[i] * 0.5f * (1.0f + erff(tensor[i] / sqrtf(2.0)));
__syncthreads();
}
template <typename T>
void op_Gelu::forward (T* tensor, size_t max_num) {
dim3 threads(1024, 1, 1);
dim3 blocks(min((long)65535, max_num / 1024) + 1, 1, 1);
hipLaunchKernelGGL(( gelu), dim3(blocks), dim3(threads), 0, handle->cal_stream,
tensor,
max_num);
}
template
void op_Gelu::forward<float>(float* tensor, size_t max_num);
template <typename T>
__global__ void Tanh (T* tensor, size_t max_num) {
for(size_t i = blockIdx.x * blockDim.x + threadIdx.x ; i < max_num; i += gridDim.x * blockDim.x)
tensor[i] = tanh(tensor[i]);
__syncthreads();
}
template <typename T>
void op_Tanh::forward (T* tensor, size_t max_num){
dim3 threads(1024, 1, 1);
dim3 blocks(min((long)65535, max_num / 1024) + 1, 1, 1);
hipLaunchKernelGGL(( Tanh), dim3(blocks), dim3(threads), 0, handle->cal_stream,
tensor,
max_num);
}
template
void op_Tanh::forward<float>(float* tensor, size_t max_num);
| 68469efcd296391de95b54b69804fb28f1681136.cu | #include "elementwise.cuh"
#include "math.h"
template <typename T>
__global__ void device_copy_pooler(T* out,
T* in,
size_t hidden_size,
size_t seq_length,
size_t batchsize){
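    // Copy the hidden vector of the first token in each sequence
    // (e.g. BERT's [CLS] token) into a [batchsize, hidden_size] buffer.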
for(size_t i = blockIdx.x * blockDim.x + threadIdx.x ;
i < batchsize * hidden_size; i += gridDim.x * blockDim.x){
size_t num_batch = i / hidden_size;
out[i] = in[num_batch * seq_length * hidden_size + i%hidden_size];
}
__syncthreads();
}
template <typename T>
void copy_pooler(T* &output, T* tensor, global_handle* handle){
output = handle->global_malloc_manage_float.get_new_head_point(
handle->batchsize * handle->hidden_size);
dim3 threads(handle->hidden_size, 1, 1);
dim3 blocks(min(long(65535), handle->batchsize), 1, 1);
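    // NOTE: one thread per hidden unit; assumes hidden_size fits in a
    // single block (<= max threads per block, typically 1024).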
device_copy_pooler<<<blocks, threads, 0, handle->cal_stream>>>(
output,
tensor,
handle->hidden_size,
handle->seq_length,
handle->batchsize);
}
template
void copy_pooler<float>(float* &output, float* tensor, global_handle* handle);
template <typename T>
__global__ void gelu (T* tensor, size_t max_num) {
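    // Exact GELU via the Gaussian CDF: x * 0.5 * (1 + erf(x / sqrt(2)))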
for(size_t i = blockIdx.x * blockDim.x + threadIdx.x ; i < max_num; i += gridDim.x * blockDim.x)
tensor[i] = tensor[i] * 0.5f * (1.0f + erff(tensor[i] / sqrtf(2.0)));
__syncthreads();
}
template <typename T>
void op_Gelu::forward (T* tensor, size_t max_num) {
dim3 threads(1024, 1, 1);
dim3 blocks(min((long)65535, max_num / 1024) + 1, 1, 1);
gelu<<<blocks, threads, 0, handle->cal_stream>>>(
tensor,
max_num);
}
template
void op_Gelu::forward<float>(float* tensor, size_t max_num);
template <typename T>
__global__ void Tanh (T* tensor, size_t max_num) {
for(size_t i = blockIdx.x * blockDim.x + threadIdx.x ; i < max_num; i += gridDim.x * blockDim.x)
tensor[i] = tanh(tensor[i]);
__syncthreads();
}
template <typename T>
void op_Tanh::forward (T* tensor, size_t max_num){
dim3 threads(1024, 1, 1);
dim3 blocks(min((long)65535, max_num / 1024) + 1, 1, 1);
Tanh<<<blocks, threads, 0, handle->cal_stream>>>(
tensor,
max_num);
}
template
void op_Tanh::forward<float>(float* tensor, size_t max_num);
|
ae084f0165c04264e188154e9edd4fcce7eaf584.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <assert.h>
#ifdef RD_WG_SIZE_0_0
#define BLOCK_SIZE RD_WG_SIZE_0_0
#elif defined(RD_WG_SIZE_0)
#define BLOCK_SIZE RD_WG_SIZE_0
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE RD_WG_SIZE
#else
#define BLOCK_SIZE 16
#endif
#define STR_SIZE 256
/* maximum power density possible (say 300W for a 10mm x 10mm chip) */
#define MAX_PD (3.0e6)
/* required precision in degrees */
#define PRECISION 0.001
#define SPEC_HEAT_SI 1.75e6
#define K_SI 100
/* capacitance fitting factor */
#define FACTOR_CHIP 0.5
/* chip parameters */
float t_chip = 0.0005;
float chip_height = 0.016;
float chip_width = 0.016;
/* ambient temperature, assuming no package at all */
float amb_temp = 80.0;
void run(int argc, char** argv);
/* define timer macros */
#define pin_stats_reset() startCycle()
#define pin_stats_pause(cycles) stopCycle(cycles)
#define pin_stats_dump(cycles) printf("timer: %Lu\n", cycles)
void
fatal(char *s)
{
	fprintf(stderr, "error: %s\n", s);
	exit(1);
}
void writeoutput(float *vect, int grid_rows, int grid_cols, char *file){
int i,j, index=0;
FILE *fp;
char str[STR_SIZE];
if( (fp = fopen(file, "w" )) == 0 )
printf( "The file was not opened\n" );
for (i=0; i < grid_rows; i++)
for (j=0; j < grid_cols; j++)
{
sprintf(str, "%d\t%g\n", index, vect[i*grid_cols+j]);
fputs(str,fp);
index++;
}
fclose(fp);
}
void readinput(float *vect, int grid_rows, int grid_cols, char *file){
int i,j;
FILE *fp;
char str[STR_SIZE];
float val;
if( (fp = fopen(file, "r" )) ==0 )
printf( "The file was not opened\n" );
for (i=0; i <= grid_rows-1; i++)
for (j=0; j <= grid_cols-1; j++)
{
fgets(str, STR_SIZE, fp);
if (feof(fp))
fatal("not enough lines in file");
//if ((sscanf(str, "%d%f", &index, &val) != 2) || (index != ((i-1)*(grid_cols-2)+j-1)))
if ((sscanf(str, "%f", &val) != 1))
fatal("invalid file format");
vect[i*grid_cols+j] = val;
}
fclose(fp);
}
#define IN_RANGE(x, min, max) ((x)>=(min) && (x)<=(max))
#define CLAMP_RANGE(x, min, max) x = (x<(min)) ? min : ((x>(max)) ? max : x )
#define MIN(a, b) ((a)<=(b) ? (a) : (b))
__global__ void calculate_temp(int iteration, //number of iteration
float *power, //power input
float *temp_src, //temperature input/output
float *temp_dst, //temperature input/output
int grid_cols, //Col of grid
int grid_rows, //Row of grid
int border_cols, // border offset
int border_rows, // border offset
float Cap, //Capacitance
float Rx,
float Ry,
float Rz,
float step,
float time_elapsed){
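    // NOTE: the iteration argument is overwritten below, so the kernel
    // always computes a pyramid of height 2 regardless of the launch value.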
iteration = 2;
__shared__ float temp_on_cuda[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float power_on_cuda[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float temp_t[BLOCK_SIZE][BLOCK_SIZE]; // saving temporary temperature result
float amb_temp = 80.0;
float step_div_Cap;
float Rx_1,Ry_1,Rz_1;
int bx = blockIdx.x;
int by = blockIdx.y;
int tx=threadIdx.x;
int ty=threadIdx.y;
step_div_Cap=step/Cap;
Rx_1=1/Rx;
Ry_1=1/Ry;
Rz_1=1/Rz;
// each block finally computes result for a small block
// after N iterations.
// it is the non-overlapping small blocks that cover
// all the input data
// calculate the small block size
int small_block_rows = BLOCK_SIZE-iteration*2;//EXPAND_RATE
int small_block_cols = BLOCK_SIZE-iteration*2;//EXPAND_RATE
// calculate the boundary for the block according to
// the boundary of its small block
int blkY = small_block_rows*by-border_rows;
int blkX = small_block_cols*bx-border_cols;
int blkYmax = blkY+BLOCK_SIZE-1;
int blkXmax = blkX+BLOCK_SIZE-1;
// calculate the global thread coordination
int yidx = blkY+ty;
int xidx = blkX+tx;
// load data if it is within the valid input range
int loadYidx=yidx, loadXidx=xidx;
int index = grid_cols*loadYidx+loadXidx;
if(IN_RANGE(loadYidx, 0, grid_rows-1) && IN_RANGE(loadXidx, 0, grid_cols-1)){
temp_on_cuda[ty][tx] = temp_src[index]; // Load the temperature data from global memory to shared memory
power_on_cuda[ty][tx] = power[index];// Load the power data from global memory to shared memory
}
__syncthreads();
// effective range within this block that falls within
// the valid range of the input data
// used to rule out computation outside the boundary.
int validYmin = (blkY < 0) ? -blkY : 0;
int validYmax = (blkYmax > grid_rows-1) ? BLOCK_SIZE-1-(blkYmax-grid_rows+1) : BLOCK_SIZE-1;
int validXmin = (blkX < 0) ? -blkX : 0;
int validXmax = (blkXmax > grid_cols-1) ? BLOCK_SIZE-1-(blkXmax-grid_cols+1) : BLOCK_SIZE-1;
int N = ty-1;
int S = ty+1;
int W = tx-1;
int E = tx+1;
N = (N < validYmin) ? validYmin : N;
S = (S > validYmax) ? validYmax : S;
W = (W < validXmin) ? validXmin : W;
E = (E > validXmax) ? validXmax : E;
bool computed;
for (int i=0; i<iteration ; i++){
computed = false;
if( IN_RANGE(tx, i+1, BLOCK_SIZE-i-2) && \
IN_RANGE(ty, i+1, BLOCK_SIZE-i-2) && \
IN_RANGE(tx, validXmin, validXmax) && \
IN_RANGE(ty, validYmin, validYmax) ) {
computed = true;
temp_t[ty][tx] = temp_on_cuda[ty][tx] + step_div_Cap * (power_on_cuda[ty][tx] +
(temp_on_cuda[S][tx] + temp_on_cuda[N][tx] - 2.0*temp_on_cuda[ty][tx]) * Ry_1 +
(temp_on_cuda[ty][E] + temp_on_cuda[ty][W] - 2.0*temp_on_cuda[ty][tx]) * Rx_1 +
(amb_temp - temp_on_cuda[ty][tx]) * Rz_1);
}
__syncthreads();
if(i==iteration-1)
break;
if(computed) //Assign the computation range
temp_on_cuda[ty][tx]= temp_t[ty][tx];
__syncthreads();
}
// update the global memory
// after the last iteration, only threads coordinated within the
// small block perform the calculation and switch on ``computed''
if (computed){
temp_dst[index]= temp_t[ty][tx];
}
}
/*
compute N time steps
*/
int compute_tran_temp(float *MatrixPower,float *MatrixTemp[2], int col, int row, \
int total_iterations, int num_iterations, int blockCols, int blockRows, int borderCols, int borderRows)
{
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(blockCols, blockRows);
float grid_height = chip_height / row;
float grid_width = chip_width / col;
float Cap = FACTOR_CHIP * SPEC_HEAT_SI * t_chip * grid_width * grid_height;
float Rx = grid_width / (2.0 * K_SI * t_chip * grid_height);
float Ry = grid_height / (2.0 * K_SI * t_chip * grid_width);
float Rz = t_chip / (K_SI * grid_height * grid_width);
float max_slope = MAX_PD / (FACTOR_CHIP * t_chip * SPEC_HEAT_SI);
float step = PRECISION / max_slope;
float t;
float time_elapsed;
time_elapsed=0.001;
int src = 1, dst = 0;
for (t = 0; t < total_iterations; t+=num_iterations) {
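        // ping-pong: swap source and destination temperature buffers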
int temp = src;
src = dst;
dst = temp;
hipLaunchKernelGGL(( calculate_temp), dim3(dimGrid), dim3(dimBlock), 0, 0, MIN(num_iterations, total_iterations-t), MatrixPower,MatrixTemp[src],MatrixTemp[dst],\
col,row,borderCols, borderRows, Cap,Rx,Ry,Rz,step,time_elapsed);
}
return dst;
}
void usage(int argc, char **argv)
{
fprintf(stderr, "Usage: %s <grid_rows/grid_cols> <pyramid_height> <sim_time> <temp_file> <power_file> <output_file>\n", argv[0]);
fprintf(stderr, "\t<grid_rows/grid_cols> - number of rows/cols in the grid (positive integer)\n");
fprintf(stderr, "\t<pyramid_height> - pyramid heigh(positive integer)\n");
fprintf(stderr, "\t<sim_time> - number of iterations\n");
fprintf(stderr, "\t<temp_file> - name of the file containing the initial temperature values of each cell\n");
fprintf(stderr, "\t<power_file> - name of the file containing the dissipated power values of each cell\n");
fprintf(stderr, "\t<output_file> - name of the output file\n");
exit(1);
}
int main(int argc, char** argv)
{
printf("WG size of kernel = %d X %d\n", BLOCK_SIZE, BLOCK_SIZE);
run(argc,argv);
return EXIT_SUCCESS;
}
void run(int argc, char** argv)
{
int size;
int grid_rows,grid_cols;
float *FilesavingTemp,*FilesavingPower,*MatrixOut;
char *tfile, *pfile, *ofile;
int total_iterations = 60;
int pyramid_height = 1; // number of iterations
if (argc != 7)
usage(argc, argv);
if((grid_rows = atoi(argv[1]))<=0||
(grid_cols = atoi(argv[1]))<=0||
(pyramid_height = atoi(argv[2]))<=0||
(total_iterations = atoi(argv[3]))<=0)
usage(argc, argv);
tfile=argv[4];
pfile=argv[5];
ofile=argv[6];
size=grid_rows*grid_cols;
/* --------------- pyramid parameters --------------- */
    # define EXPAND_RATE 2 // adding one iteration extends the pyramid base by 2 on each borderline
int borderCols = (pyramid_height)*EXPAND_RATE/2;
int borderRows = (pyramid_height)*EXPAND_RATE/2;
int smallBlockCol = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE;
int smallBlockRow = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE;
int blockCols = grid_cols/smallBlockCol+((grid_cols%smallBlockCol==0)?0:1);
int blockRows = grid_rows/smallBlockRow+((grid_rows%smallBlockRow==0)?0:1);
FilesavingTemp = (float *) malloc(size*sizeof(float));
FilesavingPower = (float *) malloc(size*sizeof(float));
MatrixOut = (float *) calloc (size, sizeof(float));
if( !FilesavingPower || !FilesavingTemp || !MatrixOut)
fatal("unable to allocate memory");
printf("pyramidHeight: %d\ngridSize: [%d, %d]\nborder:[%d, %d]\nblockGrid:[%d, %d]\ntargetBlock:[%d, %d]\n",\
pyramid_height, grid_cols, grid_rows, borderCols, borderRows, blockCols, blockRows, smallBlockCol, smallBlockRow);
readinput(FilesavingTemp, grid_rows, grid_cols, tfile);
readinput(FilesavingPower, grid_rows, grid_cols, pfile);
float *MatrixTemp[2], *MatrixPower;
hipMalloc((void**)&MatrixTemp[0], sizeof(float)*size);
hipMalloc((void**)&MatrixTemp[1], sizeof(float)*size);
hipMemcpy(MatrixTemp[0], FilesavingTemp, sizeof(float)*size, hipMemcpyHostToDevice);
hipMalloc((void**)&MatrixPower, sizeof(float)*size);
hipMemcpy(MatrixPower, FilesavingPower, sizeof(float)*size, hipMemcpyHostToDevice);
printf("Start computing the transient temperature\n");
int ret = compute_tran_temp(MatrixPower,MatrixTemp,grid_cols,grid_rows, \
total_iterations,pyramid_height, blockCols, blockRows, borderCols, borderRows);
printf("Ending simulation\n");
hipMemcpy(MatrixOut, MatrixTemp[ret], sizeof(float)*size, hipMemcpyDeviceToHost);
writeoutput(MatrixOut,grid_rows, grid_cols, ofile);
hipFree(MatrixPower);
hipFree(MatrixTemp[0]);
hipFree(MatrixTemp[1]);
free(MatrixOut);
}
| ae084f0165c04264e188154e9edd4fcce7eaf584.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <assert.h>
#ifdef RD_WG_SIZE_0_0
#define BLOCK_SIZE RD_WG_SIZE_0_0
#elif defined(RD_WG_SIZE_0)
#define BLOCK_SIZE RD_WG_SIZE_0
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE RD_WG_SIZE
#else
#define BLOCK_SIZE 16
#endif
#define STR_SIZE 256
/* maximum power density possible (say 300W for a 10mm x 10mm chip) */
#define MAX_PD (3.0e6)
/* required precision in degrees */
#define PRECISION 0.001
#define SPEC_HEAT_SI 1.75e6
#define K_SI 100
/* capacitance fitting factor */
#define FACTOR_CHIP 0.5
/* chip parameters */
float t_chip = 0.0005;
float chip_height = 0.016;
float chip_width = 0.016;
/* ambient temperature, assuming no package at all */
float amb_temp = 80.0;
void run(int argc, char** argv);
/* define timer macros */
#define pin_stats_reset() startCycle()
#define pin_stats_pause(cycles) stopCycle(cycles)
#define pin_stats_dump(cycles) printf("timer: %Lu\n", cycles)
void
fatal(char *s)
{
	fprintf(stderr, "error: %s\n", s);
	exit(1);
}
void writeoutput(float *vect, int grid_rows, int grid_cols, char *file){
int i,j, index=0;
FILE *fp;
char str[STR_SIZE];
if( (fp = fopen(file, "w" )) == 0 )
printf( "The file was not opened\n" );
for (i=0; i < grid_rows; i++)
for (j=0; j < grid_cols; j++)
{
sprintf(str, "%d\t%g\n", index, vect[i*grid_cols+j]);
fputs(str,fp);
index++;
}
fclose(fp);
}
void readinput(float *vect, int grid_rows, int grid_cols, char *file){
int i,j;
FILE *fp;
char str[STR_SIZE];
float val;
if( (fp = fopen(file, "r" )) ==0 )
printf( "The file was not opened\n" );
for (i=0; i <= grid_rows-1; i++)
for (j=0; j <= grid_cols-1; j++)
{
fgets(str, STR_SIZE, fp);
if (feof(fp))
fatal("not enough lines in file");
//if ((sscanf(str, "%d%f", &index, &val) != 2) || (index != ((i-1)*(grid_cols-2)+j-1)))
if ((sscanf(str, "%f", &val) != 1))
fatal("invalid file format");
vect[i*grid_cols+j] = val;
}
fclose(fp);
}
#define IN_RANGE(x, min, max) ((x)>=(min) && (x)<=(max))
#define CLAMP_RANGE(x, min, max) x = (x<(min)) ? min : ((x>(max)) ? max : x )
#define MIN(a, b) ((a)<=(b) ? (a) : (b))
__global__ void calculate_temp(int iteration, //number of iteration
float *power, //power input
float *temp_src, //temperature input/output
float *temp_dst, //temperature input/output
int grid_cols, //Col of grid
int grid_rows, //Row of grid
int border_cols, // border offset
int border_rows, // border offset
float Cap, //Capacitance
float Rx,
float Ry,
float Rz,
float step,
float time_elapsed){
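    // NOTE: the iteration argument is overwritten below, so the kernel
    // always computes a pyramid of height 2 regardless of the launch value.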
iteration = 2;
__shared__ float temp_on_cuda[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float power_on_cuda[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float temp_t[BLOCK_SIZE][BLOCK_SIZE]; // saving temporary temperature result
float amb_temp = 80.0;
float step_div_Cap;
float Rx_1,Ry_1,Rz_1;
int bx = blockIdx.x;
int by = blockIdx.y;
int tx=threadIdx.x;
int ty=threadIdx.y;
step_div_Cap=step/Cap;
Rx_1=1/Rx;
Ry_1=1/Ry;
Rz_1=1/Rz;
// each block finally computes result for a small block
// after N iterations.
// it is the non-overlapping small blocks that cover
// all the input data
// calculate the small block size
int small_block_rows = BLOCK_SIZE-iteration*2;//EXPAND_RATE
int small_block_cols = BLOCK_SIZE-iteration*2;//EXPAND_RATE
// calculate the boundary for the block according to
// the boundary of its small block
int blkY = small_block_rows*by-border_rows;
int blkX = small_block_cols*bx-border_cols;
int blkYmax = blkY+BLOCK_SIZE-1;
int blkXmax = blkX+BLOCK_SIZE-1;
// calculate the global thread coordination
int yidx = blkY+ty;
int xidx = blkX+tx;
// load data if it is within the valid input range
int loadYidx=yidx, loadXidx=xidx;
int index = grid_cols*loadYidx+loadXidx;
if(IN_RANGE(loadYidx, 0, grid_rows-1) && IN_RANGE(loadXidx, 0, grid_cols-1)){
temp_on_cuda[ty][tx] = temp_src[index]; // Load the temperature data from global memory to shared memory
power_on_cuda[ty][tx] = power[index];// Load the power data from global memory to shared memory
}
__syncthreads();
// effective range within this block that falls within
// the valid range of the input data
// used to rule out computation outside the boundary.
int validYmin = (blkY < 0) ? -blkY : 0;
int validYmax = (blkYmax > grid_rows-1) ? BLOCK_SIZE-1-(blkYmax-grid_rows+1) : BLOCK_SIZE-1;
int validXmin = (blkX < 0) ? -blkX : 0;
int validXmax = (blkXmax > grid_cols-1) ? BLOCK_SIZE-1-(blkXmax-grid_cols+1) : BLOCK_SIZE-1;
int N = ty-1;
int S = ty+1;
int W = tx-1;
int E = tx+1;
N = (N < validYmin) ? validYmin : N;
S = (S > validYmax) ? validYmax : S;
W = (W < validXmin) ? validXmin : W;
E = (E > validXmax) ? validXmax : E;
bool computed;
for (int i=0; i<iteration ; i++){
computed = false;
if( IN_RANGE(tx, i+1, BLOCK_SIZE-i-2) && \
IN_RANGE(ty, i+1, BLOCK_SIZE-i-2) && \
IN_RANGE(tx, validXmin, validXmax) && \
IN_RANGE(ty, validYmin, validYmax) ) {
computed = true;
temp_t[ty][tx] = temp_on_cuda[ty][tx] + step_div_Cap * (power_on_cuda[ty][tx] +
(temp_on_cuda[S][tx] + temp_on_cuda[N][tx] - 2.0*temp_on_cuda[ty][tx]) * Ry_1 +
(temp_on_cuda[ty][E] + temp_on_cuda[ty][W] - 2.0*temp_on_cuda[ty][tx]) * Rx_1 +
(amb_temp - temp_on_cuda[ty][tx]) * Rz_1);
}
__syncthreads();
if(i==iteration-1)
break;
if(computed) //Assign the computation range
temp_on_cuda[ty][tx]= temp_t[ty][tx];
__syncthreads();
}
// update the global memory
// after the last iteration, only threads coordinated within the
// small block perform the calculation and switch on ``computed''
if (computed){
temp_dst[index]= temp_t[ty][tx];
}
}
/*
compute N time steps
*/
int compute_tran_temp(float *MatrixPower,float *MatrixTemp[2], int col, int row, \
int total_iterations, int num_iterations, int blockCols, int blockRows, int borderCols, int borderRows)
{
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(blockCols, blockRows);
float grid_height = chip_height / row;
float grid_width = chip_width / col;
float Cap = FACTOR_CHIP * SPEC_HEAT_SI * t_chip * grid_width * grid_height;
float Rx = grid_width / (2.0 * K_SI * t_chip * grid_height);
float Ry = grid_height / (2.0 * K_SI * t_chip * grid_width);
float Rz = t_chip / (K_SI * grid_height * grid_width);
float max_slope = MAX_PD / (FACTOR_CHIP * t_chip * SPEC_HEAT_SI);
float step = PRECISION / max_slope;
float t;
float time_elapsed;
time_elapsed=0.001;
int src = 1, dst = 0;
for (t = 0; t < total_iterations; t+=num_iterations) {
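        // ping-pong: swap source and destination temperature buffers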
int temp = src;
src = dst;
dst = temp;
calculate_temp<<<dimGrid, dimBlock>>>(MIN(num_iterations, total_iterations-t), MatrixPower,MatrixTemp[src],MatrixTemp[dst],\
col,row,borderCols, borderRows, Cap,Rx,Ry,Rz,step,time_elapsed);
}
return dst;
}
void usage(int argc, char **argv)
{
fprintf(stderr, "Usage: %s <grid_rows/grid_cols> <pyramid_height> <sim_time> <temp_file> <power_file> <output_file>\n", argv[0]);
fprintf(stderr, "\t<grid_rows/grid_cols> - number of rows/cols in the grid (positive integer)\n");
fprintf(stderr, "\t<pyramid_height> - pyramid heigh(positive integer)\n");
fprintf(stderr, "\t<sim_time> - number of iterations\n");
fprintf(stderr, "\t<temp_file> - name of the file containing the initial temperature values of each cell\n");
fprintf(stderr, "\t<power_file> - name of the file containing the dissipated power values of each cell\n");
fprintf(stderr, "\t<output_file> - name of the output file\n");
exit(1);
}
int main(int argc, char** argv)
{
printf("WG size of kernel = %d X %d\n", BLOCK_SIZE, BLOCK_SIZE);
run(argc,argv);
return EXIT_SUCCESS;
}
void run(int argc, char** argv)
{
int size;
int grid_rows,grid_cols;
float *FilesavingTemp,*FilesavingPower,*MatrixOut;
char *tfile, *pfile, *ofile;
int total_iterations = 60;
int pyramid_height = 1; // number of iterations
if (argc != 7)
usage(argc, argv);
if((grid_rows = atoi(argv[1]))<=0||
(grid_cols = atoi(argv[1]))<=0||
(pyramid_height = atoi(argv[2]))<=0||
(total_iterations = atoi(argv[3]))<=0)
usage(argc, argv);
tfile=argv[4];
pfile=argv[5];
ofile=argv[6];
size=grid_rows*grid_cols;
/* --------------- pyramid parameters --------------- */
    # define EXPAND_RATE 2 // adding one iteration extends the pyramid base by 2 on each borderline
int borderCols = (pyramid_height)*EXPAND_RATE/2;
int borderRows = (pyramid_height)*EXPAND_RATE/2;
int smallBlockCol = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE;
int smallBlockRow = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE;
int blockCols = grid_cols/smallBlockCol+((grid_cols%smallBlockCol==0)?0:1);
int blockRows = grid_rows/smallBlockRow+((grid_rows%smallBlockRow==0)?0:1);
FilesavingTemp = (float *) malloc(size*sizeof(float));
FilesavingPower = (float *) malloc(size*sizeof(float));
MatrixOut = (float *) calloc (size, sizeof(float));
if( !FilesavingPower || !FilesavingTemp || !MatrixOut)
fatal("unable to allocate memory");
printf("pyramidHeight: %d\ngridSize: [%d, %d]\nborder:[%d, %d]\nblockGrid:[%d, %d]\ntargetBlock:[%d, %d]\n",\
pyramid_height, grid_cols, grid_rows, borderCols, borderRows, blockCols, blockRows, smallBlockCol, smallBlockRow);
readinput(FilesavingTemp, grid_rows, grid_cols, tfile);
readinput(FilesavingPower, grid_rows, grid_cols, pfile);
float *MatrixTemp[2], *MatrixPower;
cudaMalloc((void**)&MatrixTemp[0], sizeof(float)*size);
cudaMalloc((void**)&MatrixTemp[1], sizeof(float)*size);
cudaMemcpy(MatrixTemp[0], FilesavingTemp, sizeof(float)*size, cudaMemcpyHostToDevice);
cudaMalloc((void**)&MatrixPower, sizeof(float)*size);
cudaMemcpy(MatrixPower, FilesavingPower, sizeof(float)*size, cudaMemcpyHostToDevice);
printf("Start computing the transient temperature\n");
int ret = compute_tran_temp(MatrixPower,MatrixTemp,grid_cols,grid_rows, \
total_iterations,pyramid_height, blockCols, blockRows, borderCols, borderRows);
printf("Ending simulation\n");
cudaMemcpy(MatrixOut, MatrixTemp[ret], sizeof(float)*size, cudaMemcpyDeviceToHost);
writeoutput(MatrixOut,grid_rows, grid_cols, ofile);
cudaFree(MatrixPower);
cudaFree(MatrixTemp[0]);
cudaFree(MatrixTemp[1]);
free(MatrixOut);
}
|
2039431d57d9efcb2ba17bdf7038aca0578ab9d3.hip | // !!! This is a file automatically generated by hipify!!!
#include "petscconf.h"
PETSC_CUDA_EXTERN_C_BEGIN
#include <../src/mat/impls/aij/mpi/mpiaij.h> /*I "petscmat.h" I*/
PETSC_CUDA_EXTERN_C_END
#include "mpicuspmatimpl.h"
#undef __FUNCT__
#define __FUNCT__ "MatMPIAIJSetPreallocation_MPIAIJCUSP"
PetscErrorCode MatMPIAIJSetPreallocation_MPIAIJCUSP(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
{
Mat_MPIAIJ *b = (Mat_MPIAIJ*)B->data;
Mat_MPIAIJCUSP * cuspStruct = (Mat_MPIAIJCUSP*)b->spptr;
PetscErrorCode ierr;
PetscInt i;
PetscFunctionBegin;
ierr = PetscLayoutSetUp(B->rmap);CHKERRQ(ierr);
ierr = PetscLayoutSetUp(B->cmap);CHKERRQ(ierr);
if (d_nnz) {
for (i=0; i<B->rmap->n; i++) {
if (d_nnz[i] < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"d_nnz cannot be less than 0: local row %D value %D",i,d_nnz[i]);
}
}
if (o_nnz) {
for (i=0; i<B->rmap->n; i++) {
if (o_nnz[i] < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"o_nnz cannot be less than 0: local row %D value %D",i,o_nnz[i]);
}
}
if (!B->preallocated) {
/* Explicitly create 2 MATSEQAIJCUSP matrices. */
ierr = MatCreate(PETSC_COMM_SELF,&b->A);CHKERRQ(ierr);
ierr = MatSetSizes(b->A,B->rmap->n,B->cmap->n,B->rmap->n,B->cmap->n);CHKERRQ(ierr);
ierr = MatSetType(b->A,MATSEQAIJCUSP);CHKERRQ(ierr);
ierr = PetscLogObjectParent((PetscObject)B,(PetscObject)b->A);CHKERRQ(ierr);
ierr = MatCreate(PETSC_COMM_SELF,&b->B);CHKERRQ(ierr);
ierr = MatSetSizes(b->B,B->rmap->n,B->cmap->N,B->rmap->n,B->cmap->N);CHKERRQ(ierr);
ierr = MatSetType(b->B,MATSEQAIJCUSP);CHKERRQ(ierr);
ierr = PetscLogObjectParent((PetscObject)B,(PetscObject)b->B);CHKERRQ(ierr);
}
ierr = MatSeqAIJSetPreallocation(b->A,d_nz,d_nnz);CHKERRQ(ierr);
ierr = MatSeqAIJSetPreallocation(b->B,o_nz,o_nnz);CHKERRQ(ierr);
ierr = MatCUSPSetFormat(b->A,MAT_CUSP_MULT,cuspStruct->diagGPUMatFormat);CHKERRQ(ierr);
ierr = MatCUSPSetFormat(b->B,MAT_CUSP_MULT,cuspStruct->offdiagGPUMatFormat);CHKERRQ(ierr);
ierr = MatCUSPSetStream(b->A,cuspStruct->stream);CHKERRQ(ierr);
ierr = MatCUSPSetStream(b->B,cuspStruct->stream);CHKERRQ(ierr);
B->preallocated = PETSC_TRUE;
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "MatGetVecs_MPIAIJCUSP"
PetscErrorCode MatGetVecs_MPIAIJCUSP(Mat mat,Vec *right,Vec *left)
{
PetscErrorCode ierr;
PetscFunctionBegin;
if (right) {
ierr = VecCreate(PetscObjectComm((PetscObject)mat),right);CHKERRQ(ierr);
ierr = VecSetSizes(*right,mat->cmap->n,PETSC_DETERMINE);CHKERRQ(ierr);
ierr = VecSetBlockSize(*right,mat->rmap->bs);CHKERRQ(ierr);
ierr = VecSetType(*right,VECCUSP);CHKERRQ(ierr);
ierr = VecSetLayout(*right,mat->cmap);CHKERRQ(ierr);
}
if (left) {
ierr = VecCreate(PetscObjectComm((PetscObject)mat),left);CHKERRQ(ierr);
ierr = VecSetSizes(*left,mat->rmap->n,PETSC_DETERMINE);CHKERRQ(ierr);
ierr = VecSetBlockSize(*left,mat->rmap->bs);CHKERRQ(ierr);
ierr = VecSetType(*left,VECCUSP);CHKERRQ(ierr);
ierr = VecSetLayout(*left,mat->rmap);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "MatMult_MPIAIJCUSP"
PetscErrorCode MatMult_MPIAIJCUSP(Mat A,Vec xx,Vec yy)
{
  /* This multiplication sequence differs from the CPU version.
     In particular, the diagonal block
multiplication kernel is launched in one stream. Then,
in a separate stream, the data transfers from DeviceToHost
(with MPI messaging in between), then HostToDevice are
launched. Once the data transfer stream is synchronized,
to ensure messaging is complete, the MatMultAdd kernel
is launched in the original (MatMult) stream to protect
against race conditions.
This sequence should only be called for GPU computation. */
Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
PetscErrorCode ierr;
PetscInt nt;
PetscFunctionBegin;
ierr = VecGetLocalSize(xx,&nt);CHKERRQ(ierr);
if (nt != A->cmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Incompatible partition of A (%D) and xx (%D)",A->cmap->n,nt);
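  // Launch the diagonal-block multiply first, then exchange ghost values
  // and finish with the off-diagonal multiply-add.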
ierr = VecScatterInitializeForGPU(a->Mvctx,xx,SCATTER_FORWARD);CHKERRQ(ierr);
ierr = (*a->A->ops->mult)(a->A,xx,yy);CHKERRQ(ierr);
ierr = VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
ierr = VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
ierr = (*a->B->ops->multadd)(a->B,a->lvec,yy,yy);CHKERRQ(ierr);
ierr = VecScatterFinalizeForGPU(a->Mvctx);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode MatSetValuesBatch_MPIAIJCUSP(Mat J, PetscInt Ne, PetscInt Nl, PetscInt *elemRows, const PetscScalar *elemMats);
#undef __FUNCT__
#define __FUNCT__ "MatCUSPSetFormat_MPIAIJCUSP"
PetscErrorCode MatCUSPSetFormat_MPIAIJCUSP(Mat A,MatCUSPFormatOperation op,MatCUSPStorageFormat format)
{
Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
Mat_MPIAIJCUSP * cuspStruct = (Mat_MPIAIJCUSP*)a->spptr;
PetscFunctionBegin;
switch (op) {
case MAT_CUSP_MULT_DIAG:
cuspStruct->diagGPUMatFormat = format;
break;
case MAT_CUSP_MULT_OFFDIAG:
cuspStruct->offdiagGPUMatFormat = format;
break;
case MAT_CUSP_ALL:
cuspStruct->diagGPUMatFormat = format;
cuspStruct->offdiagGPUMatFormat = format;
break;
default:
    SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"unsupported operation %d for MatCUSPFormatOperation. Only MAT_CUSP_MULT_DIAG, MAT_CUSP_MULT_OFFDIAG, and MAT_CUSP_ALL are currently supported.",op);
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "MatSetFromOptions_MPIAIJCUSP"
PetscErrorCode MatSetFromOptions_MPIAIJCUSP(Mat A)
{
MatCUSPStorageFormat format;
PetscErrorCode ierr;
PetscBool flg;
PetscFunctionBegin;
ierr = PetscOptionsHead("MPIAIJCUSP options");CHKERRQ(ierr);
ierr = PetscObjectOptionsBegin((PetscObject)A);
if (A->factortype==MAT_FACTOR_NONE) {
ierr = PetscOptionsEnum("-mat_cusp_mult_diag_storage_format","sets storage format of the diagonal blocks of (mpi)aijcusp gpu matrices for SpMV",
"MatCUSPSetFormat",MatCUSPStorageFormats,(PetscEnum)MAT_CUSP_CSR,(PetscEnum*)&format,&flg);CHKERRQ(ierr);
if (flg) {
ierr = MatCUSPSetFormat(A,MAT_CUSP_MULT_DIAG,format);CHKERRQ(ierr);
}
ierr = PetscOptionsEnum("-mat_cusp_mult_offdiag_storage_format","sets storage format of the off-diagonal blocks (mpi)aijcusp gpu matrices for SpMV",
"MatCUSPSetFormat",MatCUSPStorageFormats,(PetscEnum)MAT_CUSP_CSR,(PetscEnum*)&format,&flg);CHKERRQ(ierr);
if (flg) {
ierr = MatCUSPSetFormat(A,MAT_CUSP_MULT_OFFDIAG,format);CHKERRQ(ierr);
}
ierr = PetscOptionsEnum("-mat_cusp_storage_format","sets storage format of the diagonal and off-diagonal blocks (mpi)aijcusp gpu matrices for SpMV",
"MatCUSPSetFormat",MatCUSPStorageFormats,(PetscEnum)MAT_CUSP_CSR,(PetscEnum*)&format,&flg);CHKERRQ(ierr);
if (flg) {
ierr = MatCUSPSetFormat(A,MAT_CUSP_ALL,format);CHKERRQ(ierr);
}
}
ierr = PetscOptionsEnd();CHKERRQ(ierr);
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "MatDestroy_MPIAIJCUSP"
PetscErrorCode MatDestroy_MPIAIJCUSP(Mat A)
{
PetscErrorCode ierr;
Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
Mat_MPIAIJCUSP *cuspStruct = (Mat_MPIAIJCUSP*)a->spptr;
hipError_t err=hipSuccess;
PetscFunctionBegin;
try {
err = hipStreamDestroy(cuspStruct->stream);
if (err!=hipSuccess)
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Mat_MPIAIJCUSP error: %s", hipGetErrorString(err));
delete cuspStruct;
} catch(char *ex) {
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Mat_MPIAIJCUSP error: %s", ex);
}
cuspStruct = 0;
ierr = MatDestroy_MPIAIJ(A);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "MatCreate_MPIAIJCUSP"
PETSC_EXTERN PetscErrorCode MatCreate_MPIAIJCUSP(Mat A)
{
PetscErrorCode ierr;
Mat_MPIAIJ *a;
Mat_MPIAIJCUSP * cuspStruct;
hipError_t err=hipSuccess;
PetscFunctionBegin;
ierr = MatCreate_MPIAIJ(A);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatMPIAIJSetPreallocation_C",MatMPIAIJSetPreallocation_MPIAIJCUSP);CHKERRQ(ierr);
A->ops->getvecs = MatGetVecs_MPIAIJCUSP;
A->ops->setvaluesbatch = MatSetValuesBatch_MPIAIJCUSP;
a = (Mat_MPIAIJ*)A->data;
a->spptr = new Mat_MPIAIJCUSP;
cuspStruct = (Mat_MPIAIJCUSP*)a->spptr;
cuspStruct->diagGPUMatFormat = MAT_CUSP_CSR;
cuspStruct->offdiagGPUMatFormat = MAT_CUSP_CSR;
err = hipStreamCreate(&(cuspStruct->stream));
if (err!=hipSuccess)
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Mat_MPIAIJCUSP error: %s", hipGetErrorString(err));
A->ops->mult = MatMult_MPIAIJCUSP;
A->ops->setfromoptions = MatSetFromOptions_MPIAIJCUSP;
A->ops->destroy = MatDestroy_MPIAIJCUSP;
ierr = PetscObjectComposeFunction((PetscObject)A,"MatCUSPSetFormat_C", MatCUSPSetFormat_MPIAIJCUSP);CHKERRQ(ierr);
ierr = PetscObjectChangeTypeName((PetscObject)A,MATMPIAIJCUSP);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/*@
MatCreateAIJCUSP - Creates a sparse matrix in AIJ (compressed row) format
   (the default parallel PETSc format). This matrix will ultimately be pushed down
   to NVIDIA GPUs and use the CUSP library for calculations. For good matrix
assembly performance the user should preallocate the matrix storage by setting
the parameter nz (or the array nnz). By setting these parameters accurately,
performance during matrix assembly can be increased by more than a factor of 50.
Collective on MPI_Comm
Input Parameters:
+ comm - MPI communicator, set to PETSC_COMM_SELF
. m - number of rows
. n - number of columns
. nz - number of nonzeros per row (same for all rows)
- nnz - array containing the number of nonzeros in the various rows
(possibly different for each row) or NULL
Output Parameter:
. A - the matrix
It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(),
MatXXXXSetPreallocation() paradigm instead of this routine directly.
[MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation]
Notes:
If nnz is given then nz is ignored
The AIJ format (also called the Yale sparse matrix format or
compressed row storage), is fully compatible with standard Fortran 77
storage. That is, the stored row and column indices can begin at
either one (as in Fortran) or zero. See the users' manual for details.
Specify the preallocated storage with either nz or nnz (not both).
Set nz=PETSC_DEFAULT and nnz=NULL for PETSc to control dynamic memory
allocation. For large problems you MUST preallocate memory or you
will get TERRIBLE performance, see the users' manual chapter on matrices.
By default, this format uses inodes (identical nodes) when possible, to
improve numerical efficiency of matrix-vector products and solves. We
search for consecutive rows with the same nonzero structure, thereby
reusing matrix information to achieve increased efficiency.
Level: intermediate
.seealso: MatCreate(), MatCreateAIJ(), MatSetValues(), MatSeqAIJSetColumnIndices(), MatCreateSeqAIJWithArrays(), MatCreateAIJ(), MATMPIAIJCUSP, MATAIJCUSP
@*/
#undef __FUNCT__
#define __FUNCT__ "MatCreateAIJCUSP"
PetscErrorCode MatCreateAIJCUSP(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[],Mat *A)
{
PetscErrorCode ierr;
PetscMPIInt size;
PetscFunctionBegin;
ierr = MatCreate(comm,A);CHKERRQ(ierr);
ierr = MatSetSizes(*A,m,n,M,N);CHKERRQ(ierr);
ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
if (size > 1) {
ierr = MatSetType(*A,MATMPIAIJCUSP);CHKERRQ(ierr);
ierr = MatMPIAIJSetPreallocation(*A,d_nz,d_nnz,o_nz,o_nnz);CHKERRQ(ierr);
} else {
ierr = MatSetType(*A,MATSEQAIJCUSP);CHKERRQ(ierr);
ierr = MatSeqAIJSetPreallocation(*A,d_nz,d_nnz);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
/*M
MATAIJCUSP - MATMPIAIJCUSP= "aijcusp" = "mpiaijcusp" - A matrix type to be used for sparse matrices.
   A matrix type whose data resides on NVIDIA GPUs. These matrices can be in CSR format.
All matrix calculations are performed using the CUSP library. DIA and ELL
formats are also available
This matrix type is identical to MATSEQAIJCUSP when constructed with a single process communicator,
and MATMPIAIJCUSP otherwise. As a result, for single process communicators,
MatSeqAIJSetPreallocation is supported, and similarly MatMPIAIJSetPreallocation is supported
for communicators controlling multiple processes. It is recommended that you call both of
the above preallocation routines for simplicity.
Options Database Keys:
+ -mat_type mpiaijcusp - sets the matrix type to "mpiaijcusp" during a call to MatSetFromOptions()
. -mat_cusp_storage_format csr - sets the storage format of diagonal and off-diagonal matrices during a call to MatSetFromOptions(). Other storage formats include dia (diagonal) or ell (ellpack).
. -mat_cusp_mult_diag_storage_format csr - sets the storage format of diagonal matrix during a call to MatSetFromOptions(). Other storage formats include dia (diagonal) or ell (ellpack).
- -mat_cusp_mult_offdiag_storage_format csr - sets the storage format of off-diagonal matrix during a call to MatSetFromOptions(). Other storage formats include dia (diagonal) or ell (ellpack).
Level: beginner
.seealso: MatCreateAIJCUSP(), MATSEQAIJCUSP, MatCreateSeqAIJCUSP(), MatCUSPSetFormat(), MatCUSPStorageFormat, MatCUSPFormatOperation
M*/
| 2039431d57d9efcb2ba17bdf7038aca0578ab9d3.cu | #include "petscconf.h"
PETSC_CUDA_EXTERN_C_BEGIN
#include <../src/mat/impls/aij/mpi/mpiaij.h> /*I "petscmat.h" I*/
PETSC_CUDA_EXTERN_C_END
#include "mpicuspmatimpl.h"
#undef __FUNCT__
#define __FUNCT__ "MatMPIAIJSetPreallocation_MPIAIJCUSP"
PetscErrorCode MatMPIAIJSetPreallocation_MPIAIJCUSP(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
{
Mat_MPIAIJ *b = (Mat_MPIAIJ*)B->data;
Mat_MPIAIJCUSP * cuspStruct = (Mat_MPIAIJCUSP*)b->spptr;
PetscErrorCode ierr;
PetscInt i;
PetscFunctionBegin;
ierr = PetscLayoutSetUp(B->rmap);CHKERRQ(ierr);
ierr = PetscLayoutSetUp(B->cmap);CHKERRQ(ierr);
if (d_nnz) {
for (i=0; i<B->rmap->n; i++) {
if (d_nnz[i] < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"d_nnz cannot be less than 0: local row %D value %D",i,d_nnz[i]);
}
}
if (o_nnz) {
for (i=0; i<B->rmap->n; i++) {
if (o_nnz[i] < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"o_nnz cannot be less than 0: local row %D value %D",i,o_nnz[i]);
}
}
if (!B->preallocated) {
/* Explicitly create 2 MATSEQAIJCUSP matrices. */
ierr = MatCreate(PETSC_COMM_SELF,&b->A);CHKERRQ(ierr);
ierr = MatSetSizes(b->A,B->rmap->n,B->cmap->n,B->rmap->n,B->cmap->n);CHKERRQ(ierr);
ierr = MatSetType(b->A,MATSEQAIJCUSP);CHKERRQ(ierr);
ierr = PetscLogObjectParent((PetscObject)B,(PetscObject)b->A);CHKERRQ(ierr);
ierr = MatCreate(PETSC_COMM_SELF,&b->B);CHKERRQ(ierr);
ierr = MatSetSizes(b->B,B->rmap->n,B->cmap->N,B->rmap->n,B->cmap->N);CHKERRQ(ierr);
ierr = MatSetType(b->B,MATSEQAIJCUSP);CHKERRQ(ierr);
ierr = PetscLogObjectParent((PetscObject)B,(PetscObject)b->B);CHKERRQ(ierr);
}
ierr = MatSeqAIJSetPreallocation(b->A,d_nz,d_nnz);CHKERRQ(ierr);
ierr = MatSeqAIJSetPreallocation(b->B,o_nz,o_nnz);CHKERRQ(ierr);
ierr = MatCUSPSetFormat(b->A,MAT_CUSP_MULT,cuspStruct->diagGPUMatFormat);CHKERRQ(ierr);
ierr = MatCUSPSetFormat(b->B,MAT_CUSP_MULT,cuspStruct->offdiagGPUMatFormat);CHKERRQ(ierr);
ierr = MatCUSPSetStream(b->A,cuspStruct->stream);CHKERRQ(ierr);
ierr = MatCUSPSetStream(b->B,cuspStruct->stream);CHKERRQ(ierr);
B->preallocated = PETSC_TRUE;
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "MatGetVecs_MPIAIJCUSP"
PetscErrorCode MatGetVecs_MPIAIJCUSP(Mat mat,Vec *right,Vec *left)
{
PetscErrorCode ierr;
PetscFunctionBegin;
if (right) {
ierr = VecCreate(PetscObjectComm((PetscObject)mat),right);CHKERRQ(ierr);
ierr = VecSetSizes(*right,mat->cmap->n,PETSC_DETERMINE);CHKERRQ(ierr);
ierr = VecSetBlockSize(*right,mat->rmap->bs);CHKERRQ(ierr);
ierr = VecSetType(*right,VECCUSP);CHKERRQ(ierr);
ierr = VecSetLayout(*right,mat->cmap);CHKERRQ(ierr);
}
if (left) {
ierr = VecCreate(PetscObjectComm((PetscObject)mat),left);CHKERRQ(ierr);
ierr = VecSetSizes(*left,mat->rmap->n,PETSC_DETERMINE);CHKERRQ(ierr);
ierr = VecSetBlockSize(*left,mat->rmap->bs);CHKERRQ(ierr);
ierr = VecSetType(*left,VECCUSP);CHKERRQ(ierr);
ierr = VecSetLayout(*left,mat->rmap);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "MatMult_MPIAIJCUSP"
PetscErrorCode MatMult_MPIAIJCUSP(Mat A,Vec xx,Vec yy)
{
  /* This multiplication sequence differs from the CPU version.
     In particular, the diagonal block
multiplication kernel is launched in one stream. Then,
in a separate stream, the data transfers from DeviceToHost
(with MPI messaging in between), then HostToDevice are
launched. Once the data transfer stream is synchronized,
to ensure messaging is complete, the MatMultAdd kernel
is launched in the original (MatMult) stream to protect
against race conditions.
This sequence should only be called for GPU computation. */
Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
PetscErrorCode ierr;
PetscInt nt;
PetscFunctionBegin;
ierr = VecGetLocalSize(xx,&nt);CHKERRQ(ierr);
if (nt != A->cmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Incompatible partition of A (%D) and xx (%D)",A->cmap->n,nt);
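  // Launch the diagonal-block multiply first, then exchange ghost values
  // and finish with the off-diagonal multiply-add.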
ierr = VecScatterInitializeForGPU(a->Mvctx,xx,SCATTER_FORWARD);CHKERRQ(ierr);
ierr = (*a->A->ops->mult)(a->A,xx,yy);CHKERRQ(ierr);
ierr = VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
ierr = VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
ierr = (*a->B->ops->multadd)(a->B,a->lvec,yy,yy);CHKERRQ(ierr);
ierr = VecScatterFinalizeForGPU(a->Mvctx);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode MatSetValuesBatch_MPIAIJCUSP(Mat J, PetscInt Ne, PetscInt Nl, PetscInt *elemRows, const PetscScalar *elemMats);
#undef __FUNCT__
#define __FUNCT__ "MatCUSPSetFormat_MPIAIJCUSP"
PetscErrorCode MatCUSPSetFormat_MPIAIJCUSP(Mat A,MatCUSPFormatOperation op,MatCUSPStorageFormat format)
{
Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
Mat_MPIAIJCUSP * cuspStruct = (Mat_MPIAIJCUSP*)a->spptr;
PetscFunctionBegin;
switch (op) {
case MAT_CUSP_MULT_DIAG:
cuspStruct->diagGPUMatFormat = format;
break;
case MAT_CUSP_MULT_OFFDIAG:
cuspStruct->offdiagGPUMatFormat = format;
break;
case MAT_CUSP_ALL:
cuspStruct->diagGPUMatFormat = format;
cuspStruct->offdiagGPUMatFormat = format;
break;
default:
    SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"unsupported operation %d for MatCUSPFormatOperation. Only MAT_CUSP_MULT_DIAG, MAT_CUSP_MULT_OFFDIAG, and MAT_CUSP_ALL are currently supported.",op);
}
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "MatSetFromOptions_MPIAIJCUSP"
PetscErrorCode MatSetFromOptions_MPIAIJCUSP(Mat A)
{
MatCUSPStorageFormat format;
PetscErrorCode ierr;
PetscBool flg;
PetscFunctionBegin;
ierr = PetscOptionsHead("MPIAIJCUSP options");CHKERRQ(ierr);
ierr = PetscObjectOptionsBegin((PetscObject)A);
if (A->factortype==MAT_FACTOR_NONE) {
ierr = PetscOptionsEnum("-mat_cusp_mult_diag_storage_format","sets storage format of the diagonal blocks of (mpi)aijcusp gpu matrices for SpMV",
"MatCUSPSetFormat",MatCUSPStorageFormats,(PetscEnum)MAT_CUSP_CSR,(PetscEnum*)&format,&flg);CHKERRQ(ierr);
if (flg) {
ierr = MatCUSPSetFormat(A,MAT_CUSP_MULT_DIAG,format);CHKERRQ(ierr);
}
ierr = PetscOptionsEnum("-mat_cusp_mult_offdiag_storage_format","sets storage format of the off-diagonal blocks (mpi)aijcusp gpu matrices for SpMV",
"MatCUSPSetFormat",MatCUSPStorageFormats,(PetscEnum)MAT_CUSP_CSR,(PetscEnum*)&format,&flg);CHKERRQ(ierr);
if (flg) {
ierr = MatCUSPSetFormat(A,MAT_CUSP_MULT_OFFDIAG,format);CHKERRQ(ierr);
}
ierr = PetscOptionsEnum("-mat_cusp_storage_format","sets storage format of the diagonal and off-diagonal blocks (mpi)aijcusp gpu matrices for SpMV",
"MatCUSPSetFormat",MatCUSPStorageFormats,(PetscEnum)MAT_CUSP_CSR,(PetscEnum*)&format,&flg);CHKERRQ(ierr);
if (flg) {
ierr = MatCUSPSetFormat(A,MAT_CUSP_ALL,format);CHKERRQ(ierr);
}
}
ierr = PetscOptionsEnd();CHKERRQ(ierr);
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "MatDestroy_MPIAIJCUSP"
PetscErrorCode MatDestroy_MPIAIJCUSP(Mat A)
{
PetscErrorCode ierr;
Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
Mat_MPIAIJCUSP *cuspStruct = (Mat_MPIAIJCUSP*)a->spptr;
cudaError_t err=cudaSuccess;
PetscFunctionBegin;
try {
err = cudaStreamDestroy(cuspStruct->stream);
if (err!=cudaSuccess)
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Mat_MPIAIJCUSP error: %s", cudaGetErrorString(err));
delete cuspStruct;
} catch(char *ex) {
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Mat_MPIAIJCUSP error: %s", ex);
}
cuspStruct = 0;
ierr = MatDestroy_MPIAIJ(A);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "MatCreate_MPIAIJCUSP"
PETSC_EXTERN PetscErrorCode MatCreate_MPIAIJCUSP(Mat A)
{
PetscErrorCode ierr;
Mat_MPIAIJ *a;
Mat_MPIAIJCUSP * cuspStruct;
cudaError_t err=cudaSuccess;
PetscFunctionBegin;
ierr = MatCreate_MPIAIJ(A);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatMPIAIJSetPreallocation_C",MatMPIAIJSetPreallocation_MPIAIJCUSP);CHKERRQ(ierr);
A->ops->getvecs = MatGetVecs_MPIAIJCUSP;
A->ops->setvaluesbatch = MatSetValuesBatch_MPIAIJCUSP;
a = (Mat_MPIAIJ*)A->data;
a->spptr = new Mat_MPIAIJCUSP;
cuspStruct = (Mat_MPIAIJCUSP*)a->spptr;
cuspStruct->diagGPUMatFormat = MAT_CUSP_CSR;
cuspStruct->offdiagGPUMatFormat = MAT_CUSP_CSR;
err = cudaStreamCreate(&(cuspStruct->stream));
if (err!=cudaSuccess)
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Mat_MPIAIJCUSP error: %s", cudaGetErrorString(err));
A->ops->mult = MatMult_MPIAIJCUSP;
A->ops->setfromoptions = MatSetFromOptions_MPIAIJCUSP;
A->ops->destroy = MatDestroy_MPIAIJCUSP;
ierr = PetscObjectComposeFunction((PetscObject)A,"MatCUSPSetFormat_C", MatCUSPSetFormat_MPIAIJCUSP);CHKERRQ(ierr);
ierr = PetscObjectChangeTypeName((PetscObject)A,MATMPIAIJCUSP);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/*@
MatCreateAIJCUSP - Creates a sparse matrix in AIJ (compressed row) format
(the default parallel PETSc format). This matrix will ultimately be pushed down
to NVidia GPUs and use the CUSP library for calculations. For good matrix
assembly performance the user should preallocate the matrix storage by setting
the parameters d_nz/o_nz (or the arrays d_nnz/o_nnz). By setting these parameters accurately,
performance during matrix assembly can be increased by more than a factor of 50.
Collective on MPI_Comm
Input Parameters:
+ comm - MPI communicator
. m - number of local rows (or PETSC_DECIDE)
. n - number of local columns (or PETSC_DECIDE)
. M - number of global rows (or PETSC_DETERMINE)
. N - number of global columns (or PETSC_DETERMINE)
. d_nz - number of nonzeros per row in the diagonal block (same for all local rows)
. d_nnz - array containing the number of nonzeros per row in the diagonal block
(possibly different for each row) or NULL
. o_nz - number of nonzeros per row in the off-diagonal block (same for all local rows)
- o_nnz - array containing the number of nonzeros per row in the off-diagonal block
(possibly different for each row) or NULL
Output Parameter:
. A - the matrix
It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(),
MatXXXXSetPreallocation() paradigm instead of this routine directly.
[MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation]
Notes:
If d_nnz (resp. o_nnz) is given then d_nz (resp. o_nz) is ignored
The AIJ format (also called the Yale sparse matrix format or
compressed row storage) is fully compatible with standard Fortran 77
storage. That is, the stored row and column indices can begin at
either one (as in Fortran) or zero. See the users' manual for details.
Specify the preallocated storage with either d_nz/o_nz or d_nnz/o_nnz (not both).
Set d_nz=o_nz=PETSC_DEFAULT and d_nnz=o_nnz=NULL for PETSc to control dynamic memory
allocation. For large problems you MUST preallocate memory or you
will get TERRIBLE performance, see the users' manual chapter on matrices.
By default, this format uses inodes (identical nodes) when possible, to
improve numerical efficiency of matrix-vector products and solves. We
search for consecutive rows with the same nonzero structure, thereby
reusing matrix information to achieve increased efficiency.
Level: intermediate
.seealso: MatCreate(), MatCreateAIJ(), MatSetValues(), MatSeqAIJSetColumnIndices(), MatCreateSeqAIJWithArrays(), MATMPIAIJCUSP, MATAIJCUSP
@*/
#undef __FUNCT__
#define __FUNCT__ "MatCreateAIJCUSP"
PetscErrorCode MatCreateAIJCUSP(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[],Mat *A)
{
PetscErrorCode ierr;
PetscMPIInt size;
PetscFunctionBegin;
ierr = MatCreate(comm,A);CHKERRQ(ierr);
ierr = MatSetSizes(*A,m,n,M,N);CHKERRQ(ierr);
ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
if (size > 1) {
ierr = MatSetType(*A,MATMPIAIJCUSP);CHKERRQ(ierr);
ierr = MatMPIAIJSetPreallocation(*A,d_nz,d_nnz,o_nz,o_nnz);CHKERRQ(ierr);
} else {
ierr = MatSetType(*A,MATSEQAIJCUSP);CHKERRQ(ierr);
ierr = MatSeqAIJSetPreallocation(*A,d_nz,d_nnz);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
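/* Usage sketch (editor's illustration, not part of the original source; n and the
   5/2 per-row nonzero counts are placeholder values):

     Mat A;
     ierr = MatCreateAIJCUSP(PETSC_COMM_WORLD,PETSC_DECIDE,PETSC_DECIDE,n,n,
                             5,NULL,2,NULL,&A);CHKERRQ(ierr);
     ... set entries with MatSetValues(), then MatAssemblyBegin()/MatAssemblyEnd() ...
     ierr = MatDestroy(&A);CHKERRQ(ierr);
*/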
/*M
MATAIJCUSP - MATMPIAIJCUSP= "aijcusp" = "mpiaijcusp" - A matrix type to be used for sparse matrices.
A matrix type whose data resides on NVIDIA GPUs. These matrices are stored in CSR
format by default; all matrix calculations are performed using the CUSP library.
DIA and ELL formats are also available.
This matrix type is identical to MATSEQAIJCUSP when constructed with a single process communicator,
and MATMPIAIJCUSP otherwise. As a result, for single process communicators,
MatSeqAIJSetPreallocation is supported, and similarly MatMPIAIJSetPreallocation is supported
for communicators controlling multiple processes. It is recommended that you call both of
the above preallocation routines for simplicity.
Options Database Keys:
+ -mat_type mpiaijcusp - sets the matrix type to "mpiaijcusp" during a call to MatSetFromOptions()
. -mat_cusp_storage_format csr - sets the storage format of diagonal and off-diagonal matrices during a call to MatSetFromOptions(). Other storage formats include dia (diagonal) or ell (ellpack).
. -mat_cusp_mult_diag_storage_format csr - sets the storage format of diagonal matrix during a call to MatSetFromOptions(). Other storage formats include dia (diagonal) or ell (ellpack).
- -mat_cusp_mult_offdiag_storage_format csr - sets the storage format of off-diagonal matrix during a call to MatSetFromOptions(). Other storage formats include dia (diagonal) or ell (ellpack).
Level: beginner
.seealso: MatCreateAIJCUSP(), MATSEQAIJCUSP, MatCreateSeqAIJCUSP(), MatCUSPSetFormat(), MatCUSPStorageFormat, MatCUSPFormatOperation
M*/
|
b96fda54933b1b6c7e227247237968f93027661b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*=========================================================================
Library : Image Registration Toolkit (IRTK)
Copyright : Imperial College, Department of Computing
Visual Information Processing (VIP), 2011 onwards
Date : $Date: 2013-11-15 14:36:30 +0100 (Fri, 15 Nov 2013) $
Version : $Revision: 1 $
Changes : $Author: bkainz $
Copyright (c) 2014, Bernhard Kainz, Markus Steinberger,
Maria Murgasova, Kevin Keraudren
All rights reserved.
If you use this work for research we would very much appreciate if you cite
Bernhard Kainz, Markus Steinberger, Wolfgang Wein, Maria Kuklisova-Murgasova,
Christina Malamateniou, Kevin Keraudren, Thomas Torsney-Weir, Mary Rutherford,
Paul Aljabar, Joseph V. Hajnal, and Daniel Rueckert: Fast Volume Reconstruction
from Motion Corrupted Stacks of 2D Slices. IEEE Transactions on Medical Imaging,
in print, 2015. doi:10.1109/TMI.2015.2415453
IRTK IS PROVIDED UNDER THE TERMS OF THIS CREATIVE
COMMONS PUBLIC LICENSE ("CCPL" OR "LICENSE"). THE WORK IS PROTECTED BY
COPYRIGHT AND/OR OTHER APPLICABLE LAW. ANY USE OF THE WORK OTHER THAN
AS AUTHORIZED UNDER THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
BY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU ACCEPT AND AGREE
TO BE BOUND BY THE TERMS OF THIS LICENSE. TO THE EXTENT THIS LICENSE MAY BE
CONSIDERED TO BE A CONTRACT, THE LICENSOR GRANTS YOU THE RIGHTS CONTAINED
HERE IN CONSIDERATION OF YOUR ACCEPTANCE OF SUCH TERMS AND CONDITIONS.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=========================================================================*/
//! Functionality for performing gaussian filtering
#ifndef GAUSSFILTER_CU
#define GAUSSFILTER_CU
#include <stdio.h>
#include <npp.h>
#include "gaussFilterConvolution.cuh"
//#include "gaussfilter_kernel.cu"
#include "helper_cuda.h"
int iDivUp(int a, int b)
{
return (a + b - 1) / b;
//return (a % b != 0) ? (a / b + 1) : (a / b);
}
//!/////////////////////////////////////////////////////////////////////////////
//! General Functions
//!/////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//! Generate 1D Gaussian convolution kernel
//! @param kernel resulting kernel (necessary memory will be allocated)
//! @param sigma standard deviation of the Gaussian
//! @param klength length of the kernel (must be odd)
////////////////////////////////////////////////////////////////////////////////
int generateGaussianKernel(float** kernel, float sigma, int klength)
{
// check for valid filter length
if ((klength % 2) == 0)
{
fprintf(stderr, "Error: Convolution Kernel length even\n");
return -1;
}
// allocate memory for kernel
*kernel = (float*)malloc(sizeof(float) * klength);
// sum for normalization
float sum = 0;
// compute kernel values
int mid_point = (int)floor(klength/2.0f);
for( int i = 0; i < klength; i++)
{
// generate value
(*kernel)[i] = exp(-(float)abs(i-mid_point)*(float)abs(i-mid_point)/(2*sigma*sigma));
// update sum for normalization
sum += (*kernel)[i];
}
// normalize kernel
for(int i = 0; i < klength; i++)
(*kernel)[i] /= sum;
return 0;
}
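/* Usage sketch (editor's illustration, not part of the original source):

     float* k = NULL;
     if (generateGaussianKernel(&k, 2.0f, 11) == 0) {
       // k now holds 11 taps summing to 1, centered at k[5]; the caller owns it.
       free(k);
     }
*/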
texture<float, hipTextureType1D, hipReadModeElementType> gaussKernelTex_;
template<int klength>
__global__ void GaussXKernel(hipSurfaceObject_t in, hipSurfaceObject_t out)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z;
float v = surf2DLayeredread<float>(in, x*4, y, z, hipBoundaryModeClamp);
if(v != -1)
{
v = v * tex1Dfetch(gaussKernelTex_, 0);
#pragma unroll
for(int i = 1; i < (klength+1)/2; ++i)
v = v + tex1Dfetch(gaussKernelTex_, i) * (max(0.0f,surf2DLayeredread<float>(in, (x + i)*4, y, z, hipBoundaryModeClamp)) +
max(0.0f,surf2DLayeredread<float>(in, (x - i)*4, y, z, hipBoundaryModeClamp)) );
}
surf2DLayeredwrite(v, out, x*4, y, z, hipBoundaryModeZero);
}
__global__ void GaussXKernelGeneral(int klength, hipSurfaceObject_t in, hipSurfaceObject_t out)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z;
float v = surf2DLayeredread<float>(in, x*4, y, z, hipBoundaryModeClamp);
if(v != -1)
{
v = v * tex1Dfetch(gaussKernelTex_, 0);
for(int i = 1; i < (klength+1)/2; ++i)
v = v + tex1Dfetch(gaussKernelTex_, i) * (max(0.0f,surf2DLayeredread<float>(in, (x + i)*4, y, z, hipBoundaryModeClamp)) +
max(0.0f,surf2DLayeredread<float>(in, (x - i)*4, y, z, hipBoundaryModeClamp)) );
}
surf2DLayeredwrite(v, out, x*4, y, z, hipBoundaryModeZero);
}
template<int klength>
__global__ void GaussYKernel(hipSurfaceObject_t in, hipSurfaceObject_t out)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z;
float v = surf2DLayeredread<float>(in, x*4, y, z, hipBoundaryModeClamp);
if(v != -1)
{
v = v * tex1Dfetch(gaussKernelTex_, 0);
#pragma unroll
for(int i = 1; i < (klength+1)/2; ++i)
v = v + tex1Dfetch(gaussKernelTex_, i) * (max(0.0f,surf2DLayeredread<float>(in, x*4, y + i, z, hipBoundaryModeClamp)) +
max(0.0f,surf2DLayeredread<float>(in, x*4, y - i, z, hipBoundaryModeClamp)) );
}
surf2DLayeredwrite(v, out, x*4, y, z, hipBoundaryModeZero);
}
__global__ void GaussYKernelGeneral(int klength, hipSurfaceObject_t in, hipSurfaceObject_t out)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z;
float v = surf2DLayeredread<float>(in, x*4, y, z, hipBoundaryModeClamp);
if(v != -1)
{
v = v * tex1Dfetch(gaussKernelTex_, 0);
for(int i = 1; i < (klength+1)/2; ++i)
v = v + tex1Dfetch(gaussKernelTex_, i) * (max(0.0f,surf2DLayeredread<float>(in, x*4, y + i, z, hipBoundaryModeClamp)) +
max(0.0f,surf2DLayeredread<float>(in, x*4, y - i, z, hipBoundaryModeClamp)) );
}
surf2DLayeredwrite(v, out, x*4, y, z, hipBoundaryModeZero);
}
////////////////////////////////////////////////////////////////////////////////
//! Performs optimized Gaussian filtering of a stack of images (in the x,y
//! directions), with the slices stacked along z
//! @param input pointer to input image stack
//! @param output pointer to output image stack
//! @param temp pointer to temp image stack
//! @param width width of the image
//! @param height height of the image
//! @param slices num slices
//! @param sigma sigma parameter to construct kernel
////////////////////////////////////////////////////////////////////////////////
int FilterGaussStack(hipSurfaceObject_t input, hipSurfaceObject_t output, hipSurfaceObject_t temp,
unsigned int width, unsigned int height, unsigned int slices, float sigma)
{
int ret = 0;
//determine filter length
int klength = max(min((int)(sigma*5),MAX_LENGTH_SK),7);
klength -= 1-klength%2;
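// Editor's note: the adjustment above forces an odd kernel length, e.g. sigma=2
// gives (int)(2*5)=10, then 10-(1-10%2)=9 (assuming MAX_LENGTH_SK >= 10).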
int dev;
hipGetDevice(&dev);
static int lastKLength[128] = {-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1};
static float lastsigma[128] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
static float* d_GaussKoeffs[128] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
if(lastKLength[dev] != klength || lastsigma[dev] != sigma)
{
if(lastKLength[dev] != klength)
{
if(d_GaussKoeffs[dev] != 0)
hipFree(d_GaussKoeffs[dev]);
hipMalloc(&d_GaussKoeffs[dev], sizeof(float)*(klength+1)/2);
}
// generate kernel
float* kernel = NULL;
ret = generateGaussianKernel(&kernel, sigma, klength);
if (ret)
{
fprintf(stderr, "Error in CUDA FilterGaussStack(): Could not generate Kernel\n");
return ret;
}
hipMemcpy(d_GaussKoeffs[dev], kernel + klength/2, (klength+1)/2*sizeof(float), hipMemcpyHostToDevice);
free(kernel);
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
checkCudaErrors(hipBindTexture(0, gaussKernelTex_, d_GaussKoeffs[dev], channelDesc, (klength+1)/2*sizeof(float)));
gaussKernelTex_.addressMode[0] = hipAddressModeClamp;
gaussKernelTex_.filterMode = hipFilterModePoint;
gaussKernelTex_.normalized = false;
lastsigma[dev] = sigma;
lastKLength[dev] = klength;
}
//filter (with optimizations for special cases)
const int blockSize1 = 32;
const int blockSize2 = 32;
dim3 blockx(blockSize1, blockSize2);
dim3 gridx(iDivUp(width, blockSize1), iDivUp(height, blockSize2), slices);
dim3 blocky(blockSize2, blockSize1);
dim3 gridy(iDivUp(width, blockSize2), iDivUp(height, blockSize1), slices);
switch(klength)
{
case 7:
hipLaunchKernelGGL(( GaussXKernel<7>), dim3(gridx), dim3(blockx), 0, 0, input, temp);
hipLaunchKernelGGL(( GaussYKernel<7>), dim3(gridy), dim3(blocky), 0, 0, temp, output);
break;
case 9:
hipLaunchKernelGGL(( GaussXKernel<9>), dim3(gridx), dim3(blockx), 0, 0, input, temp);
hipLaunchKernelGGL(( GaussYKernel<9>), dim3(gridy), dim3(blocky), 0, 0, temp, output);
break;
case 11:
hipLaunchKernelGGL(( GaussXKernel<11>), dim3(gridx), dim3(blockx), 0, 0, input, temp);
hipLaunchKernelGGL(( GaussYKernel<11>), dim3(gridy), dim3(blocky), 0, 0, temp, output);
break;
case 13:
hipLaunchKernelGGL(( GaussXKernel<13>), dim3(gridx), dim3(blockx), 0, 0, input, temp);
hipLaunchKernelGGL(( GaussYKernel<13>), dim3(gridy), dim3(blocky), 0, 0, temp, output);
break;
case 15:
hipLaunchKernelGGL(( GaussXKernel<15>), dim3(gridx), dim3(blockx), 0, 0, input, temp);
hipLaunchKernelGGL(( GaussYKernel<15>), dim3(gridy), dim3(blocky), 0, 0, temp, output);
break;
default:
hipLaunchKernelGGL(( GaussXKernelGeneral), dim3(gridx), dim3(blockx), 0, 0, klength, input, temp);
hipLaunchKernelGGL(( GaussYKernelGeneral), dim3(gridy), dim3(blocky), 0, 0, klength, temp, output);
break;
}
return ret;
}
#endif // GAUSSFILTER_CU
| b96fda54933b1b6c7e227247237968f93027661b.cu | /*=========================================================================
Library : Image Registration Toolkit (IRTK)
Copyright : Imperial College, Department of Computing
Visual Information Processing (VIP), 2011 onwards
Date : $Date: 2013-11-15 14:36:30 +0100 (Fri, 15 Nov 2013) $
Version : $Revision: 1 $
Changes : $Author: bkainz $
Copyright (c) 2014, Bernhard Kainz, Markus Steinberger,
Maria Murgasova, Kevin Keraudren
All rights reserved.
If you use this work for research we would very much appreciate if you cite
Bernhard Kainz, Markus Steinberger, Wolfgang Wein, Maria Kuklisova-Murgasova,
Christina Malamateniou, Kevin Keraudren, Thomas Torsney-Weir, Mary Rutherford,
Paul Aljabar, Joseph V. Hajnal, and Daniel Rueckert: Fast Volume Reconstruction
from Motion Corrupted Stacks of 2D Slices. IEEE Transactions on Medical Imaging,
in print, 2015. doi:10.1109/TMI.2015.2415453
IRTK IS PROVIDED UNDER THE TERMS OF THIS CREATIVE
COMMONS PUBLIC LICENSE ("CCPL" OR "LICENSE"). THE WORK IS PROTECTED BY
COPYRIGHT AND/OR OTHER APPLICABLE LAW. ANY USE OF THE WORK OTHER THAN
AS AUTHORIZED UNDER THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
BY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU ACCEPT AND AGREE
TO BE BOUND BY THE TERMS OF THIS LICENSE. TO THE EXTENT THIS LICENSE MAY BE
CONSIDERED TO BE A CONTRACT, THE LICENSOR GRANTS YOU THE RIGHTS CONTAINED
HERE IN CONSIDERATION OF YOUR ACCEPTANCE OF SUCH TERMS AND CONDITIONS.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=========================================================================*/
//! Functionality for performing gaussian filtering
#ifndef GAUSSFILTER_CU
#define GAUSSFILTER_CU
#include <stdio.h>
#include <npp.h>
#include "gaussFilterConvolution.cuh"
//#include "gaussfilter_kernel.cu"
#include "helper_cuda.h"
int iDivUp(int a, int b)
{
return (a + b - 1) / b;
//return (a % b != 0) ? (a / b + 1) : (a / b);
}
//!/////////////////////////////////////////////////////////////////////////////
//! General Functions
//!/////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//! Generate 1D Gaussian convolution kernel
//! @param kernel resulting kernel (necessary memory will be allocated)
//! @param sigma standard deviation of the Gaussian
//! @param klength length of the kernel (must be odd)
////////////////////////////////////////////////////////////////////////////////
int generateGaussianKernel(float** kernel, float sigma, int klength)
{
// check for valid filter length
if ((klength % 2) == 0)
{
fprintf(stderr, "Error: Convolution Kernel length even\n");
return -1;
}
// allocate memory for kernel
*kernel = (float*)malloc(sizeof(float) * klength);
// sum for normalization
float sum = 0;
// compute kernel values
int mid_point = (int)floor(klength/2.0f);
for( int i = 0; i < klength; i++)
{
// generate value
(*kernel)[i] = exp(-(float)abs(i-mid_point)*(float)abs(i-mid_point)/(2*sigma*sigma));
// update sum for normalization
sum += (*kernel)[i];
}
// normalize kernel
for(int i = 0; i < klength; i++)
(*kernel)[i] /= sum;
return 0;
}
texture<float, cudaTextureType1D, cudaReadModeElementType> gaussKernelTex_;
template<int klength>
__global__ void GaussXKernel(cudaSurfaceObject_t in, cudaSurfaceObject_t out)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z;
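// Editor's note: surf2DLayeredread addresses the x coordinate in bytes, hence
// x*4 for 4-byte floats; a sample of -1 is treated as a mask sentinel and is
// written through unfiltered.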
float v = surf2DLayeredread<float>(in, x*4, y, z, cudaBoundaryModeClamp);
if(v != -1)
{
v = v * tex1Dfetch(gaussKernelTex_, 0);
#pragma unroll
for(int i = 1; i < (klength+1)/2; ++i)
v = v + tex1Dfetch(gaussKernelTex_, i) * (max(0.0f,surf2DLayeredread<float>(in, (x + i)*4, y, z, cudaBoundaryModeClamp)) +
max(0.0f,surf2DLayeredread<float>(in, (x - i)*4, y, z, cudaBoundaryModeClamp)) );
}
surf2DLayeredwrite(v, out, x*4, y, z, cudaBoundaryModeZero);
}
__global__ void GaussXKernelGeneral(int klength, cudaSurfaceObject_t in, cudaSurfaceObject_t out)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z;
float v = surf2DLayeredread<float>(in, x*4, y, z, cudaBoundaryModeClamp);
if(v != -1)
{
v = v * tex1Dfetch(gaussKernelTex_, 0);
for(int i = 1; i < (klength+1)/2; ++i)
v = v + tex1Dfetch(gaussKernelTex_, i) * (max(0.0f,surf2DLayeredread<float>(in, (x + i)*4, y, z, cudaBoundaryModeClamp)) +
max(0.0f,surf2DLayeredread<float>(in, (x - i)*4, y, z, cudaBoundaryModeClamp)) );
}
surf2DLayeredwrite(v, out, x*4, y, z, cudaBoundaryModeZero);
}
template<int klength>
__global__ void GaussYKernel(cudaSurfaceObject_t in, cudaSurfaceObject_t out)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z;
float v = surf2DLayeredread<float>(in, x*4, y, z, cudaBoundaryModeClamp);
if(v != -1)
{
v = v * tex1Dfetch(gaussKernelTex_, 0);
#pragma unroll
for(int i = 1; i < (klength+1)/2; ++i)
v = v + tex1Dfetch(gaussKernelTex_, i) * (max(0.0f,surf2DLayeredread<float>(in, x*4, y + i, z, cudaBoundaryModeClamp)) +
max(0.0f,surf2DLayeredread<float>(in, x*4, y - i, z, cudaBoundaryModeClamp)) );
}
surf2DLayeredwrite(v, out, x*4, y, z, cudaBoundaryModeZero);
}
__global__ void GaussYKernelGeneral(int klength, cudaSurfaceObject_t in, cudaSurfaceObject_t out)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z;
float v = surf2DLayeredread<float>(in, x*4, y, z, cudaBoundaryModeClamp);
if(v != -1)
{
v = v * tex1Dfetch(gaussKernelTex_, 0);
for(int i = 1; i < (klength+1)/2; ++i)
v = v + tex1Dfetch(gaussKernelTex_, i) * (max(0.0f,surf2DLayeredread<float>(in, x*4, y + i, z, cudaBoundaryModeClamp)) +
max(0.0f,surf2DLayeredread<float>(in, x*4, y - i, z, cudaBoundaryModeClamp)) );
}
surf2DLayeredwrite(v, out, x*4, y, z, cudaBoundaryModeZero);
}
////////////////////////////////////////////////////////////////////////////////
//! Performs optimized Gaussian filtering of a stack of images (in the x,y
//! directions), with the slices stacked along z
//! @param input pointer to input image stack
//! @param output pointer to output image stack
//! @param temp pointer to temp image stack
//! @param width width of the image
//! @param height height of the image
//! @param slices num slices
//! @param sigma sigma parameter to construct kernel
////////////////////////////////////////////////////////////////////////////////
int FilterGaussStack(cudaSurfaceObject_t input, cudaSurfaceObject_t output, cudaSurfaceObject_t temp,
unsigned int width, unsigned int height, unsigned int slices, float sigma)
{
int ret = 0;
//determine filter length
int klength = max(min((int)(sigma*5),MAX_LENGTH_SK),7);
klength -= 1-klength%2;
int dev;
cudaGetDevice(&dev);
static int lastKLength[128] = {-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1};
static float lastsigma[128] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
static float* d_GaussKoeffs[128] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
if(lastKLength[dev] != klength || lastsigma[dev] != sigma)
{
if(lastKLength[dev] != klength)
{
if(d_GaussKoeffs[dev] != 0)
cudaFree(d_GaussKoeffs[dev]);
cudaMalloc(&d_GaussKoeffs[dev], sizeof(float)*(klength+1)/2);
}
// generate kernel
float* kernel = NULL;
ret = generateGaussianKernel(&kernel, sigma, klength);
if (ret)
{
fprintf(stderr, "Error in CUDA FilterGaussStack(): Could not generate Kernel\n");
return ret;
}
cudaMemcpy(d_GaussKoeffs[dev], kernel + klength/2, (klength+1)/2*sizeof(float), cudaMemcpyHostToDevice);
free(kernel);
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
checkCudaErrors(cudaBindTexture(0, gaussKernelTex_, d_GaussKoeffs[dev], channelDesc, (klength+1)/2*sizeof(float)));
gaussKernelTex_.addressMode[0] = cudaAddressModeClamp;
gaussKernelTex_.filterMode = cudaFilterModePoint;
gaussKernelTex_.normalized = false;
lastsigma[dev] = sigma;
lastKLength[dev] = klength;
}
//filter (with optimizations for special cases)
const int blockSize1 = 32;
const int blockSize2 = 32;
dim3 blockx(blockSize1, blockSize2);
dim3 gridx(iDivUp(width, blockSize1), iDivUp(height, blockSize2), slices);
dim3 blocky(blockSize2, blockSize1);
dim3 gridy(iDivUp(width, blockSize2), iDivUp(height, blockSize1), slices);
switch(klength)
{
case 7:
GaussXKernel<7><<<gridx, blockx>>>(input, temp);
GaussYKernel<7><<<gridy, blocky>>>(temp, output);
break;
case 9:
GaussXKernel<9><<<gridx, blockx>>>(input, temp);
GaussYKernel<9><<<gridy, blocky>>>(temp, output);
break;
case 11:
GaussXKernel<11><<<gridx, blockx>>>(input, temp);
GaussYKernel<11><<<gridy, blocky>>>(temp, output);
break;
case 13:
GaussXKernel<13><<<gridx, blockx>>>(input, temp);
GaussYKernel<13><<<gridy, blocky>>>(temp, output);
break;
case 15:
GaussXKernel<15><<<gridx, blockx>>>(input, temp);
GaussYKernel<15><<<gridy, blocky>>>(temp, output);
break;
default:
GaussXKernelGeneral<<<gridx, blockx>>>(klength, input, temp);
GaussYKernelGeneral<<<gridy, blocky>>>(klength, temp, output);
break;
}
return ret;
}
#endif // GAUSSFILTER_CU
|
b6ac68e44f0f0c3e062a2837cf74ad3771f7e8c2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <bake_kernels.h>
#include <bake_api.h>
#include <cuda/random.h>
#include <optixu/optixu_math_namespace.h>
using optix::float3;
inline int idiv_ceil(const int x, const int y)
{
return (x + y - 1) / y;
}
// Ray generation kernel
__global__ void generate_rays_kernel(
const unsigned int base_seed,
const int px,
const int py,
const int sqrt_passes,
const float scene_offset,
const int num_samples,
const float3* sample_normals,
const float3* sample_face_normals,
const float3* sample_positions,
bake::Ray* rays)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= num_samples) return;
const unsigned int tea_seed = (base_seed << 16) | (px * sqrt_passes + py);
unsigned seed = tea<2>(tea_seed, idx);
const float3 sample_norm = sample_normals[idx];
const float3 sample_face_norm = sample_face_normals[idx];
const float3 sample_pos = sample_positions[idx];
const float3 ray_origin = sample_pos + scene_offset * sample_norm;
optix::Onb onb(sample_norm);
float3 ray_dir;
float u0 = (static_cast<float>(px) + rnd(seed)) / static_cast<float>(sqrt_passes);
float u1 = (static_cast<float>(py) + rnd(seed)) / static_cast<float>(sqrt_passes);
int j = 0;
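// Editor's note: the loop below rejection-samples up to 5 cosine-weighted
// directions until the ray points away from the geometric face normal; the
// interpolated shading normal used to build the basis can disagree with the
// face normal near creases.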
do
{
optix::cosine_sample_hemisphere(u0, u1, ray_dir);
onb.inverse_transform(ray_dir);
++j;
u0 = rnd(seed);
u1 = rnd(seed);
}
while (j < 5 && optix::dot(ray_dir, sample_face_norm) <= 0.0f);
rays[idx].origin = ray_origin;
rays[idx].direction = ray_dir;
}
__host__ void bake::generate_rays_device(unsigned int seed, int px, int py, int sqrt_passes, float scene_offset, const bake::AOSamples& ao_samples, Ray* rays)
{
const int block_size = 512;
const int block_count = idiv_ceil((int)ao_samples.num_samples, block_size);
hipLaunchKernelGGL(( generate_rays_kernel), dim3(block_count),dim3(block_size), 0, 0,
seed,
px,
py,
sqrt_passes,
scene_offset,
(int)ao_samples.num_samples,
(float3*)ao_samples.sample_normals,
(float3*)ao_samples.sample_face_normals,
(float3*)ao_samples.sample_positions,
rays
);
}
// AO update kernel
__global__ void update_ao_kernel(int num_samples, float maxdistance, const float* hit_data, float* ao_data)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= num_samples) return;
float distance = hit_data[idx];
ao_data[idx] += distance > 0.0 && distance < maxdistance ? 1.0f : 0.0f;
}
// Precondition: ao output initialized to 0 before first pass
__host__ void bake::update_ao_device(size_t num_samples, float max_distance, const float* hits, float* ao)
{
int block_size = 512;
int block_count = idiv_ceil(num_samples, block_size);
hipLaunchKernelGGL(( update_ao_kernel), dim3(block_count), dim3(block_size), 0, 0, num_samples, max_distance, hits, ao);
}
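/* Usage sketch (editor's illustration, not part of the original source; d_rays,
   d_hits and d_ao are hypothetical caller-owned device buffers, and the ray
   tracer call is elided):

     for (int px = 0; px < sqrt_passes; ++px)
       for (int py = 0; py < sqrt_passes; ++py) {
         bake::generate_rays_device(seed, px, py, sqrt_passes, scene_offset, ao_samples, d_rays);
         // ... trace d_rays, writing per-ray hit distances into d_hits ...
         bake::update_ao_device(ao_samples.num_samples, max_distance, d_hits, d_ao);
       }
     // d_ao then holds raw hit counts; divide by sqrt_passes*sqrt_passes to normalize.
*/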
| b6ac68e44f0f0c3e062a2837cf74ad3771f7e8c2.cu | /* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <bake_kernels.h>
#include <bake_api.h>
#include <cuda/random.h>
#include <optixu/optixu_math_namespace.h>
using optix::float3;
inline int idiv_ceil(const int x, const int y)
{
return (x + y - 1) / y;
}
// Ray generation kernel
__global__ void generate_rays_kernel(
const unsigned int base_seed,
const int px,
const int py,
const int sqrt_passes,
const float scene_offset,
const int num_samples,
const float3* sample_normals,
const float3* sample_face_normals,
const float3* sample_positions,
bake::Ray* rays)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= num_samples) return;
const unsigned int tea_seed = (base_seed << 16) | (px * sqrt_passes + py);
unsigned seed = tea<2>(tea_seed, idx);
const float3 sample_norm = sample_normals[idx];
const float3 sample_face_norm = sample_face_normals[idx];
const float3 sample_pos = sample_positions[idx];
const float3 ray_origin = sample_pos + scene_offset * sample_norm;
optix::Onb onb(sample_norm);
float3 ray_dir;
float u0 = (static_cast<float>(px) + rnd(seed)) / static_cast<float>(sqrt_passes);
float u1 = (static_cast<float>(py) + rnd(seed)) / static_cast<float>(sqrt_passes);
int j = 0;
do
{
optix::cosine_sample_hemisphere(u0, u1, ray_dir);
onb.inverse_transform(ray_dir);
++j;
u0 = rnd(seed);
u1 = rnd(seed);
}
while (j < 5 && optix::dot(ray_dir, sample_face_norm) <= 0.0f);
rays[idx].origin = ray_origin;
rays[idx].direction = ray_dir;
}
__host__ void bake::generate_rays_device(unsigned int seed, int px, int py, int sqrt_passes, float scene_offset, const bake::AOSamples& ao_samples, Ray* rays)
{
const int block_size = 512;
const int block_count = idiv_ceil((int)ao_samples.num_samples, block_size);
generate_rays_kernel<<<block_count,block_size>>>(
seed,
px,
py,
sqrt_passes,
scene_offset,
(int)ao_samples.num_samples,
(float3*)ao_samples.sample_normals,
(float3*)ao_samples.sample_face_normals,
(float3*)ao_samples.sample_positions,
rays
);
}
// AO update kernel
__global__ void update_ao_kernel(int num_samples, float maxdistance, const float* hit_data, float* ao_data)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= num_samples) return;
float distance = hit_data[idx];
ao_data[idx] += distance > 0.0 && distance < maxdistance ? 1.0f : 0.0f;
}
// Precondition: ao output initialized to 0 before first pass
__host__ void bake::update_ao_device(size_t num_samples, float max_distance, const float* hits, float* ao)
{
int block_size = 512;
int block_count = idiv_ceil(num_samples, block_size);
update_ao_kernel<<<block_count, block_size>>>(num_samples, max_distance, hits, ao);
}
|
293840327cfea119929e30a521a5001c1efb8e0c.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <ATen/native/hip/jit_utils.h>
#include <ATen/NumericUtils.h>
#include <c10/core/Scalar.h>
#include <c10/hip/HIPMathCompat.h>
#include <c10/util/complex.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char modified_bessel_k1_name[] = "modified_bessel_k1_forward";
void modified_bessel_k1_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "modified_bessel_k1_cuda", [&]() {
jitted_gpu_kernel<modified_bessel_k1_name, scalar_t, scalar_t, 1>(iterator, modified_bessel_k1_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "modified_bessel_k1_cuda", [&]() {
gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return modified_bessel_k1_forward(a);
});
});
#endif // AT_USE_JITERATOR()
}
}
REGISTER_DISPATCH(special_modified_bessel_k1_stub, &modified_bessel_k1_kernel_cuda);
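// Editor's note (illustrative): with this stub registered, CUDA tensors reach the
// kernel above through the public ops, e.g. at::special_modified_bessel_k1(t) in
// C++ or torch.special.modified_bessel_k1(t) in Python.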
} // namespace at::native
| 293840327cfea119929e30a521a5001c1efb8e0c.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
#include <ATen/NumericUtils.h>
#include <c10/core/Scalar.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <c10/util/complex.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char modified_bessel_k1_name[] = "modified_bessel_k1_forward";
void modified_bessel_k1_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "modified_bessel_k1_cuda", [&]() {
jitted_gpu_kernel<modified_bessel_k1_name, scalar_t, scalar_t, 1>(iterator, modified_bessel_k1_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "modified_bessel_k1_cuda", [&]() {
gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return modified_bessel_k1_forward(a);
});
});
#endif // AT_USE_JITERATOR()
}
}
REGISTER_DISPATCH(special_modified_bessel_k1_stub, &modified_bessel_k1_kernel_cuda);
} // namespace at::native
|
81d0094cb6f5c51b886301ddd43c2c98f40ea9ea.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hip/hip_fp16.h>
#include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <algorithm>
#include <device_launch_parameters.h>
#include "caffe/common.hpp"
#include "caffe/util/gpu_math_functions.cuh"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template<>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
CUDA_CHECK(hipStreamSynchronize(Caffe::thread_stream()));
}
template<>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
CUDA_CHECK(hipStreamSynchronize(Caffe::thread_stream()));
}
template<>
void caffe_gpu_gemm<float16>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float16 alpha, const float16* A, const float16* B, const float16 beta,
float16* C) {
// Note that cublas follows fortran order.
const int lda = (TransA == CblasNoTrans) ? K : M;
const int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
// TODO
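// Editor's note: compute capability 6.0 parts (e.g. Tesla P100) expose native
// fp16 arithmetic, so the true half-precision hipblasHgemm path is taken; on
// other devices the math is carried out in fp32 via cublasSgemmEx while the
// operands stay in fp16 storage.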
if (Caffe::device_capability(Caffe::current_device()) == 600) {
CUBLAS_CHECK(hipblasHgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, alpha.gethp(), B->gethp(), ldb,
A->gethp(), lda, beta.gethp(), C->gethp(), N));
} else {
float alpha_fp32 = static_cast<float>(alpha);
float beta_fp32 = static_cast<float>(beta);
CUBLAS_CHECK(cublasSgemmEx(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha_fp32, B->gethp(), CAFFE_DATA_HALF, ldb,
A->gethp(), CAFFE_DATA_HALF, lda, &beta_fp32, C->gethp(), CAFFE_DATA_HALF, N));
}
CUDA_CHECK(hipStreamSynchronize(Caffe::thread_stream()));
}
template<>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
CUDA_CHECK(hipStreamSynchronize(Caffe::thread_stream()));
}
template<>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
CUDA_CHECK(hipStreamSynchronize(Caffe::thread_stream()));
}
template<>
void caffe_gpu_gemv<float16>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float16 alpha, const float16* A, const float16* x,
const float16 beta, float16* y) {
float alpha_fp32 = static_cast<float>(alpha);
float beta_fp32 = static_cast<float>(beta);
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
int m = (cuTransA == HIPBLAS_OP_N) ? N : M;
int k = (cuTransA == HIPBLAS_OP_N) ? M : N;
int LDA = (cuTransA == HIPBLAS_OP_N) ? m : k;
int LDC = m;
CUBLAS_CHECK(cublasSgemmEx(Caffe::cublas_handle(), cuTransA, HIPBLAS_OP_N,
m, 1, k, &alpha_fp32, A, CAFFE_DATA_HALF, LDA,
x, CAFFE_DATA_HALF, k, &beta_fp32,
y, CAFFE_DATA_HALF, LDC));
CUDA_CHECK(hipStreamSynchronize(Caffe::thread_stream()));
}
template<>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y, void* handle) {
hipblasHandle_t cublas_handle =
handle == nullptr ? Caffe::cublas_handle() : reinterpret_cast<hipblasHandle_t>(handle);
hipStream_t stream;
CUBLAS_CHECK(hipblasGetStream(cublas_handle, &stream));
CUBLAS_CHECK(hipblasSaxpy(cublas_handle, N, &alpha, X, 1, Y, 1));
CUDA_CHECK(hipStreamSynchronize(stream));
}
template<>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y, void* handle) {
hipblasHandle_t cublas_handle =
handle == nullptr ? Caffe::cublas_handle() : reinterpret_cast<hipblasHandle_t>(handle);
hipStream_t stream;
CUBLAS_CHECK(hipblasGetStream(cublas_handle, &stream));
CUBLAS_CHECK(hipblasDaxpy(cublas_handle, N, &alpha, X, 1, Y, 1));
CUDA_CHECK(hipStreamSynchronize(stream));
}
template<typename Dtype, typename Mtype>
__global__
void axpy_kernel(const int N, const Mtype alpha, const Dtype* x, Dtype* y) {
for (int idx = threadIdx.x + blockDim.x * blockIdx.x; idx < N;
idx += blockDim.x * gridDim.x) {
y[idx] = alpha * (Mtype) x[idx] + (Mtype) y[idx];
}
}
template<>
__global__
void axpy_kernel<__half2, __half2>(const int N, const __half2 alpha, const __half2* x, __half2* y) {
#if __CUDA_ARCH__ >= 530
CUDA_KERNEL_LOOP(idx, N) {
y[idx] = __hfma2(alpha, x[idx], y[idx]);
}
#else
float2 a = __half22float2(alpha);
float2 x2, y2;
CUDA_KERNEL_LOOP(idx, N) {
x2 = __half22float2(x[idx]);
y2 = __half22float2(y[idx]);
y2.x = a.x * x2.x + y2.x;
y2.y = a.y * x2.y + y2.y;
y[idx] = float22half2_clip(y2);
}
#endif
}
template<>
__global__
void axpy_kernel<__half2, float>(const int N, const float alpha, const __half2* x, __half2* y) {
#if __CUDA_ARCH__ >= 530
__half2 a = __float2half2_rn(alpha);
CUDA_KERNEL_LOOP(idx, N) {
y[idx] = __hfma2(a, x[idx], y[idx]);
}
#else
float2 x2, y2;
CUDA_KERNEL_LOOP(idx, N) {
x2 = __half22float2(x[idx]);
y2 = __half22float2(y[idx]);
y2.x = alpha * x2.x + y2.x;
y2.y = alpha * x2.y + y2.y;
y[idx] = float22half2_clip(y2);
}
#endif
}
template<>
void caffe_gpu_axpy<float16>(const int N, const float16 alpha, const float16* x, float16* y,
void* handle) {
hipblasHandle_t cublas_handle =
handle == nullptr ? Caffe::cublas_handle() : reinterpret_cast<hipblasHandle_t>(handle);
hipStream_t stream;
CUBLAS_CHECK(hipblasGetStream(cublas_handle, &stream));
const unsigned int n2 = even(N) / 2;
unsigned int alphax = (unsigned int) alpha.getx();
__half2 alpha2;
alpha2.x = alphax + (alphax << 16);
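// Editor's note: this packs the raw fp16 bits of alpha into both 16-bit lanes of
// the __half2 through its legacy integer storage field, so each half2 operation
// in the kernel scales two elements at once.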
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( axpy_kernel) , dim3(CAFFE_GET_BLOCKS_HALF(n2)), dim3(CAFFE_CUDA_NUM_THREADS_HALF), 0, stream,
n2, alpha2, reinterpret_cast<const __half2*>(x), reinterpret_cast<__half2*>(y));
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
void caffe_gpu_axpy_extfp16(const int N, const float alpha, const float16* x, float16* y) {
hipStream_t stream = Caffe::thread_stream();
const unsigned int n2 = even(N) / 2;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( axpy_kernel) , dim3(CAFFE_GET_BLOCKS_HALF(n2)), dim3(CAFFE_CUDA_NUM_THREADS_HALF), 0, stream,
n2, alpha, reinterpret_cast<const __half2*>(x), reinterpret_cast<__half2*>(y));
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
hipStream_t stream = Caffe::thread_stream();
CUDA_CHECK(hipMemcpyAsync(Y, X, N, hipMemcpyDefault, stream));
CUDA_CHECK(hipStreamSynchronize(stream));
}
}
template<>
void caffe_gpu_scal<float>(const int N, const float alpha, float* X,
hipblasHandle_t cublas_handle, bool sync) {
if (alpha == 1.F) { return; }
hipStream_t stream;
CUBLAS_CHECK(hipblasGetStream(cublas_handle, &stream));
CUBLAS_CHECK(hipblasSscal(cublas_handle, N, &alpha, X, 1));
if (sync) {
CUDA_CHECK(hipStreamSynchronize(stream));
}
}
template<>
void caffe_gpu_scal<double>(const int N, const double alpha, double* X,
hipblasHandle_t cublas_handle, bool sync) {
if (alpha == 1.0) { return; }
hipStream_t stream;
CUBLAS_CHECK(hipblasGetStream(cublas_handle, &stream));
CUBLAS_CHECK(hipblasDscal(cublas_handle, N, &alpha, X, 1));
if (sync) {
CUDA_CHECK(hipStreamSynchronize(stream));
}
}
template<>
void caffe_gpu_scal<float>(const int N, const float alpha, float* X) {
caffe_gpu_scal(N, alpha, X, Caffe::cublas_handle(), true);
}
template<>
void caffe_gpu_scal<double>(const int N, const double alpha, double* X) {
caffe_gpu_scal(N, alpha, X, Caffe::cublas_handle(), true);
}
__global__
void scale_in_place_kernel(const int n, const __half2 alpha, __half2* x) {
CUDA_KERNEL_LOOP(idx, n) {
x[idx] = hmul2(alpha, x[idx]);
}
}
// local helper
void caffe_gpu_scal_float16(const int n, const float16 alpha, float16* x, hipStream_t stream,
bool sync) {
if (alpha == 1.F) { return; }
const unsigned int n2 = even(n) / 2;
unsigned int alphax = (unsigned int) alpha.getx();
__half2 alpha2;
alpha2.x = alphax + (alphax << 16);
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( scale_in_place_kernel) , dim3(CAFFE_GET_BLOCKS_HALF(n2)), dim3(CAFFE_CUDA_NUM_THREADS_HALF), 0, stream,
n2, alpha2, reinterpret_cast<__half2*>(x));
CUDA_POST_KERNEL_CHECK;
if (sync) {
CUDA_CHECK(hipStreamSynchronize(stream));
}
}
template<>
void caffe_gpu_scal<float16>(const int n, const float16 alpha, float16* x) {
caffe_gpu_scal_float16(n, alpha, x, Caffe::thread_stream(), true);
}
template<>
void caffe_gpu_scal<float16>(const int n, const float16 alpha, float16* x,
hipblasHandle_t cublas_handle, bool sync) {
hipStream_t stream;
CUBLAS_CHECK(hipblasGetStream(cublas_handle, &stream));
// use cublasHscal when it will become available
caffe_gpu_scal_float16(n, alpha, x, stream, sync);
}
// alpha may exceed the fp16 range, so do the multiply in fp32 and clip on the way back:
__global__
void scale_in_place_kernel_fp16(const int n, const float alpha, __half2* x) {
CUDA_KERNEL_LOOP(idx, n) {
float2 x2 = __half22float2(x[idx]);
x2.x *= alpha;
x2.y *= alpha;
x[idx] = float22half2_clip(x2);
}
}
void caffe_gpu_scal_fp16(const int n, const float alpha, float16* x,
hipblasHandle_t cublas_handle, bool sync) {
if (alpha == 1.F) { return; }
const unsigned int n2 = even(n) / 2;
hipStream_t stream;
CUBLAS_CHECK(hipblasGetStream(cublas_handle, &stream));
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( scale_in_place_kernel_fp16), dim3(CAFFE_GET_BLOCKS_HALF(n2)), dim3(CAFFE_CUDA_NUM_THREADS_HALF), 0, stream,
n2, alpha, reinterpret_cast<__half2*>(x));
CUDA_POST_KERNEL_CHECK;
if (sync) {
CUDA_CHECK(hipStreamSynchronize(stream));
}
}
template<>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template<>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
template<typename Dtype, typename Mtype>
__global__
void axpby_kernel(const int N, const Mtype alpha, const Dtype* X, const Mtype beta, Dtype* Y) {
CUDA_KERNEL_LOOP(idx, N) {
Y[idx] = alpha * X[idx] + beta * Y[idx];
}
}
template<>
void caffe_gpu_axpby<float16>(const int N, const float16 alpha,
const float16* X, const float16 beta, float16* Y) {
hipStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( axpby_kernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, N, alpha, X, beta, Y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
template<>
void caffe_gpu_dot<float, float>(const int n, const float* x, const float* y, float* out) {
CUBLAS_CHECK(hipblasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
CUDA_CHECK(hipStreamSynchronize(Caffe::thread_stream()));
}
template<>
void caffe_gpu_dot<double, double>(const int n, const double* x, const double* y, double* out) {
CUBLAS_CHECK(hipblasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
CUDA_CHECK(hipStreamSynchronize(Caffe::thread_stream()));
}
template<>
void caffe_gpu_dot<double, float>(const int n, const double* x, const double* y, float* outf) {
double out = 0.;
CUBLAS_CHECK(hipblasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, &out));
CUDA_CHECK(hipStreamSynchronize(Caffe::thread_stream()));
*outf = static_cast<float>(out);
}
template<typename Dtype, typename Mtype>
__global__
void gpu_dot_kernel(const int N, const Dtype* x, const Dtype* y, Mtype* out) {
__shared__
Mtype cache[CAFFE_CUDA_NUM_THREADS];
const int tidx = threadIdx.x;
cache[tidx] = 0.;
for (int i = tidx; i < N; i += blockDim.x) {
cache[tidx] += static_cast<Mtype>(x[i]) * static_cast<Mtype>(y[i]);
}
__syncthreads();
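// Editor's note: classic shared-memory tree reduction; it assumes blockDim.x ==
// CAFFE_CUDA_NUM_THREADS and that this value is a power of two. The kernel is
// launched with a single block, so thread 0 ends up holding the full dot product.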
for (int s = CAFFE_CUDA_NUM_THREADS / 2; s > 0; s >>= 1) {
if (tidx < s) cache[tidx] += cache[tidx + s];
__syncthreads();
}
if (tidx == 0) *out = cache[tidx];
}
// TODO unit test
template<>
void
caffe_gpu_dot<float16, float16>(const int n, const float16* x, const float16* y, float16* out) {
float* res = reinterpret_cast<float*>(GPUMemory::pinned_buffer(sizeof(float)));
hipStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( gpu_dot_kernel), dim3(1), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, n, x, y, res);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
*out = static_cast<float16>(*res);
}
template<>
void caffe_gpu_dot<float16, float>(const int n, const float16* x, const float16* y, float* out) {
float* res = reinterpret_cast<float*>(GPUMemory::pinned_buffer(sizeof(float)));
hipStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( gpu_dot_kernel), dim3(1), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, n, x, y, res);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
*out = *res;
}
template<>
void caffe_gpu_asum<float, float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(hipblasSasum(Caffe::cublas_handle(), n, x, 1, y));
CUDA_CHECK(hipStreamSynchronize(Caffe::thread_stream()));
}
template<>
void caffe_gpu_asum<float, double>(const int n, const float* x, double* y) {
float yf;
CUBLAS_CHECK(hipblasSasum(Caffe::cublas_handle(), n, x, 1, &yf));
CUDA_CHECK(hipStreamSynchronize(Caffe::thread_stream()));
*y = yf;
}
template<>
void caffe_gpu_asum<double, double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(hipblasDasum(Caffe::cublas_handle(), n, x, 1, y));
CUDA_CHECK(hipStreamSynchronize(Caffe::thread_stream()));
}
template<>
void caffe_gpu_asum<double, float>(const int n, const double* x, float* y) {
double yd;
CUBLAS_CHECK(hipblasDasum(Caffe::cublas_handle(), n, x, 1, &yd));
CUDA_CHECK(hipStreamSynchronize(Caffe::thread_stream()));
*y = yd;
}
template<>
void caffe_gpu_scale<double>(const int n, const double alpha, const double* x,
double* y) {
CUBLAS_CHECK(hipblasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
CUDA_CHECK(hipStreamSynchronize(Caffe::thread_stream()));
}
template<>
void caffe_gpu_scale<float>(const int n, const float alpha, const float* x,
float* y) {
CUBLAS_CHECK(hipblasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
CUDA_CHECK(hipStreamSynchronize(Caffe::thread_stream()));
}
__global__
void scale_kernel(const int n, const __half2 alpha,
const __half2* x, __half2* y) {
CUDA_KERNEL_LOOP(idx, n) {
y[idx] = hmul2(alpha, x[idx]);
}
}
template<>
void caffe_gpu_scale<float16>(const int n, const float16 alpha,
const float16* x, float16* y) {
hipStream_t stream = Caffe::thread_stream();
const unsigned int n2 = even(n) / 2;
unsigned int alphax = (unsigned int) alpha.getx();
__half2 alpha2;
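// The 16-bit half pattern is packed into both lanes of a __half2 so the
// kernel scales two adjacent elements per thread; even(n)/2 above is the
// number of __half2 pairs to process.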
alpha2.x = alphax + (alphax << 16);
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( scale_kernel) , dim3(CAFFE_GET_BLOCKS_HALF(n2)), dim3(CAFFE_CUDA_NUM_THREADS_HALF), 0, stream,
n2, alpha2, reinterpret_cast<const __half2*>(x), reinterpret_cast<__half2*>(y));
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
template<typename Dtype>
__global__ void set_kernel(const size_t n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template<typename Dtype>
void caffe_gpu_set(const size_t N, const Dtype alpha, Dtype* Y, bool sync, hipStream_t stream) {
if (stream == nullptr) {
stream = Caffe::thread_stream();
}
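// alpha == 0 takes the memset fast path: the all-zero bit pattern encodes 0
// for integers and IEEE floating-point types alike, so a byte-wise zero fill
// is equivalent to (and cheaper than) launching the fill kernel.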
if (alpha == 0) {
CUDA_CHECK(hipMemsetAsync(Y, 0, sizeof(Dtype) * N, stream)); // NOLINT(caffe/alt_fn)
} else {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( set_kernel) , dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, N, alpha, Y);
CUDA_POST_KERNEL_CHECK;
}
if (sync) {
CUDA_CHECK(hipStreamSynchronize(stream));
}
}
template void
caffe_gpu_set<int>(const size_t N, const int alpha, int* Y, bool sync, hipStream_t stream);
template void
caffe_gpu_set<float>(const size_t N, const float alpha, float* Y, bool sync, hipStream_t stream);
template void caffe_gpu_set<double>(const size_t N, const double alpha, double* Y, bool sync,
hipStream_t stream);
template void caffe_gpu_set<float16>(const size_t N, const float16 alpha, float16* Y, bool sync,
hipStream_t stream);
template<typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template<>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
hipStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, N, alpha, Y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
template<>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
hipStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, N, alpha, Y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
template<>
void caffe_gpu_add_scalar(const int N, const float16 alpha, float16* Y) {
hipStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, N, alpha, Y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
template<typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template<>
__global__ void add_kernel<__half2>(const int n, const __half2* a, const __half2* b, __half2* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = hadd2(a[index], b[index]);
}
}
template<>
void caffe_gpu_add<float>(const int N, const float* a, const float* b, float* y) {
hipStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, N, a, b, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
template<>
void caffe_gpu_add<double>(const int N, const double* a, const double* b, double* y) {
hipStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, N, a, b, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
template<>
void caffe_gpu_add<float16>(const int N, const float16* a, const float16* b, float16* y) {
hipStream_t stream = Caffe::thread_stream();
const unsigned int n2 = even(N) / 2;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel), dim3(CAFFE_GET_BLOCKS_HALF(n2)), dim3(CAFFE_CUDA_NUM_THREADS_HALF), 0, stream,
n2, reinterpret_cast<const __half2*>(a), reinterpret_cast<const __half2*>(b),
reinterpret_cast<__half2*>(y));
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
template<typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template<>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b, float* y) {
hipStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, N, a, b, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
template<>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b, double* y) {
hipStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, N, a, b, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
template<>
void caffe_gpu_sub<float16>(const int N, const float16* a, const float16* b, float16* y) {
hipStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, N, a, b, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
} // namespace caffe
| 81d0094cb6f5c51b886301ddd43c2c98f40ea9ea.cu | #include <cuda_fp16.h>
#include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <algorithm>
#include <device_launch_parameters.h>
#include "caffe/common.hpp"
#include "caffe/util/gpu_math_functions.cuh"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template<>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
CUDA_CHECK(cudaStreamSynchronize(Caffe::thread_stream()));
}
template<>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
CUDA_CHECK(cudaStreamSynchronize(Caffe::thread_stream()));
}
template<>
void caffe_gpu_gemm<float16>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float16 alpha, const float16* A, const float16* B, const float16 beta,
float16* C) {
// Note that cublas follows fortran order.
const int lda = (TransA == CblasNoTrans) ? K : M;
const int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
// TODO
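// True fp16 arithmetic (cublasHgemm) is only used on devices reporting
// capability 600 (P100); elsewhere the data stays fp16 in memory but the
// accumulation runs in fp32 through cublasSgemmEx (pseudo-fp16).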
if (Caffe::device_capability(Caffe::current_device()) == 600) {
CUBLAS_CHECK(cublasHgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, alpha.gethp(), B->gethp(), ldb,
A->gethp(), lda, beta.gethp(), C->gethp(), N));
} else {
float alpha_fp32 = static_cast<float>(alpha);
float beta_fp32 = static_cast<float>(beta);
CUBLAS_CHECK(cublasSgemmEx(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha_fp32, B->gethp(), CAFFE_DATA_HALF, ldb,
A->gethp(), CAFFE_DATA_HALF, lda, &beta_fp32, C->gethp(), CAFFE_DATA_HALF, N));
}
CUDA_CHECK(cudaStreamSynchronize(Caffe::thread_stream()));
}
template<>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
CUDA_CHECK(cudaStreamSynchronize(Caffe::thread_stream()));
}
template<>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
CUDA_CHECK(cudaStreamSynchronize(Caffe::thread_stream()));
}
template<>
void caffe_gpu_gemv<float16>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float16 alpha, const float16* A, const float16* x,
const float16 beta, float16* y) {
float alpha_fp32 = static_cast<float>(alpha);
float beta_fp32 = static_cast<float>(beta);
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
int m = (cuTransA == CUBLAS_OP_N) ? N : M;
int k = (cuTransA == CUBLAS_OP_N) ? M : N;
int LDA = (cuTransA == CUBLAS_OP_N) ? m : k;
int LDC = m;
CUBLAS_CHECK(cublasSgemmEx(Caffe::cublas_handle(), cuTransA, CUBLAS_OP_N,
m, 1, k, &alpha_fp32, A, CAFFE_DATA_HALF, LDA,
x, CAFFE_DATA_HALF, k, &beta_fp32,
y, CAFFE_DATA_HALF, LDC));
CUDA_CHECK(cudaStreamSynchronize(Caffe::thread_stream()));
}
template<>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y, void* handle) {
cublasHandle_t cublas_handle =
handle == nullptr ? Caffe::cublas_handle() : reinterpret_cast<cublasHandle_t>(handle);
cudaStream_t stream;
CUBLAS_CHECK(cublasGetStream(cublas_handle, &stream));
CUBLAS_CHECK(cublasSaxpy(cublas_handle, N, &alpha, X, 1, Y, 1));
CUDA_CHECK(cudaStreamSynchronize(stream));
}
template<>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y, void* handle) {
cublasHandle_t cublas_handle =
handle == nullptr ? Caffe::cublas_handle() : reinterpret_cast<cublasHandle_t>(handle);
cudaStream_t stream;
CUBLAS_CHECK(cublasGetStream(cublas_handle, &stream));
CUBLAS_CHECK(cublasDaxpy(cublas_handle, N, &alpha, X, 1, Y, 1));
CUDA_CHECK(cudaStreamSynchronize(stream));
}
template<typename Dtype, typename Mtype>
__global__
void axpy_kernel(const int N, const Mtype alpha, const Dtype* x, Dtype* y) {
for (int idx = threadIdx.x + blockDim.x * blockIdx.x; idx < N;
idx += blockDim.x * gridDim.x) {
y[idx] = alpha * (Mtype) x[idx] + (Mtype) y[idx];
}
}
template<>
__global__
void axpy_kernel<__half2, __half2>(const int N, const __half2 alpha, const __half2* x, __half2* y) {
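// sm_53+ has native half2 fused multiply-add (__hfma2); older architectures
// lack half arithmetic, so the fallback unpacks to float2, performs the axpy
// in fp32, and re-packs with saturation via float22half2_clip.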
#if __CUDA_ARCH__ >= 530
CUDA_KERNEL_LOOP(idx, N) {
y[idx] = __hfma2(alpha, x[idx], y[idx]);
}
#else
float2 a = __half22float2(alpha);
float2 x2, y2;
CUDA_KERNEL_LOOP(idx, N) {
x2 = __half22float2(x[idx]);
y2 = __half22float2(y[idx]);
y2.x = a.x * x2.x + y2.x;
y2.y = a.y * x2.y + y2.y;
y[idx] = float22half2_clip(y2);
}
#endif
}
template<>
__global__
void axpy_kernel<__half2, float>(const int N, const float alpha, const __half2* x, __half2* y) {
#if __CUDA_ARCH__ >= 530
__half2 a = __float2half2_rn(alpha);
CUDA_KERNEL_LOOP(idx, N) {
y[idx] = __hfma2(a, x[idx], y[idx]);
}
#else
float2 x2, y2;
CUDA_KERNEL_LOOP(idx, N) {
x2 = __half22float2(x[idx]);
y2 = __half22float2(y[idx]);
y2.x = alpha * x2.x + y2.x;
y2.y = alpha * x2.y + y2.y;
y[idx] = float22half2_clip(y2);
}
#endif
}
template<>
void caffe_gpu_axpy<float16>(const int N, const float16 alpha, const float16* x, float16* y,
void* handle) {
cublasHandle_t cublas_handle =
handle == nullptr ? Caffe::cublas_handle() : reinterpret_cast<cublasHandle_t>(handle);
cudaStream_t stream;
CUBLAS_CHECK(cublasGetStream(cublas_handle, &stream));
const unsigned int n2 = even(N) / 2;
unsigned int alphax = (unsigned int) alpha.getx();
__half2 alpha2;
alpha2.x = alphax + (alphax << 16);
// NOLINT_NEXT_LINE(whitespace/operators)
axpy_kernel <<<CAFFE_GET_BLOCKS_HALF(n2), CAFFE_CUDA_NUM_THREADS_HALF, 0, stream>>>
(n2, alpha2, reinterpret_cast<const __half2*>(x), reinterpret_cast<__half2*>(y));
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
void caffe_gpu_axpy_extfp16(const int N, const float alpha, const float16* x, float16* y) {
cudaStream_t stream = Caffe::thread_stream();
const unsigned int n2 = even(N) / 2;
// NOLINT_NEXT_LINE(whitespace/operators)
axpy_kernel <<<CAFFE_GET_BLOCKS_HALF(n2), CAFFE_CUDA_NUM_THREADS_HALF, 0, stream>>>
(n2, alpha, reinterpret_cast<const __half2*>(x), reinterpret_cast<__half2*>(y));
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
cudaStream_t stream = Caffe::thread_stream();
CUDA_CHECK(cudaMemcpyAsync(Y, X, N, cudaMemcpyDefault, stream));
CUDA_CHECK(cudaStreamSynchronize(stream));
}
}
template<>
void caffe_gpu_scal<float>(const int N, const float alpha, float* X,
cublasHandle_t cublas_handle, bool sync) {
if (alpha == 1.F) { return; }
cudaStream_t stream;
CUBLAS_CHECK(cublasGetStream(cublas_handle, &stream));
CUBLAS_CHECK(cublasSscal(cublas_handle, N, &alpha, X, 1));
if (sync) {
CUDA_CHECK(cudaStreamSynchronize(stream));
}
}
template<>
void caffe_gpu_scal<double>(const int N, const double alpha, double* X,
cublasHandle_t cublas_handle, bool sync) {
if (alpha == 1.0) { return; }
cudaStream_t stream;
CUBLAS_CHECK(cublasGetStream(cublas_handle, &stream));
CUBLAS_CHECK(cublasDscal(cublas_handle, N, &alpha, X, 1));
if (sync) {
CUDA_CHECK(cudaStreamSynchronize(stream));
}
}
template<>
void caffe_gpu_scal<float>(const int N, const float alpha, float* X) {
caffe_gpu_scal(N, alpha, X, Caffe::cublas_handle(), true);
}
template<>
void caffe_gpu_scal<double>(const int N, const double alpha, double* X) {
caffe_gpu_scal(N, alpha, X, Caffe::cublas_handle(), true);
}
__global__
void scale_in_place_kernel(const int n, const __half2 alpha, __half2* x) {
CUDA_KERNEL_LOOP(idx, n) {
x[idx] = hmul2(alpha, x[idx]);
}
}
// local helper
void caffe_gpu_scal_float16(const int n, const float16 alpha, float16* x, cudaStream_t stream,
bool sync) {
if (alpha == 1.F) { return; }
const unsigned int n2 = even(n) / 2;
unsigned int alphax = (unsigned int) alpha.getx();
__half2 alpha2;
alpha2.x = alphax + (alphax << 16);
// NOLINT_NEXT_LINE(whitespace/operators)
scale_in_place_kernel <<<CAFFE_GET_BLOCKS_HALF(n2), CAFFE_CUDA_NUM_THREADS_HALF, 0, stream>>>
(n2, alpha2, reinterpret_cast<__half2*>(x));
CUDA_POST_KERNEL_CHECK;
if (sync) {
CUDA_CHECK(cudaStreamSynchronize(stream));
}
}
template<>
void caffe_gpu_scal<float16>(const int n, const float16 alpha, float16* x) {
caffe_gpu_scal_float16(n, alpha, x, Caffe::thread_stream(), true);
}
template<>
void caffe_gpu_scal<float16>(const int n, const float16 alpha, float16* x,
cublasHandle_t cublas_handle, bool sync) {
cudaStream_t stream;
CUBLAS_CHECK(cublasGetStream(cublas_handle, &stream));
// use cublasHscal when it will become available
caffe_gpu_scal_float16(n, alpha, x, stream, sync);
}
// alpha might be big:
__global__
void scale_in_place_kernel_fp16(const int n, const float alpha, __half2* x) {
CUDA_KERNEL_LOOP(idx, n) {
float2 x2 = __half22float2(x[idx]);
x2.x *= alpha;
x2.y *= alpha;
x[idx] = float22half2_clip(x2);
}
}
void caffe_gpu_scal_fp16(const int n, const float alpha, float16* x,
cublasHandle_t cublas_handle, bool sync) {
if (alpha == 1.F) { return; }
const unsigned int n2 = even(n) / 2;
cudaStream_t stream;
CUBLAS_CHECK(cublasGetStream(cublas_handle, &stream));
// NOLINT_NEXT_LINE(whitespace/operators)
scale_in_place_kernel_fp16<<<CAFFE_GET_BLOCKS_HALF(n2), CAFFE_CUDA_NUM_THREADS_HALF, 0, stream>>>
(n2, alpha, reinterpret_cast<__half2*>(x));
CUDA_POST_KERNEL_CHECK;
if (sync) {
CUDA_CHECK(cudaStreamSynchronize(stream));
}
}
template<>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template<>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
template<typename Dtype, typename Mtype>
__global__
void axpby_kernel(const int N, const Mtype alpha, const Dtype* X, const Mtype beta, Dtype* Y) {
CUDA_KERNEL_LOOP(idx, N) {
Y[idx] = alpha * X[idx] + beta * Y[idx];
}
}
template<>
void caffe_gpu_axpby<float16>(const int N, const float16 alpha,
const float16* X, const float16 beta, float16* Y) {
cudaStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
axpby_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, stream>>>(N, alpha, X, beta, Y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
template<>
void caffe_gpu_dot<float, float>(const int n, const float* x, const float* y, float* out) {
CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
CUDA_CHECK(cudaStreamSynchronize(Caffe::thread_stream()));
}
template<>
void caffe_gpu_dot<double, double>(const int n, const double* x, const double* y, double* out) {
CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
CUDA_CHECK(cudaStreamSynchronize(Caffe::thread_stream()));
}
template<>
void caffe_gpu_dot<double, float>(const int n, const double* x, const double* y, float* outf) {
double out = 0.;
CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, &out));
CUDA_CHECK(cudaStreamSynchronize(Caffe::thread_stream()));
*outf = static_cast<float>(out);
}
template<typename Dtype, typename Mtype>
__global__
void gpu_dot_kernel(const int N, const Dtype* x, const Dtype* y, Mtype* out) {
__shared__
Mtype cache[CAFFE_CUDA_NUM_THREADS];
const int tidx = threadIdx.x;
cache[tidx] = 0.;
for (int i = tidx; i < N; i += blockDim.x) {
cache[tidx] += static_cast<Mtype>(x[i]) * static_cast<Mtype>(y[i]);
}
__syncthreads();
for (int s = CAFFE_CUDA_NUM_THREADS / 2; s > 0; s >>= 1) {
if (tidx < s) cache[tidx] += cache[tidx + s];
__syncthreads();
}
if (tidx == 0) *out = cache[tidx];
}
// TODO unit test
template<>
void
caffe_gpu_dot<float16, float16>(const int n, const float16* x, const float16* y, float16* out) {
float* res = reinterpret_cast<float*>(GPUMemory::pinned_buffer(sizeof(float)));
cudaStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
gpu_dot_kernel<<<1, CAFFE_CUDA_NUM_THREADS, 0, stream>>>(n, x, y, res);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
*out = static_cast<float16>(*res);
}
template<>
void caffe_gpu_dot<float16, float>(const int n, const float16* x, const float16* y, float* out) {
float* res = reinterpret_cast<float*>(GPUMemory::pinned_buffer(sizeof(float)));
cudaStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
gpu_dot_kernel<<<1, CAFFE_CUDA_NUM_THREADS, 0, stream>>>(n, x, y, res);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
*out = *res;
}
template<>
void caffe_gpu_asum<float, float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y));
CUDA_CHECK(cudaStreamSynchronize(Caffe::thread_stream()));
}
template<>
void caffe_gpu_asum<float, double>(const int n, const float* x, double* y) {
float yf;
CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, &yf));
CUDA_CHECK(cudaStreamSynchronize(Caffe::thread_stream()));
*y = yf;
}
template<>
void caffe_gpu_asum<double, double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, 1, y));
CUDA_CHECK(cudaStreamSynchronize(Caffe::thread_stream()));
}
template<>
void caffe_gpu_asum<double, float>(const int n, const double* x, float* y) {
double yd;
CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, 1, &yd));
CUDA_CHECK(cudaStreamSynchronize(Caffe::thread_stream()));
*y = yd;
}
template<>
void caffe_gpu_scale<double>(const int n, const double alpha, const double* x,
double* y) {
CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
CUDA_CHECK(cudaStreamSynchronize(Caffe::thread_stream()));
}
template<>
void caffe_gpu_scale<float>(const int n, const float alpha, const float* x,
float* y) {
CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
CUDA_CHECK(cudaStreamSynchronize(Caffe::thread_stream()));
}
__global__
void scale_kernel(const int n, const __half2 alpha,
const __half2* x, __half2* y) {
CUDA_KERNEL_LOOP(idx, n) {
y[idx] = hmul2(alpha, x[idx]);
}
}
template<>
void caffe_gpu_scale<float16>(const int n, const float16 alpha,
const float16* x, float16* y) {
cudaStream_t stream = Caffe::thread_stream();
const unsigned int n2 = even(n) / 2;
unsigned int alphax = (unsigned int) alpha.getx();
__half2 alpha2;
alpha2.x = alphax + (alphax << 16);
// NOLINT_NEXT_LINE(whitespace/operators)
scale_kernel <<<CAFFE_GET_BLOCKS_HALF(n2), CAFFE_CUDA_NUM_THREADS_HALF, 0, stream>>>
(n2, alpha2, reinterpret_cast<const __half2*>(x), reinterpret_cast<__half2*>(y));
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
template<typename Dtype>
__global__ void set_kernel(const size_t n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template<typename Dtype>
void caffe_gpu_set(const size_t N, const Dtype alpha, Dtype* Y, bool sync, cudaStream_t stream) {
if (stream == nullptr) {
stream = Caffe::thread_stream();
}
if (alpha == 0) {
CUDA_CHECK(cudaMemsetAsync(Y, 0, sizeof(Dtype) * N, stream)); // NOLINT(caffe/alt_fn)
} else {
// NOLINT_NEXT_LINE(whitespace/operators)
set_kernel <<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, stream>>> (N, alpha, Y);
CUDA_POST_KERNEL_CHECK;
}
if (sync) {
CUDA_CHECK(cudaStreamSynchronize(stream));
}
}
template void
caffe_gpu_set<int>(const size_t N, const int alpha, int* Y, bool sync, cudaStream_t stream);
template void
caffe_gpu_set<float>(const size_t N, const float alpha, float* Y, bool sync, cudaStream_t stream);
template void caffe_gpu_set<double>(const size_t N, const double alpha, double* Y, bool sync,
cudaStream_t stream);
template void caffe_gpu_set<float16>(const size_t N, const float16 alpha, float16* Y, bool sync,
cudaStream_t stream);
template<typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template<>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
cudaStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, stream>>>(N, alpha, Y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
template<>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
cudaStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, stream>>>(N, alpha, Y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
template<>
void caffe_gpu_add_scalar(const int N, const float16 alpha, float16* Y) {
cudaStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, stream>>>(N, alpha, Y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
template<typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template<>
__global__ void add_kernel<__half2>(const int n, const __half2* a, const __half2* b, __half2* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = hadd2(a[index], b[index]);
}
}
template<>
void caffe_gpu_add<float>(const int N, const float* a, const float* b, float* y) {
cudaStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, stream>>>(N, a, b, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
template<>
void caffe_gpu_add<double>(const int N, const double* a, const double* b, double* y) {
cudaStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, stream>>>(N, a, b, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
template<>
void caffe_gpu_add<float16>(const int N, const float16* a, const float16* b, float16* y) {
cudaStream_t stream = Caffe::thread_stream();
const unsigned int n2 = even(N) / 2;
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<<<CAFFE_GET_BLOCKS_HALF(n2), CAFFE_CUDA_NUM_THREADS_HALF, 0, stream>>>
(n2, reinterpret_cast<const __half2*>(a), reinterpret_cast<const __half2*>(b),
reinterpret_cast<__half2*>(y));
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
template<typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template<>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b, float* y) {
cudaStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, stream>>>(N, a, b, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
template<>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b, double* y) {
cudaStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, stream>>>(N, a, b, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
template<>
void caffe_gpu_sub<float16>(const int N, const float16* a, const float16* b, float16* y) {
cudaStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, stream>>> (N, a, b, y);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
} // namespace caffe
|
51dfb96313183d00209b9046d2052a565dde72e5.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2021 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 with the LLVM exception
* (the "License"); you may not use this file except in compliance with
* the License.
*
* You may obtain a copy of the License at
*
* http://llvm.org/foundation/relicensing/LICENSE.txt
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <nvbench/cuda_call.cuh>
#include <fmt/format.h>
#include <cstdlib>
#include <stdexcept>
#include <string>
namespace nvbench
{
namespace cuda_call
{
void throw_error(const std::string &filename,
std::size_t lineno,
const std::string &command,
hipError_t error_code)
{
throw std::runtime_error(fmt::format("{}:{}: Cuda API call returned error: "
"{}: {}\nCommand: '{}'",
filename,
lineno,
hipGetErrorName(error_code),
hipGetErrorString(error_code),
command));
}
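// exit_error mirrors throw_error but terminates the process instead of
// throwing, for contexts where an exception cannot safely propagate (e.g.
// destructors or noexcept paths).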
void exit_error(const std::string &filename,
std::size_t lineno,
const std::string &command,
hipError_t error_code)
{
fmt::print(stderr,
"{}:{}: Cuda API call returned error: {}: {}\nCommand: '{}'",
filename,
lineno,
hipGetErrorName(error_code),
hipGetErrorString(error_code),
command);
std::exit(EXIT_FAILURE);
}
} // namespace cuda_call
} // namespace nvbench
| 51dfb96313183d00209b9046d2052a565dde72e5.cu | /*
* Copyright 2021 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 with the LLVM exception
* (the "License"); you may not use this file except in compliance with
* the License.
*
* You may obtain a copy of the License at
*
* http://llvm.org/foundation/relicensing/LICENSE.txt
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <nvbench/cuda_call.cuh>
#include <fmt/format.h>
#include <cstdlib>
#include <stdexcept>
#include <string>
namespace nvbench
{
namespace cuda_call
{
void throw_error(const std::string &filename,
std::size_t lineno,
const std::string &command,
cudaError_t error_code)
{
throw std::runtime_error(fmt::format("{}:{}: Cuda API call returned error: "
"{}: {}\nCommand: '{}'",
filename,
lineno,
cudaGetErrorName(error_code),
cudaGetErrorString(error_code),
command));
}
void exit_error(const std::string &filename,
std::size_t lineno,
const std::string &command,
cudaError_t error_code)
{
fmt::print(stderr,
"{}:{}: Cuda API call returned error: {}: {}\nCommand: '{}'",
filename,
lineno,
cudaGetErrorName(error_code),
cudaGetErrorString(error_code),
command);
std::exit(EXIT_FAILURE);
}
} // namespace cuda_call
} // namespace nvbench
|
d16adec3a72d1bee407813131e0467b9eb741f73.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHUNN.h"
#include "common.h"
#include "THHHalf.h"
#include "THHHalfAutoNumerics.cuh"
#include "THHDeviceTensor.cuh"
#include "THHDeviceTensorUtils.cuh"
#include "THHDeviceUtils.cuh"
#include <stdio.h>
#include <assert.h>
static const int NTHREADS = 32;
template <typename Dtype>
__global__ void cunn_ClassNLLCriterion_updateOutput_kernel1(Dtype *output,
Dtype *total_weight,
Dtype *input,
THCIndex_t *target,
Dtype *weights,
int size_average,
int n_classes,
int64_t ignore_index) {
assert(threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0);
// TODO: T4951791 Reuse code between updateOutput_kernel1 and
// updateOutput_kernel.
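// TH_INDEX_BASE compensates for Lua Torch's 1-based class labels (it is 1 in
// Lua builds, 0 otherwise), so t below is always a 0-based class index.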
int t = (int) *target - TH_INDEX_BASE;
if (t != (int) ignore_index) {
assert(t >= 0 && t < n_classes);
Dtype cur_weight = weights ? weights[t] : ScalarConvert<int, Dtype>::to(1);
*output = -cur_weight * input[t];
*total_weight = cur_weight;
if (size_average && *total_weight > 0) {
*output /= *total_weight;
}
}
}
template <typename Dtype>
__global__ void ClassNLLCriterion_updateOutput_no_reduce_kernel(
int batch_size,
THCDeviceTensor<Dtype, 2> input,
THCDeviceTensor<THCIndex_t, 1> target,
THCDeviceTensor<Dtype, 1> output,
Dtype *weights,
int ignore_index) {
CUDA_KERNEL_LOOP(index, batch_size) {
int cur_target = target[index] - TH_INDEX_BASE;
if (cur_target == ignore_index) {
output[index] = ScalarConvert<int, Dtype>::to(0);
continue;
}
Dtype weight =
weights ? weights[cur_target] : ScalarConvert<int, Dtype>::to(1);
output[index] = -weight * input[index][cur_target];
}
}
template <typename Dtype>
__global__ void ClassNLLCriterion_updateGradInput_no_reduce_kernel(
int batch_size,
THCDeviceTensor<THCIndex_t, 1> target,
THCDeviceTensor<Dtype, 1> gradOutput,
THCDeviceTensor<Dtype, 2> gradInput,
Dtype *weights,
int ignore_index) {
CUDA_KERNEL_LOOP(index, batch_size) {
int cur_target = target[index] - TH_INDEX_BASE;
if (cur_target == ignore_index) {
continue;
}
Dtype weight =
weights ? weights[cur_target] : ScalarConvert<int, Dtype>::to(1);
gradInput[index][cur_target] = -weight * gradOutput[index];
}
}
template <typename Dtype, typename Acctype>
__global__ void cunn_ClassNLLCriterion_updateOutput_kernel(Dtype *output,
Dtype *total_weight,
Dtype *input,
THCIndex_t *target,
Dtype *weights,
int size_average,
int nframe,
int ndim,
int n_classes,
int64_t ignore_index) {
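// Weighted NLL over a minibatch: input holds log-probabilities, so each of
// the NTHREADS threads accumulates -w[t] * input[i][t] plus the weight w[t]
// for its strided share of the nframe samples; thread 0 then reduces the
// partials serially and normalizes by the total weight when size_average is
// set.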
__shared__ Acctype shInputs[NTHREADS], acc_weight[NTHREADS];
int i, t;
Dtype cur_weight;
shInputs[threadIdx.x] = ScalarConvert<int, Acctype>::to(0);
acc_weight[threadIdx.x] = ScalarConvert<int, Acctype>::to(0);
for (i = threadIdx.x; i < nframe; i += NTHREADS) {
t = target[i] - TH_INDEX_BASE;
if (t != (int) ignore_index) {
assert(t >= 0 && t < n_classes);
cur_weight = weights ? weights[t] : ScalarConvert<int, Dtype>::to(1);
shInputs[threadIdx.x] -= input[i * ndim + t] * cur_weight;
acc_weight[threadIdx.x] += cur_weight;
}
}
__syncthreads();
// TODO: T4951791 Reuse code between updateOutput_kernel1 and
// updateOutput_kernel
if (threadIdx.x == 0) {
*output = *total_weight = ScalarConvert<int, Dtype>::to(0);
Acctype outputAcc = 0;
Acctype total_weightAcc = 0;
for (i = 0; i < NTHREADS; ++i){
// FIXME should we do something here
outputAcc += shInputs[i];
total_weightAcc += acc_weight[i];
}
*total_weight = ScalarConvert<Acctype, Dtype>::to(total_weightAcc);
*output = ScalarConvert<Acctype, Dtype>::to(outputAcc);
if (size_average && *total_weight > 0) {
*output = ScalarConvert<Acctype, Dtype>::to(outputAcc / total_weightAcc);
}
}
}
template <typename Dtype>
__global__ void cunn_ClassNLLCriterion_updateGradInput_kernel1(
Dtype* gradInput,
Dtype* gradOutput,
Dtype* weights,
THCIndex_t* target,
Dtype* total_weight,
int size_average,
int n_classes,
int64_t ignore_index)
{
if (*total_weight <= 0) {
return;
}
Dtype norm = size_average ? (ScalarConvert<int, Dtype>::to(1) / *total_weight) : ScalarConvert<int, Dtype>::to(1);
int t = (int)*target - TH_INDEX_BASE;
if (t != (int) ignore_index) {
assert(t >= 0 && t < n_classes);
gradInput[t] = -(weights ? weights[t] : ScalarConvert<int, Dtype>::to(1)) * norm * gradOutput[0];
}
}
template <typename Dtype>
__global__ void cunn_ClassNLLCriterion_updateGradInput_kernel(
Dtype *gradInput,
Dtype *gradOutput,
THCIndex_t *target,
Dtype *weights,
Dtype *total_weight,
int size_average,
int nframe,
int ndim,
int n_classes,
int64_t ignore_index)
{
if (*total_weight <= 0) {
return;
}
int i, t;
Dtype norm = size_average ? (ScalarConvert<int, Dtype>::to(1) / *total_weight) : ScalarConvert<int, Dtype>::to(1);
for (i = threadIdx.x; i < nframe; i += NTHREADS) {
t = (int)target[i] - TH_INDEX_BASE;
if (t != (int) ignore_index) {
assert(t >= 0 && t < n_classes);
gradInput[i * ndim + t] = -(weights ? weights[t] : ScalarConvert<int, Dtype>::to(1)) * norm * gradOutput[0];
}
}
}
#include "generic/ClassNLLCriterion.cu"
#include "THHGenerateFloatTypes.h"
| d16adec3a72d1bee407813131e0467b9eb741f73.cu | #include "THCUNN.h"
#include "common.h"
#include "THCHalf.h"
#include "THCHalfAutoNumerics.cuh"
#include "THCDeviceTensor.cuh"
#include "THCDeviceTensorUtils.cuh"
#include "THCDeviceUtils.cuh"
#include <stdio.h>
#include <assert.h>
static const int NTHREADS = 32;
template <typename Dtype>
__global__ void cunn_ClassNLLCriterion_updateOutput_kernel1(Dtype *output,
Dtype *total_weight,
Dtype *input,
THCIndex_t *target,
Dtype *weights,
int size_average,
int n_classes,
int64_t ignore_index) {
assert(threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0);
// TODO: T4951791 Reuse code between updateOutput_kernel1 and
// updateOutput_kernel.
int t = (int) *target - TH_INDEX_BASE;
if (t != (int) ignore_index) {
assert(t >= 0 && t < n_classes);
Dtype cur_weight = weights ? weights[t] : ScalarConvert<int, Dtype>::to(1);
*output = -cur_weight * input[t];
*total_weight = cur_weight;
if (size_average && *total_weight > 0) {
*output /= *total_weight;
}
}
}
template <typename Dtype>
__global__ void ClassNLLCriterion_updateOutput_no_reduce_kernel(
int batch_size,
THCDeviceTensor<Dtype, 2> input,
THCDeviceTensor<THCIndex_t, 1> target,
THCDeviceTensor<Dtype, 1> output,
Dtype *weights,
int ignore_index) {
CUDA_KERNEL_LOOP(index, batch_size) {
int cur_target = target[index] - TH_INDEX_BASE;
if (cur_target == ignore_index) {
output[index] = ScalarConvert<int, Dtype>::to(0);
continue;
}
Dtype weight =
weights ? weights[cur_target] : ScalarConvert<int, Dtype>::to(1);
output[index] = -weight * input[index][cur_target];
}
}
template <typename Dtype>
__global__ void ClassNLLCriterion_updateGradInput_no_reduce_kernel(
int batch_size,
THCDeviceTensor<THCIndex_t, 1> target,
THCDeviceTensor<Dtype, 1> gradOutput,
THCDeviceTensor<Dtype, 2> gradInput,
Dtype *weights,
int ignore_index) {
CUDA_KERNEL_LOOP(index, batch_size) {
int cur_target = target[index] - TH_INDEX_BASE;
if (cur_target == ignore_index) {
continue;
}
Dtype weight =
weights ? weights[cur_target] : ScalarConvert<int, Dtype>::to(1);
gradInput[index][cur_target] = -weight * gradOutput[index];
}
}
template <typename Dtype, typename Acctype>
__global__ void cunn_ClassNLLCriterion_updateOutput_kernel(Dtype *output,
Dtype *total_weight,
Dtype *input,
THCIndex_t *target,
Dtype *weights,
int size_average,
int nframe,
int ndim,
int n_classes,
int64_t ignore_index) {
__shared__ Acctype shInputs[NTHREADS], acc_weight[NTHREADS];
int i, t;
Dtype cur_weight;
shInputs[threadIdx.x] = ScalarConvert<int, Acctype>::to(0);
acc_weight[threadIdx.x] = ScalarConvert<int, Acctype>::to(0);
for (i = threadIdx.x; i < nframe; i += NTHREADS) {
t = target[i] - TH_INDEX_BASE;
if (t != (int) ignore_index) {
assert(t >= 0 && t < n_classes);
cur_weight = weights ? weights[t] : ScalarConvert<int, Dtype>::to(1);
shInputs[threadIdx.x] -= input[i * ndim + t] * cur_weight;
acc_weight[threadIdx.x] += cur_weight;
}
}
__syncthreads();
// TODO: T4951791 Reuse code between updateOutput_kernel1 and
// updateOutput_kernel
if (threadIdx.x == 0) {
*output = *total_weight = ScalarConvert<int, Dtype>::to(0);
Acctype outputAcc = 0;
Acctype total_weightAcc = 0;
for (i = 0; i < NTHREADS; ++i){
// FIXME should we do something here
outputAcc += shInputs[i];
total_weightAcc += acc_weight[i];
}
*total_weight = ScalarConvert<Acctype, Dtype>::to(total_weightAcc);
*output = ScalarConvert<Acctype, Dtype>::to(outputAcc);
if (size_average && *total_weight > 0) {
*output = ScalarConvert<Acctype, Dtype>::to(outputAcc / total_weightAcc);
}
}
}
template <typename Dtype>
__global__ void cunn_ClassNLLCriterion_updateGradInput_kernel1(
Dtype* gradInput,
Dtype* gradOutput,
Dtype* weights,
THCIndex_t* target,
Dtype* total_weight,
int size_average,
int n_classes,
int64_t ignore_index)
{
if (*total_weight <= 0) {
return;
}
Dtype norm = size_average ? (ScalarConvert<int, Dtype>::to(1) / *total_weight) : ScalarConvert<int, Dtype>::to(1);
int t = (int)*target - TH_INDEX_BASE;
if (t != (int) ignore_index) {
assert(t >= 0 && t < n_classes);
gradInput[t] = -(weights ? weights[t] : ScalarConvert<int, Dtype>::to(1)) * norm * gradOutput[0];
}
}
template <typename Dtype>
__global__ void cunn_ClassNLLCriterion_updateGradInput_kernel(
Dtype *gradInput,
Dtype *gradOutput,
THCIndex_t *target,
Dtype *weights,
Dtype *total_weight,
int size_average,
int nframe,
int ndim,
int n_classes,
int64_t ignore_index)
{
if (*total_weight <= 0) {
return;
}
int i, t;
Dtype norm = size_average ? (ScalarConvert<int, Dtype>::to(1) / *total_weight) : ScalarConvert<int, Dtype>::to(1);
for (i = threadIdx.x; i < nframe; i += NTHREADS) {
t = (int)target[i] - TH_INDEX_BASE;
if (t != (int) ignore_index) {
assert(t >= 0 && t < n_classes);
gradInput[i * ndim + t] = -(weights ? weights[t] : ScalarConvert<int, Dtype>::to(1)) * norm * gradOutput[0];
}
}
}
#include "generic/ClassNLLCriterion.cu"
#include "THCGenerateFloatTypes.h"
|
aa8823c0dae7b948fdd438c348190f945723cdd9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*** Solving the 2D heat equation explicitly with central differences (CD) ***/
#include <iostream>
#include <fstream>
#include <cmath>
#include <sys/time.h>
void checkErrors(const char *label) {
// we need to synchronise first to catch errors due to
// asynchronous operations that would otherwise
// potentially go unnoticed
hipError_t err;
err = hipDeviceSynchronize();
if (err != hipSuccess) {
const char *e = hipGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)\n", e, label);
}
err = hipGetLastError();
if (err != hipSuccess) {
const char *e = hipGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)\n", e, label);
}
}
double get_time() {
struct timeval tim;
hipDeviceSynchronize();
gettimeofday(&tim, NULL);
return (double) tim.tv_sec + (tim.tv_usec / 1000000.0);
}
__global__ void copy_array(float *u, float *u_prev, int N, int BSZ) {
int i = threadIdx.x;
int j = threadIdx.y;
int I = blockIdx.y * BSZ * N + blockIdx.x * BSZ + j * N + i;
if (I >= N * N) { return; }
u_prev[I] = u[I];
}
// GPU kernel
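// Explicit (FTCS) update of the 2D heat equation on a uniform grid:
// u_new = u + alpha*dt/h^2 * (u_E + u_W + u_N + u_S - 4*u).
// The scheme is stable for alpha*dt/h^2 <= 1/4, which holds for the
// parameters used in test() below.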
__global__ void update(float *u, float *u_prev, int N, float h, float dt, float alpha, int BSZ) {
// Setting up indices
int i = threadIdx.x;
int j = threadIdx.y;
int I = blockIdx.y * BSZ * N + blockIdx.x * BSZ + j * N + i;
if (I >= N * N) { return; }
// if not boundary do
if ((I > N) && (I < N * N - 1 - N) && (I % N != 0) && (I % N != N - 1)) {
u[I] = u_prev[I] +
alpha * dt / (h * h) * (u_prev[I + 1] + u_prev[I - 1] + u_prev[I + N] + u_prev[I - N] - 4 * u_prev[I]);
}
// Boundary conditions are automatically imposed
// as we don't touch boundaries
}
void test(FILE *fp, int N, int BLOCKSIZE) {
hipSetDevice(0);
float xmin = 0.0f;
float xmax = 3.5f;
float ymin = 0.0f;
//float ymax = 2.0f;
float h = (xmax - xmin) / (N - 1);
float dt = 0.00001f;
float alpha = 0.645f;
float time = 0.4f;
int steps = ceil(time / dt);
int I;
float *x = new float[N * N];
float *y = new float[N * N];
float *u = new float[N * N];
float *u_prev = new float[N * N];
// Generate mesh and initial condition
for (int j = 0; j < N; j++) {
for (int i = 0; i < N; i++) {
I = N * j + i;
x[I] = xmin + h * i;
y[I] = ymin + h * j;
u[I] = 0.0f;
if ((i == 0) || (j == 0)) { u[I] = 200.0f; }
}
}
// Allocate in GPU
float *u_d, *u_prev_d;
hipMalloc((void **) &u_d, N * N * sizeof(float));
hipMalloc((void **) &u_prev_d, N * N * sizeof(float));
// Copy to GPU
hipMemcpy(u_d, u, N * N * sizeof(float), hipMemcpyHostToDevice);
// Loop
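// int((N - 0.5) / BLOCKSIZE) + 1 is a ceiling division: it equals N/BLOCKSIZE
// when BLOCKSIZE divides N and adds one extra block otherwise, so every grid
// point is covered (out-of-range threads return early in the kernels).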
dim3 dimGrid(int((N - 0.5) / BLOCKSIZE) + 1, int((N - 0.5) / BLOCKSIZE) + 1);
int grid = ((N - 0.5) / BLOCKSIZE) + 1;
dim3 dimBlock(BLOCKSIZE, BLOCKSIZE);
double start = get_time();
for (int t = 0; t < steps; t++) {
hipLaunchKernelGGL(( copy_array) , dim3(dimGrid), dim3(dimBlock), 0, 0, u_d, u_prev_d, N, BLOCKSIZE);
hipLaunchKernelGGL(( update) , dim3(dimGrid), dim3(dimBlock), 0, 0, u_d, u_prev_d, N, h, dt, alpha, BLOCKSIZE);
}
double stop = get_time();
checkErrors("update");
double elapsed = stop - start;
std::cout << "time = " << elapsed << std::endl;
fprintf(fp, "%i,%i,%i,%f,global\n",
N,
grid,
BLOCKSIZE,
elapsed
);
// Copy result back to host
hipMemcpy(u, u_d, N * N * sizeof(float), hipMemcpyDeviceToHost);
std::ofstream temperature("temperature_global.txt");
for (int j = 0; j < N; j++) {
for (int i = 0; i < N; i++) {
I = N * j + i;
// std::cout<<u[I]<<"\t";
temperature << x[I] << "\t" << y[I] << "\t" << u[I] << std::endl;
}
temperature << "\n";
//std::cout<<std::endl;
}
temperature.close();
// Free device
hipFree(u_d);
hipFree(u_prev_d);
// Free host
delete[] x;
delete[] y;
delete[] u;
delete[] u_prev;
}
int main() {
FILE *fp;
if ((fp = fopen("global.csv", "a")) == NULL) {
printf("Can't open .csv in append mode!\n");
exit(1);
}
fprintf(fp, "N,blocksPerGrid,threadsPerBlock,timeGPU,type\n");
// Allocate in CPU
int N = 128;
for (int BLOCKSIZE = 2; BLOCKSIZE <= 32; BLOCKSIZE *= 2) {
test(fp, N, BLOCKSIZE);
}
fclose(fp);
return 0;
}
| aa8823c0dae7b948fdd438c348190f945723cdd9.cu | /*** Solving the 2D heat equation explicitly with central differences (CD) ***/
#include <iostream>
#include <fstream>
#include <cmath>
#include <sys/time.h>
void checkErrors(const char *label) {
// we need to synchronise first to catch errors due to
// asynchronous operations that would otherwise
// potentially go unnoticed
cudaError_t err;
err = cudaDeviceSynchronize();
if (err != cudaSuccess) {
const char *e = cudaGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)\n", e, label);
}
err = cudaGetLastError();
if (err != cudaSuccess) {
const char *e = cudaGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)\n", e, label);
}
}
double get_time() {
struct timeval tim;
cudaDeviceSynchronize();
gettimeofday(&tim, NULL);
return (double) tim.tv_sec + (tim.tv_usec / 1000000.0);
}
__global__ void copy_array(float *u, float *u_prev, int N, int BSZ) {
int i = threadIdx.x;
int j = threadIdx.y;
int I = blockIdx.y * BSZ * N + blockIdx.x * BSZ + j * N + i;
if (I >= N * N) { return; }
u_prev[I] = u[I];
}
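// copy_array snapshots u into u_prev each step so update() reads a consistent
// previous time level; swapping the two device pointers on the host between
// launches would avoid this extra kernel.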
// GPU kernel
__global__ void update(float *u, float *u_prev, int N, float h, float dt, float alpha, int BSZ) {
// Setting up indices
int i = threadIdx.x;
int j = threadIdx.y;
int I = blockIdx.y * BSZ * N + blockIdx.x * BSZ + j * N + i;
if (I >= N * N) { return; }
// if not boundary do
if ((I > N) && (I < N * N - 1 - N) && (I % N != 0) && (I % N != N - 1)) {
u[I] = u_prev[I] +
alpha * dt / (h * h) * (u_prev[I + 1] + u_prev[I - 1] + u_prev[I + N] + u_prev[I - N] - 4 * u_prev[I]);
}
// Boundary conditions are automatically imposed
// as we don't touch boundaries
}
void test(FILE *fp, int N, int BLOCKSIZE) {
cudaSetDevice(0);
float xmin = 0.0f;
float xmax = 3.5f;
float ymin = 0.0f;
//float ymax = 2.0f;
float h = (xmax - xmin) / (N - 1);
float dt = 0.00001f;
float alpha = 0.645f;
float time = 0.4f;
int steps = ceil(time / dt);
int I;
float *x = new float[N * N];
float *y = new float[N * N];
float *u = new float[N * N];
float *u_prev = new float[N * N];
// Generate mesh and initial condition
for (int j = 0; j < N; j++) {
for (int i = 0; i < N; i++) {
I = N * j + i;
x[I] = xmin + h * i;
y[I] = ymin + h * j;
u[I] = 0.0f;
if ((i == 0) || (j == 0)) { u[I] = 200.0f; }
}
}
// Allocate in GPU
float *u_d, *u_prev_d;
cudaMalloc((void **) &u_d, N * N * sizeof(float));
cudaMalloc((void **) &u_prev_d, N * N * sizeof(float));
// Copy to GPU
cudaMemcpy(u_d, u, N * N * sizeof(float), cudaMemcpyHostToDevice);
// Loop
dim3 dimGrid(int((N - 0.5) / BLOCKSIZE) + 1, int((N - 0.5) / BLOCKSIZE) + 1);
int grid = ((N - 0.5) / BLOCKSIZE) + 1;
dim3 dimBlock(BLOCKSIZE, BLOCKSIZE);
double start = get_time();
for (int t = 0; t < steps; t++) {
copy_array <<<dimGrid, dimBlock>>>(u_d, u_prev_d, N, BLOCKSIZE);
update <<<dimGrid, dimBlock>>>(u_d, u_prev_d, N, h, dt, alpha, BLOCKSIZE);
}
double stop = get_time();
checkErrors("update");
double elapsed = stop - start;
std::cout << "time = " << elapsed << std::endl;
fprintf(fp, "%i,%i,%i,%f,global\n",
N,
grid,
BLOCKSIZE,
elapsed
);
// Copy result back to host
cudaMemcpy(u, u_d, N * N * sizeof(float), cudaMemcpyDeviceToHost);
std::ofstream temperature("temperature_global.txt");
for (int j = 0; j < N; j++) {
for (int i = 0; i < N; i++) {
I = N * j + i;
// std::cout<<u[I]<<"\t";
temperature << x[I] << "\t" << y[I] << "\t" << u[I] << std::endl;
}
temperature << "\n";
//std::cout<<std::endl;
}
temperature.close();
// Free device
cudaFree(u_d);
cudaFree(u_prev_d);
// Free host
delete[] x;
delete[] y;
delete[] u;
delete[] u_prev;
}
int main() {
FILE *fp;
if ((fp = fopen("global.csv", "a")) == NULL) {
printf("Can't open .csv in append mode!\n");
exit(1);
}
fprintf(fp, "N,blocksPerGrid,threadsPerBlock,timeGPU,type\n");
// Allocate in CPU
int N = 128;
for (int BLOCKSIZE = 2; BLOCKSIZE <= 32; BLOCKSIZE *= 2) {
test(fp, N, BLOCKSIZE);
}
fclose(fp);
return 0;
}
|
9a6a86374c7a5d1c452a3fe16a3d2eb54da21373.hip | // !!! This is a file automatically generated by hipify!!!
/**
* File reading functions
* @file FilesReading.h
* @author Adam Koleszar ([email protected])
*/
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include "FilesReading.h"
#include "ArrayUtils.h"
#include "TestUtils.h"
int getNumberOfLines(const char *filename)
{
FILE *f = fopen(filename, "r");
if (!f)
{
fprintf(stderr, "Error reading file %s: %s\n", filename, strerror(errno));
return 0;
}
int lines=0;
while (!feof(f))
{
if (fgetc(f) == '\n')
{
lines++;
}
}
fclose(f);
return lines;
}
/**
* @brief Set enum options from init file
*
* @param args input parameters
* @param ipr inlet profile
* @param coll collision model
* @param curved boundary type
* @param opr outlet profile
* @param format output format
*/
void setOptions(Arguments *args, int ipr, int coll, int curved, int opr, int format)
{
args->inletProfile = (InletProfile)ipr;
args->collisionModel = (CollisionModel)coll;
args->boundaryType = (BoundaryType)curved;
args->outletProfile = (OutletProfile)opr;
args->outputFormat = (OutputFormat)format;
}
void readInitFile(const char* filename, Arguments *args)
{
int ipr, coll, curved, opr, format;
FILE *f_init = fopen(filename,"r");
fscanf(f_init,FLOAT_FORMAT, &(args->u));
fscanf(f_init,FLOAT_FORMAT, &(args->v));
fscanf(f_init,FLOAT_FORMAT, &(args->rho));
fscanf(f_init,FLOAT_FORMAT, &(args->viscosity));
fscanf(f_init,"%d", &ipr);
fscanf(f_init,"%d", &coll);
fscanf(f_init,"%d", &curved);
fscanf(f_init,"%d", &opr);
fscanf(f_init,"%d", &(args->iterations));
fscanf(f_init,"%d", &(args->autosaveEvery));
fscanf(f_init,"%d", &(args->autosaveAfter));
fscanf(f_init,"%d", &format);
fscanf(f_init,"%d", &(args->boundaryId));
fclose(f_init);
setOptions(args, ipr, coll, curved, opr, format);
}
int readNodeFile(const char *filename, int **ni, int **nj, FLOAT_TYPE **nx, FLOAT_TYPE **ny, int **nf)
{
int n = getNumberOfLines(filename);
if (!n)
{
return 0;
}
*ni = createHostArrayInt(n);
*nj = createHostArrayInt(n);
*nx = createHostArrayFlt(n);
*ny = createHostArrayFlt(n);
*nf = createHostArrayInt(n);
FILE *f = fopen(filename, "r");
int i;
for (i=0; i<n; ++i)
{
fscanf(f, "%d %d "FLOAT_FORMAT" "FLOAT_FORMAT" %d", (*ni)+i, (*nj)+i, (*nx)+i, (*ny)+i, (*nf)+i);
}
fclose(f);
return n;
}
int readConnFile(const char *filename, int **ni, int **nj, int **dir, int **bc,
FLOAT_TYPE **bcx, FLOAT_TYPE **bcy, int **id)
{
int n = getNumberOfLines(filename);
if (!n)
{
return 0;
}
*ni = createHostArrayInt(n);
*nj = createHostArrayInt(n);
*dir = createHostArrayInt(n);
*bc = createHostArrayInt(n);
*bcx = createHostArrayFlt(n);
*bcy = createHostArrayFlt(n);
*id = createHostArrayInt(n);
FILE *f = fopen(filename, "r");
int i;
for (i=0; i<n; ++i)
{
fscanf(f, "%d %d %d %d "FLOAT_FORMAT" "FLOAT_FORMAT" %d", (*ni)+i, (*nj)+i, (*dir)+i,
(*bc)+i, (*bcx)+i, (*bcy)+i,
(*id)+i);
}
fclose(f);
return n;
}
int readResultFile(const char *filename, FLOAT_TYPE ***results, int **fluid)
{
int i,n = getNumberOfLines(filename);
if (!n)
{
return 0;
}
FILE *f = fopen(filename, "r");
char firstline[256];
fscanf(f, "%s", firstline);
n -= 1;
*fluid = createHostArrayInt(n);
*results = create2DHostArrayFlt(n, 7);
for (i=0; i<n; ++i)
{
fscanf(f, FLOAT_FORMAT", "FLOAT_FORMAT", "FLOAT_FORMAT", "FLOAT_FORMAT", "FLOAT_FORMAT", "FLOAT_FORMAT", "FLOAT_FORMAT", %d",
results[0][0]+i, results[0][1]+i, results[0][2]+i, results[0][3]+i, results[0][4]+i, results[0][5]+i, results[0][6]+i, (*fluid)+i);
}
fclose(f);
return n;
}
__host__ int compareFiles(const char* f1, const char* f2)
{
printf("Comparing results\n");
int l1 = getNumberOfLines(f1);
int l2 = getNumberOfLines(f2);
if (l1 != l2)
{
printf("Line number mismatch\n");
exit(1);
}
int i;
const char *columnName[] = {"x", "y", "u", "v", "vel_mag", "rho", "pressure"};
int *fluid1;
int *fluid2;
FLOAT_TYPE **res1;
FLOAT_TYPE **res2;
printf("Reading files...");
readResultFile(f1, &res1, &fluid1);
printf("...");
readResultFile(f2, &res2, &fluid2);
printf("...done\nComparing results...\n");
dim3 gridDim(l1/THREADS + 1);
FLOAT_TYPE *da = createGpuArrayFlt(l1);
FLOAT_TYPE *db = createGpuArrayFlt(l1);
FLOAT_TYPE *dc = createGpuArrayFlt(l1);
FLOAT_TYPE *dd = createGpuArrayFlt(l1);
FLOAT_TYPE result[7];
for (i=0; i<7; ++i)
{
hipMemcpy(da, res1[i], SIZEFLT(l1), hipMemcpyHostToDevice);
hipMemcpy(db, res2[i], SIZEFLT(l1), hipMemcpyHostToDevice);
result[i] = compareGpuArraySumFlt(da, db, dc, dd, l1);
}
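// result[i] holds the per-column discrepancy returned by
// compareGpuArraySumFlt (presumably a GPU-side sum over element-wise
// differences); the 0.001 threshold below flags anything larger as a
// mismatch.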
int fluid = 0;
for (i=0; i<l1; ++i)
{
fluid += (fluid1[i]-fluid2[i]);
}
printf(" array | diff | diff/nodes\n"
"----------------------------------------------\n");
printf(" fluids | %15d | %15d\n", fluid, fluid/l1);
int b = 0;
for (i=0; i<7; ++i)
{
printf("%10s | %15g | %15g\n", columnName[i], result[i], result[i]/l1);
b |= result[i] > 0.001;
free(res1[i]);
free(res2[i]);
}
hipFree(da); hipFree(db); hipFree(dc); hipFree(dd);
free(fluid1); free(fluid2);
free(res1); free(res2);
return b;
}
int getLastValue(int *arr, int n)
{
return arr[n-1] + 1;
}
FLOAT_TYPE getGridSpacing(int *ni, int *nj, FLOAT_TYPE *nx, int n)
{
int i;
FLOAT_TYPE delta1, delta2;
for (i=0; i<n; ++i)
{
if (ni[i] == 0 && nj[i] == 0)
{
delta1 = nx[i];
break;
}
}
for (i=0; i<n; ++i)
{
if (ni[i] == 1 && nj[i] == 0)
{
delta2 = nx[i];
break;
}
}
return (FLOAT_TYPE)fabs(delta2 - delta1);
}
int getNumInletNodes(int *bc, int *dir, int n)
{
int i;
int nodes = 0;
for (i=0; i<n; ++i)
{
if (bc[i] == 2 && dir[i] >= 1 && dir[i] <= 4)
{
++nodes;
}
}
return nodes;
}
FLOAT_TYPE getMaxInletCoordY(int *bc, int *dir, FLOAT_TYPE *bcy, FLOAT_TYPE delta, int n)
{
int i=0;
FLOAT_TYPE maxY;
while (bc[i] != 2) //inlet
{
maxY = bcy[++i];
}
for (i=0; i<n; ++i)
{
if (bc[i] == 2 && dir[i] >= 1 && dir[i] <= 4)
{
maxY = (bcy[i] > maxY) ? bcy[i] : maxY;
}
}
return maxY + delta/2;
}
FLOAT_TYPE getMinInletCoordY(int *bc, int *dir, FLOAT_TYPE *bcy, FLOAT_TYPE delta, int n)
{
int i=0;
FLOAT_TYPE minY;
while (bc[i] != 2) //inlet
{
minY = bcy[++i];
}
for (i=0; i<n; ++i)
{
if (bc[i] == 2 && dir[i] >= 1 && dir[i] <= 4)
{
minY = (bcy[i] < minY) ? bcy[i] : minY;
}
}
return minY - delta/2;
}
void CompDataNode(FLOAT_TYPE *Delta, int *m, int *n, int *ni, int *nj, FLOAT_TYPE *nx, int size)
{
int i; // variable for the loop
FLOAT_TYPE DeltaP1, DeltaP2; // local grid spacing
*n = *(ni+size-1)+1; // number of rows
*m = *(nj+size-1)+1; // number of columns
for(i=0;i<size;i++)
{
if(*(ni+i)==0 && *(nj+i)==0)
{
DeltaP1=*(nx+i);
}
if(*(ni+i)==1 && *(nj+i)==0)
{
DeltaP2=*(nx+i);
}
}
*Delta = (max(DeltaP1,DeltaP2)-min(DeltaP1,DeltaP2)); // grid spacing
}
void CompDataConn(int* NumInletNodes, FLOAT_TYPE* MaxInletCoordY,
FLOAT_TYPE* MinInletCoordY, int* BCconn0, int* BCconn1, int* BCconn2,
int* BCconn3, FLOAT_TYPE* BCconn4, FLOAT_TYPE* BCconn5, int* BCconn6, int* NumConn, FLOAT_TYPE* Delta)
{
int i=0; // counter
while(*(BCconn3+i)!=2)
{
MaxInletCoordY[0] = *(BCconn5+i+1); // maximum Y coordinate of the inlet line
MinInletCoordY[0] = *(BCconn5+i+1); // minimum Y coordinate of the inlet line
i++;
}
*NumInletNodes = 0; // number of inlet nodes
for (i=0; i< *NumConn;i++)
{
if(*(BCconn3+i)==2){
if(*(BCconn2+i)==1 || *(BCconn2+i)==2 || *(BCconn2+i)==3 || *(BCconn2+i)==4){
if(*(BCconn5+i)>*MaxInletCoordY){
*MaxInletCoordY = *(BCconn5+i);
}
if(*(BCconn5+i)<MinInletCoordY[0]){
*MinInletCoordY = *(BCconn5+i);
}
*NumInletNodes=*NumInletNodes+1;
}
}
}
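// Widen the detected inlet extent by half a grid spacing on each side,
// presumably so boundary nodes centered on the end cells still fall inside
// [MinInletCoordY, MaxInletCoordY].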
(*MaxInletCoordY) = (*MaxInletCoordY)+(*Delta)/2;
(*MinInletCoordY) = (*MinInletCoordY)-(*Delta)/2;
}
| 9a6a86374c7a5d1c452a3fe16a3d2eb54da21373.cu | /**
* File reading functions
* @file FilesReading.h
* @author Adam Koleszar ([email protected])
*/
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include "FilesReading.h"
#include "ArrayUtils.h"
#include "TestUtils.h"
int getNumberOfLines(const char *filename)
{
FILE *f = fopen(filename, "r");
if (!f)
{
fprintf(stderr, "Error reading file %s: %s\n", filename, strerror(errno));
return 0;
}
int lines=0;
while (!feof(f))
{
if (fgetc(f) == '\n')
{
lines++;
}
}
fclose(f);
return lines;
}
/**
* @brief Set enum options from init file
*
* @param args input parameters
* @param ipr inlet profile
* @param coll collision model
* @param curved boundary type
* @param opr outlet profile
* @param format output format
*/
void setOptions(Arguments *args, int ipr, int coll, int curved, int opr, int format)
{
args->inletProfile = (InletProfile)ipr;
args->collisionModel = (CollisionModel)coll;
args->boundaryType = (BoundaryType)curved;
args->outletProfile = (OutletProfile)opr;
args->outputFormat = (OutputFormat)format;
}
void readInitFile(const char* filename, Arguments *args)
{
int ipr, coll, curved, opr, format;
FILE *f_init = fopen(filename,"r");
fscanf(f_init,FLOAT_FORMAT, &(args->u));
fscanf(f_init,FLOAT_FORMAT, &(args->v));
fscanf(f_init,FLOAT_FORMAT, &(args->rho));
fscanf(f_init,FLOAT_FORMAT, &(args->viscosity));
fscanf(f_init,"%d", &ipr);
fscanf(f_init,"%d", &coll);
fscanf(f_init,"%d", &curved);
fscanf(f_init,"%d", &opr);
fscanf(f_init,"%d", &(args->iterations));
fscanf(f_init,"%d", &(args->autosaveEvery));
fscanf(f_init,"%d", &(args->autosaveAfter));
fscanf(f_init,"%d", &format);
fscanf(f_init,"%d", &(args->boundaryId));
fclose(f_init);
setOptions(args, ipr, coll, curved, opr, format);
}
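/* The init file holds 13 whitespace-separated values, read in this order:
 * u v rho viscosity inletProfile collisionModel boundaryType outletProfile
 * iterations autosaveEvery autosaveAfter outputFormat boundaryId */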
int readNodeFile(const char *filename, int **ni, int **nj, FLOAT_TYPE **nx, FLOAT_TYPE **ny, int **nf)
{
int n = getNumberOfLines(filename);
if (!n)
{
return 0;
}
*ni = createHostArrayInt(n);
*nj = createHostArrayInt(n);
*nx = createHostArrayFlt(n);
*ny = createHostArrayFlt(n);
*nf = createHostArrayInt(n);
FILE *f = fopen(filename, "r");
int i;
for (i=0; i<n; ++i)
{
fscanf(f, "%d %d "FLOAT_FORMAT" "FLOAT_FORMAT" %d", (*ni)+i, (*nj)+i, (*nx)+i, (*ny)+i, (*nf)+i);
}
fclose(f);
return n;
}
int readConnFile(const char *filename, int **ni, int **nj, int **dir, int **bc,
FLOAT_TYPE **bcx, FLOAT_TYPE **bcy, int **id)
{
int n = getNumberOfLines(filename);
if (!n)
{
return 0;
}
*ni = createHostArrayInt(n);
*nj = createHostArrayInt(n);
*dir = createHostArrayInt(n);
*bc = createHostArrayInt(n);
*bcx = createHostArrayFlt(n);
*bcy = createHostArrayFlt(n);
*id = createHostArrayInt(n);
FILE *f = fopen(filename, "r");
int i;
for (i=0; i<n; ++i)
{
fscanf(f, "%d %d %d %d "FLOAT_FORMAT" "FLOAT_FORMAT" %d", (*ni)+i, (*nj)+i, (*dir)+i,
(*bc)+i, (*bcx)+i, (*bcy)+i,
(*id)+i);
}
fclose(f);
return n;
}
int readResultFile(const char *filename, FLOAT_TYPE ***results, int **fluid)
{
int i,n = getNumberOfLines(filename);
if (!n)
{
return 0;
}
FILE *f = fopen(filename, "r");
char firstline[256];
fscanf(f, "%s", firstline);
n -= 1;
*fluid = createHostArrayInt(n);
*results = create2DHostArrayFlt(n, 7);
for (i=0; i<n; ++i)
{
fscanf(f, FLOAT_FORMAT", "FLOAT_FORMAT", "FLOAT_FORMAT", "FLOAT_FORMAT", "FLOAT_FORMAT", "FLOAT_FORMAT", "FLOAT_FORMAT", %d",
results[0][0]+i, results[0][1]+i, results[0][2]+i, results[0][3]+i, results[0][4]+i, results[0][5]+i, results[0][6]+i, (*fluid)+i);
}
fclose(f);
return n;
}
__host__ int compareFiles(const char* f1, const char* f2)
{
printf("Comparing results\n");
int l1 = getNumberOfLines(f1);
int l2 = getNumberOfLines(f2);
if (l1 != l2)
{
printf("Line number mismatch\n");
exit(1);
}
int i;
const char *columnName[] = {"x", "y", "u", "v", "vel_mag", "rho", "pressure"};
int *fluid1;
int *fluid2;
FLOAT_TYPE **res1;
FLOAT_TYPE **res2;
printf("Reading files...");
readResultFile(f1, &res1, &fluid1);
printf("...");
readResultFile(f2, &res2, &fluid2);
printf("...done\nComparing results...\n");
dim3 gridDim(l1/THREADS + 1);
FLOAT_TYPE *da = createGpuArrayFlt(l1);
FLOAT_TYPE *db = createGpuArrayFlt(l1);
FLOAT_TYPE *dc = createGpuArrayFlt(l1);
FLOAT_TYPE *dd = createGpuArrayFlt(l1);
FLOAT_TYPE result[7];
for (i=0; i<7; ++i)
{
cudaMemcpy(da, res1[i], SIZEFLT(l1), cudaMemcpyHostToDevice);
cudaMemcpy(db, res2[i], SIZEFLT(l1), cudaMemcpyHostToDevice);
result[i] = compareGpuArraySumFlt(da, db, dc, dd, l1);
}
int fluid = 0;
for (i=0; i<l1; ++i)
{
fluid += (fluid1[i]-fluid2[i]);
}
printf(" array | diff | diff/nodes\n"
"----------------------------------------------\n");
printf(" fluids | %15d | %15d\n", fluid, fluid/l1);
int b = 0;
for (i=0; i<7; ++i)
{
printf("%10s | %15g | %15g\n", columnName[i], result[i], result[i]/l1);
b |= result[i] > 0.001;
free(res1[i]);
free(res2[i]);
}
free(fluid1); free(fluid2);
free(res1); free(res2);
return b;
}
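/* Returns nonzero when any column's accumulated difference exceeds 0.001, so
 * the result can be used directly as a test exit status. */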
int getLastValue(int *arr, int n)
{
return arr[n-1] + 1;
}
FLOAT_TYPE getGridSpacing(int *ni, int *nj, FLOAT_TYPE *nx, int n)
{
int i;
FLOAT_TYPE delta1, delta2;
for (i=0; i<n; ++i)
{
if (ni[i] == 0 && nj[i] == 0)
{
delta1 = nx[i];
break;
}
}
for (i=0; i<n; ++i)
{
if (ni[i] == 1 && nj[i] == 0)
{
delta2 = nx[i];
break;
}
}
return (FLOAT_TYPE)fabs(delta2 - delta1);
}
int getNumInletNodes(int *bc, int *dir, int n)
{
int i;
int nodes = 0;
for (i=0; i<n; ++i)
{
if (bc[i] == 2 && dir[i] >= 1 && dir[i] <= 4)
{
++nodes;
}
}
return nodes;
}
FLOAT_TYPE getMaxInletCoordY(int *bc, int *dir, FLOAT_TYPE *bcy, FLOAT_TYPE delta, int n)
{
int i=0;
while (bc[i] != 2) //advance to the first inlet node
{
++i;
}
FLOAT_TYPE maxY = bcy[i];
for (i=0; i<n; ++i)
{
if (bc[i] == 2 && dir[i] >= 1 && dir[i] <= 4)
{
maxY = (bcy[i] > maxY) ? bcy[i] : maxY;
}
}
return maxY + delta/2;
}
FLOAT_TYPE getMinInletCoordY(int *bc, int *dir, FLOAT_TYPE *bcy, FLOAT_TYPE delta, int n)
{
int i=0;
while (bc[i] != 2) //advance to the first inlet node
{
++i;
}
FLOAT_TYPE minY = bcy[i];
for (i=0; i<n; ++i)
{
if (bc[i] == 2 && dir[i] >= 1 && dir[i] <= 4)
{
minY = (bcy[i] < minY) ? bcy[i] : minY;
}
}
return minY - delta/2;
}
void CompDataNode(FLOAT_TYPE *Delta, int *m, int *n, int *ni, int *nj, FLOAT_TYPE *nx, int size)
{
int i; // variable for the loop
FLOAT_TYPE DeltaP1, DeltaP2; // local grid spacing
*n = *(ni+size-1)+1; // number of rows
*m = *(nj+size-1)+1; // number of columns
for(i=0;i<size;i++)
{
if(*(ni+i)==0 && *(nj+i)==0)
{
DeltaP1=*(nx+i);
}
if(*(ni+i)==1 && *(nj+i)==0)
{
DeltaP2=*(nx+i);
}
}
*Delta = (max(DeltaP1,DeltaP2)-min(DeltaP1,DeltaP2)); // grid spacing
}
void CompDataConn(int* NumInletNodes, FLOAT_TYPE* MaxInletCoordY,
FLOAT_TYPE* MinInletCoordY, int* BCconn0, int* BCconn1, int* BCconn2,
int* BCconn3, FLOAT_TYPE* BCconn4, FLOAT_TYPE* BCconn5, int* BCconn6, int* NumConn, FLOAT_TYPE* Delta)
{
int i=0; // counter
while(*(BCconn3+i)!=2) // advance to the first inlet entry
{
i++;
}
MaxInletCoordY[0] = *(BCconn5+i); // maximum Y coordinate of the inlet line
MinInletCoordY[0] = *(BCconn5+i); // minimum Y coordinate of the inlet line
*NumInletNodes = 0; // number of inlet nodes
for (i=0; i< *NumConn;i++)
{
if(*(BCconn3+i)==2){
if(*(BCconn2+i)==1 || *(BCconn2+i)==2 || *(BCconn2+i)==3 || *(BCconn2+i)==4){
if(*(BCconn5+i)>*MaxInletCoordY){
*MaxInletCoordY = *(BCconn5+i);
}
if(*(BCconn5+i)<MinInletCoordY[0]){
*MinInletCoordY = *(BCconn5+i);
}
*NumInletNodes=*NumInletNodes+1;
}
}
}
(*MaxInletCoordY) = (*MaxInletCoordY)+(*Delta)/2;
(*MinInletCoordY) = (*MinInletCoordY)-(*Delta)/2;
}
|
c7cdd4569df7787957f2be8d87803ea7ee8d441c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void dwt_per_X_O(float *d_ip, int rows, int cols, int cA_cols, int filt_len, int Halo_steps, float *d_cL, float *d_cH)
{
extern __shared__ float s_Data[];
//Offset to the left halo edge
const int baseX = (blockIdx.x * 2 * X_RESULT_STEPS - Halo_steps) * X_BLOCKDIM_X + threadIdx.x;
const int baseX1 = (blockIdx.x * X_RESULT_STEPS) * X_BLOCKDIM_X + threadIdx.x;
const int baseY = blockIdx.y * X_BLOCKDIM_Y + threadIdx.y;
if (baseY < rows) {
d_ip += baseY * cols + baseX;
d_cL += baseY * cA_cols + baseX1;
d_cH += baseY * cA_cols + baseX1;
//Loading data to shared memory
//Load Left Halo
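// Periodic ("per") extension: a global index of -1 wraps to the last column,
// and indices further left are redirected back into the row (hence the
// cols-1 / cols+1 offsets below).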
#pragma unroll
for (int i = 0; i < Halo_steps; i++)
{
if (baseX + i * X_BLOCKDIM_X == -1) s_Data[(threadIdx.y*(2 * X_RESULT_STEPS + 2 * Halo_steps)*X_BLOCKDIM_X) + threadIdx.x + i * X_BLOCKDIM_X] = d_ip[cols - 1];
else s_Data[(threadIdx.y*(2 * X_RESULT_STEPS + 2 * Halo_steps)*X_BLOCKDIM_X) + threadIdx.x + i * X_BLOCKDIM_X] = (baseX + i * X_BLOCKDIM_X >= 0) ? d_ip[i * X_BLOCKDIM_X] : d_ip[i * X_BLOCKDIM_X + cols + 1];
}
// main data and Load right halo
#pragma unroll
for (int i = Halo_steps; i < Halo_steps + 2 * X_RESULT_STEPS + Halo_steps; i++)
{
if (baseX + i * X_BLOCKDIM_X == cols) s_Data[(threadIdx.y*(2 * X_RESULT_STEPS + 2 * Halo_steps)*X_BLOCKDIM_X) + threadIdx.x + i * X_BLOCKDIM_X] = d_ip[i * X_BLOCKDIM_X - 1];
else s_Data[(threadIdx.y*(2 * X_RESULT_STEPS + 2 * Halo_steps)*X_BLOCKDIM_X) + threadIdx.x + i * X_BLOCKDIM_X] = ((baseX + i * X_BLOCKDIM_X) < cols) ? d_ip[i * X_BLOCKDIM_X] : d_ip[i * X_BLOCKDIM_X - cols - 1];
}
//Compute and store results
__syncthreads();
#pragma unroll
for (int i = 0; i < X_RESULT_STEPS; i++)
{
if ((baseX1 + i * X_BLOCKDIM_X < cA_cols))
{
float sum_cL = 0, sum_cH = 0;
int l2 = filt_len / 2;
for (int l = 0; l < filt_len; ++l)
{
sum_cL += c_lpd[l] * s_Data[(threadIdx.y*(2 * X_RESULT_STEPS + 2 * Halo_steps)*X_BLOCKDIM_X) + threadIdx.x * 2 + Halo_steps*X_BLOCKDIM_X + 2 * i * X_BLOCKDIM_X + l2 - l]; //l2-l is to select the right center pixels with odd and even sized filters
sum_cH += c_hpd[l] * s_Data[(threadIdx.y*(2 * X_RESULT_STEPS + 2 * Halo_steps)*X_BLOCKDIM_X) + threadIdx.x * 2 + Halo_steps*X_BLOCKDIM_X + 2 * i * X_BLOCKDIM_X + l2 - l];
}
d_cL[i * X_BLOCKDIM_X] = sum_cL;
d_cH[i * X_BLOCKDIM_X] = sum_cH;
}
}
}
} | c7cdd4569df7787957f2be8d87803ea7ee8d441c.cu | #include "includes.h"
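/* A minimal host-side launch sketch (an assumption for illustration, not part
 * of this project; X_BLOCKDIM_X/Y, X_RESULT_STEPS and the c_lpd/c_hpd filter
 * taps come from the surrounding headers). Each block produces
 * X_RESULT_STEPS*X_BLOCKDIM_X low/high-pass outputs per row:
 *
 *   dim3 block(X_BLOCKDIM_X, X_BLOCKDIM_Y);
 *   dim3 grid((cA_cols + X_RESULT_STEPS*X_BLOCKDIM_X - 1) /
 *             (X_RESULT_STEPS*X_BLOCKDIM_X),
 *             (rows + X_BLOCKDIM_Y - 1) / X_BLOCKDIM_Y);
 *   size_t shmem = X_BLOCKDIM_Y * (2*X_RESULT_STEPS + 2*Halo_steps) *
 *                  X_BLOCKDIM_X * sizeof(float);
 *   dwt_per_X_O<<<grid, block, shmem>>>(d_ip, rows, cols, cA_cols,
 *                                       filt_len, Halo_steps, d_cL, d_cH);
 */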
__global__ void dwt_per_X_O(float *d_ip, int rows, int cols, int cA_cols, int filt_len, int Halo_steps, float *d_cL, float *d_cH)
{
extern __shared__ float s_Data[];
//Offset to the left halo edge
const int baseX = (blockIdx.x * 2 * X_RESULT_STEPS - Halo_steps) * X_BLOCKDIM_X + threadIdx.x;
const int baseX1 = (blockIdx.x * X_RESULT_STEPS) * X_BLOCKDIM_X + threadIdx.x;
const int baseY = blockIdx.y * X_BLOCKDIM_Y + threadIdx.y;
if (baseY < rows) {
d_ip += baseY * cols + baseX;
d_cL += baseY * cA_cols + baseX1;
d_cH += baseY * cA_cols + baseX1;
//Loading data to shared memory
//Load Left Halo
#pragma unroll
for (int i = 0; i < Halo_steps; i++)
{
if (baseX + i * X_BLOCKDIM_X == -1) s_Data[(threadIdx.y*(2 * X_RESULT_STEPS + 2 * Halo_steps)*X_BLOCKDIM_X) + threadIdx.x + i * X_BLOCKDIM_X] = d_ip[cols - 1];
else s_Data[(threadIdx.y*(2 * X_RESULT_STEPS + 2 * Halo_steps)*X_BLOCKDIM_X) + threadIdx.x + i * X_BLOCKDIM_X] = (baseX + i * X_BLOCKDIM_X >= 0) ? d_ip[i * X_BLOCKDIM_X] : d_ip[i * X_BLOCKDIM_X + cols + 1];
}
// main data and Load right halo
#pragma unroll
for (int i = Halo_steps; i < Halo_steps + 2 * X_RESULT_STEPS + Halo_steps; i++)
{
if (baseX + i * X_BLOCKDIM_X == cols) s_Data[(threadIdx.y*(2 * X_RESULT_STEPS + 2 * Halo_steps)*X_BLOCKDIM_X) + threadIdx.x + i * X_BLOCKDIM_X] = d_ip[i * X_BLOCKDIM_X - 1];
else s_Data[(threadIdx.y*(2 * X_RESULT_STEPS + 2 * Halo_steps)*X_BLOCKDIM_X) + threadIdx.x + i * X_BLOCKDIM_X] = ((baseX + i * X_BLOCKDIM_X) < cols) ? d_ip[i * X_BLOCKDIM_X] : d_ip[i * X_BLOCKDIM_X - cols - 1];
}
//Compute and store results
__syncthreads();
#pragma unroll
for (int i = 0; i < X_RESULT_STEPS; i++)
{
if ((baseX1 + i * X_BLOCKDIM_X < cA_cols))
{
float sum_cL = 0, sum_cH = 0;
int l2 = filt_len / 2;
for (int l = 0; l < filt_len; ++l)
{
sum_cL += c_lpd[l] * s_Data[(threadIdx.y*(2 * X_RESULT_STEPS + 2 * Halo_steps)*X_BLOCKDIM_X) + threadIdx.x * 2 + Halo_steps*X_BLOCKDIM_X + 2 * i * X_BLOCKDIM_X + l2 - l]; //l2-l is to select the right center pixels with odd and even sized filters
sum_cH += c_hpd[l] * s_Data[(threadIdx.y*(2 * X_RESULT_STEPS + 2 * Halo_steps)*X_BLOCKDIM_X) + threadIdx.x * 2 + Halo_steps*X_BLOCKDIM_X + 2 * i * X_BLOCKDIM_X + l2 - l];
}
d_cL[i * X_BLOCKDIM_X] = sum_cL;
d_cH[i * X_BLOCKDIM_X] = sum_cH;
}
}
}
} |
005b0e4672515592658821f0f24f5f53ed2345f6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <limits.h>
#include <stdlib.h>
#include <ctime>
#include <sstream>
#include <string>
#include "gpu_hashtable.hpp"
__global__ void insert(GpuHashTable::hashCell *deviceHashTable,
unsigned int *keys,
unsigned int *values,
unsigned int slotsElems,
int numKeys) {
int index, position, idx;
unsigned int old;
idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= numKeys)
return;
position = myHash(keys[idx], slotsElems);
index = position;
while (index < slotsElems) {
old = atomicCAS(&deviceHashTable[index].key, (unsigned int) 0, keys[idx]);
if (old == 0 || old == keys[idx]) {
deviceHashTable[index].value = values[idx];
return;
}
index++;
}
index = 0;
while (index < position) {
old = atomicCAS(&deviceHashTable[index].key, (unsigned int) 0, keys[idx]);
if (old == 0 || old == keys[idx]) {
deviceHashTable[index].value = values[idx];
return;
}
index++;
}
}
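/* Both probe loops above implement linear probing with wrap-around: a slot is
 * claimed by atomicCAS on an empty key (0), which also means key 0 itself
 * cannot be stored in this table. */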
__global__ void reinsert(GpuHashTable::hashCell *newHashTable,
GpuHashTable::hashCell *copyHashTable,
unsigned int oldSize,
unsigned int slotsElems) {
int index, position, idx;
unsigned int old;
idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= oldSize)
return;
if (copyHashTable[idx].key == 0)
return;
position = myHash(copyHashTable[idx].key, slotsElems);
index = position;
while (index < slotsElems) {
old = atomicCAS(&newHashTable[index].key, (unsigned int) 0, copyHashTable[idx].key);
if (!old || old == copyHashTable[idx].key) {
newHashTable[index].value = copyHashTable[idx].value;
return;
}
index++;
}
index = 0;
while (index < position) {
old = atomicCAS(&newHashTable[index].key, (unsigned int) 0, copyHashTable[idx].key);
if (!old || old == copyHashTable[idx].key) {
newHashTable[index].value = copyHashTable[idx].value;
return;
}
index++;
}
}
__global__ void get(GpuHashTable::hashCell *deviceHashTable,
unsigned int *keys,
unsigned int *values,
unsigned int slotsElems,
int numKeys) {
int index, position, idx;
idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= numKeys)
return;
position = myHash(keys[idx], slotsElems);
index = position;
while (index < slotsElems) {
if (deviceHashTable[index].key == keys[idx]) {
values[idx] = deviceHashTable[index].value;
return;
}
index++;
}
index = 0;
while (index < position) {
if (deviceHashTable[index].key == keys[idx]) {
values[idx] = deviceHashTable[index].value;
return;
}
index++;
}
}
/* INIT HASH
*/
GpuHashTable::GpuHashTable(int size) {
hipMalloc((void **) &hashTable, size * sizeof(hashCell));
hipMemset(hashTable, 0, size * sizeof(hashCell));
slotsElems = size;
numElems = 0;
}
/* DESTROY HASH
*/
GpuHashTable::~GpuHashTable() {
hipFree(hashTable);
slotsElems = 0;
numElems = 0;
}
/* RESHAPE HASH
*/
void GpuHashTable::reshape(int numBucketsReshape) {
hashCell *copyHashTable;
if (numElems) {
int mingridsize, threadblocksize, gridsize;
hipOccupancyMaxPotentialBlockSize(&mingridsize, &threadblocksize, reinsert, 0, 0);
hipMalloc(©HashTable, slotsElems * sizeof(hashCell));
hipMemcpy(copyHashTable, hashTable, slotsElems * sizeof(hashCell), hipMemcpyDeviceToDevice);
hipFree(hashTable);
hipMalloc((void **) &hashTable, numBucketsReshape * sizeof(hashCell));
hipMemset(hashTable, 0, numBucketsReshape * sizeof(hashCell));
gridsize = ((unsigned int)slotsElems + threadblocksize - 1) / threadblocksize;
hipLaunchKernelGGL(( reinsert), dim3(gridsize), dim3(threadblocksize), 0, 0, hashTable, copyHashTable, slotsElems, numBucketsReshape);
hipDeviceSynchronize();
slotsElems = numBucketsReshape;
hipFree(copyHashTable);
return;
}
hipFree(hashTable);
hipMalloc((void **) &hashTable, numBucketsReshape * sizeof(hashCell));
hipMemset(hashTable, 0, numBucketsReshape * sizeof(hashCell));
slotsElems = numBucketsReshape;
}
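/* Rehashing on resize is unavoidable here: slot positions come from
 * myHash(key, slotsElems), so every stored entry must be reinserted under the
 * new table size. */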
/* INSERT BATCH
*/
bool GpuHashTable::insertBatch(int *keys, int *values, int numKeys) {
int mingridsize;
int threadblocksize;
hipOccupancyMaxPotentialBlockSize(&mingridsize, &threadblocksize, insert, 0, 0);
int gridsize = ((unsigned int)numKeys + threadblocksize - 1) / threadblocksize;
unsigned int *deviceKeys, *deviceValues;
hipMalloc(&deviceKeys, numKeys * sizeof(int));
hipMemcpy(deviceKeys, keys, numKeys * sizeof(int), hipMemcpyHostToDevice);
hipMalloc(&deviceValues, numKeys * sizeof(int));
hipMemcpy(deviceValues, values, numKeys * sizeof(int), hipMemcpyHostToDevice);
if ((float)(numElems + numKeys) / slotsElems > 0.95f)
reshape((numElems + numKeys) * 1.25f);
hipLaunchKernelGGL(( insert), dim3(gridsize), dim3(threadblocksize), 0, 0, hashTable, deviceKeys, deviceValues, slotsElems, numKeys);
hipDeviceSynchronize();
hipFree(deviceKeys);
hipFree(deviceValues);
numElems += numKeys;
return true;
}
/* GET BATCH
*/
int* GpuHashTable::getBatch(int* keys, int numKeys) {
unsigned int *deviceKeys, *deviceValues;
int *hostValues;
int mingridsize, threadblocksize, gridsize;
hipOccupancyMaxPotentialBlockSize(&mingridsize, &threadblocksize, get, 0, 0);
hipMalloc(&deviceKeys, numKeys * sizeof(int));
hipMemcpy(deviceKeys, keys, numKeys * sizeof(int), hipMemcpyHostToDevice);
hipMalloc(&deviceValues, numKeys * sizeof(int));
hipMemset(deviceValues, 0, numKeys * sizeof(int));
hostValues = (int *) malloc(numKeys * sizeof(int));
gridsize = ((unsigned int)numKeys + threadblocksize - 1) / threadblocksize;
hipLaunchKernelGGL(( get), dim3(gridsize), dim3(threadblocksize), 0, 0, hashTable, deviceKeys, deviceValues, slotsElems, numKeys);
hipDeviceSynchronize();
hipMemcpy(hostValues, deviceValues, numKeys * sizeof(int), hipMemcpyDeviceToHost);
hipFree(deviceKeys);
hipFree(deviceValues);
return hostValues;
}
/* GET LOAD FACTOR
* num elements / hash total slots elements
*/
float GpuHashTable::loadFactor() {
float loadFactor = 0.f;
loadFactor = (float) numElems / slotsElems;
return loadFactor; // no larger than 1.0f = 100%
}
/*********************************************************/
#define HASH_INIT GpuHashTable GpuHashTable(1);
#define HASH_RESERVE(size) GpuHashTable.reshape(size);
#define HASH_BATCH_INSERT(keys, values, numKeys) GpuHashTable.insertBatch(keys, values, numKeys)
#define HASH_BATCH_GET(keys, numKeys) GpuHashTable.getBatch(keys, numKeys)
#define HASH_LOAD_FACTOR GpuHashTable.loadFactor()
#include "test_map.cpp"
| 005b0e4672515592658821f0f24f5f53ed2345f6.cu | #include <iostream>
#include <limits.h>
#include <stdlib.h>
#include <ctime>
#include <sstream>
#include <string>
#include "gpu_hashtable.hpp"
__global__ void insert(GpuHashTable::hashCell *deviceHashTable,
unsigned int *keys,
unsigned int *values,
unsigned int slotsElems,
int numKeys) {
int index, position, idx;
unsigned int old;
idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= numKeys)
return;
position = myHash(keys[idx], slotsElems);
index = position;
while (index < slotsElems) {
old = atomicCAS(&deviceHashTable[index].key, (unsigned int) 0, keys[idx]);
if (old == 0 || old == keys[idx]) {
deviceHashTable[index].value = values[idx];
return;
}
index++;
}
index = 0;
while (index < position) {
old = atomicCAS(&deviceHashTable[index].key, (unsigned int) 0, keys[idx]);
if (old == 0 || old == keys[idx]) {
deviceHashTable[index].value = values[idx];
return;
}
index++;
}
}
__global__ void reinsert(GpuHashTable::hashCell *newHashTable,
GpuHashTable::hashCell *copyHashTable,
unsigned int oldSize,
unsigned int slotsElems) {
int index, position, idx;
unsigned int old;
idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= oldSize)
return;
if (copyHashTable[idx].key == 0)
return;
position = myHash(copyHashTable[idx].key, slotsElems);
index = position;
while (index < slotsElems) {
old = atomicCAS(&newHashTable[index].key, (unsigned int) 0, copyHashTable[idx].key);
if (!old || old == copyHashTable[idx].key) {
newHashTable[index].value = copyHashTable[idx].value;
return;
}
index++;
}
index = 0;
while (index < position) {
old = atomicCAS(&newHashTable[index].key, (unsigned int) 0, copyHashTable[idx].key);
if (!old || old == copyHashTable[idx].key) {
newHashTable[index].value = copyHashTable[idx].value;
return;
}
index++;
}
}
__global__ void get(GpuHashTable::hashCell *deviceHashTable,
unsigned int *keys,
unsigned int *values,
unsigned int slotsElems,
int numKeys) {
int index, position, idx;
idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= numKeys)
return;
position = myHash(keys[idx], slotsElems);
index = position;
while (index < slotsElems) {
if (deviceHashTable[index].key == keys[idx]) {
values[idx] = deviceHashTable[index].value;
return;
}
index++;
}
index = 0;
while (index < position) {
if (deviceHashTable[index].key == keys[idx]) {
values[idx] = deviceHashTable[index].value;
return;
}
index++;
}
}
/* INIT HASH
*/
GpuHashTable::GpuHashTable(int size) {
cudaMalloc((void **) &hashTable, size * sizeof(hashCell));
cudaMemset(hashTable, 0, size * sizeof(hashCell));
slotsElems = size;
numElems = 0;
}
/* DESTROY HASH
*/
GpuHashTable::~GpuHashTable() {
cudaFree(hashTable);
slotsElems = 0;
numElems = 0;
}
/* RESHAPE HASH
*/
void GpuHashTable::reshape(int numBucketsReshape) {
hashCell *copyHashTable;
if (numElems) {
int mingridsize, threadblocksize, gridsize;
cudaOccupancyMaxPotentialBlockSize(&mingridsize, &threadblocksize, reinsert, 0, 0);
cudaMalloc(©HashTable, slotsElems * sizeof(hashCell));
cudaMemcpy(copyHashTable, hashTable, slotsElems * sizeof(hashCell), cudaMemcpyDeviceToDevice);
cudaFree(hashTable);
cudaMalloc((void **) &hashTable, numBucketsReshape * sizeof(hashCell));
cudaMemset(hashTable, 0, numBucketsReshape * sizeof(hashCell));
gridsize = ((unsigned int)slotsElems + threadblocksize - 1) / threadblocksize;
reinsert<<<gridsize, threadblocksize>>> (hashTable, copyHashTable, slotsElems, numBucketsReshape);
cudaDeviceSynchronize();
slotsElems = numBucketsReshape;
cudaFree(copyHashTable);
return;
}
cudaFree(hashTable);
cudaMalloc((void **) &hashTable, numBucketsReshape * sizeof(hashCell));
cudaMemset(hashTable, 0, numBucketsReshape * sizeof(hashCell));
slotsElems = numBucketsReshape;
}
/* INSERT BATCH
*/
bool GpuHashTable::insertBatch(int *keys, int *values, int numKeys) {
int mingridsize;
int threadblocksize;
cudaOccupancyMaxPotentialBlockSize(&mingridsize, &threadblocksize, insert, 0, 0);
int gridsize = ((unsigned int)numKeys + threadblocksize - 1) / threadblocksize;
unsigned int *deviceKeys, *deviceValues;
cudaMalloc(&deviceKeys, numKeys * sizeof(int));
cudaMemcpy(deviceKeys, keys, numKeys * sizeof(int), cudaMemcpyHostToDevice);
cudaMalloc(&deviceValues, numKeys * sizeof(int));
cudaMemcpy(deviceValues, values, numKeys * sizeof(int), cudaMemcpyHostToDevice);
if ((float)(numElems + numKeys) / slotsElems > 0.95f)
reshape((numElems + numKeys) * 1.25f);
insert<<<gridsize, threadblocksize>>> (hashTable, deviceKeys, deviceValues, slotsElems, numKeys);
cudaDeviceSynchronize();
cudaFree(deviceKeys);
cudaFree(deviceValues);
numElems += numKeys;
return true;
}
/* GET BATCH
*/
int* GpuHashTable::getBatch(int* keys, int numKeys) {
unsigned int *deviceKeys, *deviceValues;
int *hostValues;
int mingridsize, threadblocksize, gridsize;
cudaOccupancyMaxPotentialBlockSize(&mingridsize, &threadblocksize, get, 0, 0);
cudaMalloc(&deviceKeys, numKeys * sizeof(int));
cudaMemcpy(deviceKeys, keys, numKeys * sizeof(int), cudaMemcpyHostToDevice);
cudaMalloc(&deviceValues, numKeys * sizeof(int));
cudaMemset(deviceValues, 0, numKeys * sizeof(int));
hostValues = (int *) malloc(numKeys * sizeof(int));
gridsize = ((unsigned int)numKeys + threadblocksize - 1) / threadblocksize;
get<<<gridsize, threadblocksize>>> (hashTable, deviceKeys, deviceValues, slotsElems, numKeys);
cudaDeviceSynchronize();
cudaMemcpy(hostValues, deviceValues, numKeys * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(deviceKeys);
cudaFree(deviceValues);
return hostValues;
}
/* GET LOAD FACTOR
* num elements / hash total slots elements
*/
float GpuHashTable::loadFactor() {
float loadFactor = 0.f;
loadFactor = (float) numElems / slotsElems;
return loadFactor; // no larger than 1.0f = 100%
}
/*********************************************************/
#define HASH_INIT GpuHashTable GpuHashTable(1);
#define HASH_RESERVE(size) GpuHashTable.reshape(size);
#define HASH_BATCH_INSERT(keys, values, numKeys) GpuHashTable.insertBatch(keys, values, numKeys)
#define HASH_BATCH_GET(keys, numKeys) GpuHashTable.getBatch(keys, numKeys)
#define HASH_LOAD_FACTOR GpuHashTable.loadFactor()
#include "test_map.cpp"
|
e7bb4463d194fd50eac0db91425c34337bca6422.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
using namespace std;
__global__ void add(int n,float* a,float* b){
int index = blockIdx.x*blockDim.x+threadIdx.x;
int stride = blockDim.x*gridDim.x;
for(int i=index;i<n;i+=stride)
a[i] = a[i]+b[i];
}
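// Grid-stride loop: each thread starts at its global index and advances by
// the total number of launched threads, so any grid size covers all n elements.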
int main(void){
int N=1<<20;
float *x,*y;
hipMallocManaged(&x, N*sizeof(float)); // size is a byte count; the old 3-arg call allocated N bytes and passed bogus flags
hipMallocManaged(&y, N*sizeof(float));
for(int i=0;i<N;i++){
x[i] = 1.f;
y[i] = 2.f;
}
int blockSize = 256;
int numBlocks = (N+blockSize-1)/blockSize;
hipLaunchKernelGGL(( add), dim3(numBlocks),dim3(blockSize), 0, 0, N,x,y);
hipDeviceSynchronize();
float maxError = 0.0f;
for(int i=0;i<N;i++)
maxError = fmax(maxError, fabs(x[i]-3.0f)); // the kernel adds into its first argument, so x holds the result
cout << "Max error: " << maxError << endl;
hipFree(x);
hipFree(y);
return 0;
}
| e7bb4463d194fd50eac0db91425c34337bca6422.cu | #include <iostream>
#include <math.h>
using namespace std;
__global__ void add(int n,float* a,float* b){
int index = blockIdx.x*blockDim.x+threadIdx.x;
int stride = blockDim.x*gridDim.x;
for(int i=index;i<n;i+=stride)
a[i] = a[i]+b[i];
}
int main(void){
int N=1<<20;
float *x,*y;
cudaMallocManaged(&x, N*sizeof(float)); // size is a byte count; the old 3-arg call allocated N bytes and passed bogus flags
cudaMallocManaged(&y, N*sizeof(float));
for(int i=0;i<N;i++){
x[i] = 1.f;
y[i] = 2.f;
}
int blockSize = 256;
int numBlocks = (N+blockSize-1)/blockSize;
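// (N + blockSize - 1) / blockSize is an integer ceil-division, so the grid
// covers all N elements even when N is not a multiple of blockSize.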
add<<<numBlocks,blockSize>>>(N,x,y);
cudaDeviceSynchronize();
float maxError = 0.0f;
for(int i=0;i<N;i++)
maxError = fmax(maxError, fabs(x[i]-3.0f)); // the kernel adds into its first argument, so x holds the result
cout << "Max error: " << maxError << endl;
cudaFree(x);
cudaFree(y);
return 0;
}
|
0051e309ca1c640a722dbed6510d7cc475477307.hip | // !!! This is a file automatically generated by hipify!!!
#include "math_functions.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
//t+ is the lowest local maximum of the potential above the threshold
//t- is the highest local maximum below the threshold, regardless of whether it lies in a spike cluster
__global__ void t_v_alter_multispikev3( double *output, double *u, int *nSpikeClusters,int *t_alter, double *direction,const int *desired,const double * input, const int T_size, const int Numcurlayer, const double decay1, const double decay2, const double threshold, const int interval)
{
int neuron_id = blockIdx.x*blockDim.x +threadIdx.x;
if(neuron_id > Numcurlayer - 1 ) {return;}
int curid = neuron_id*T_size;
int endid = curid + T_size;
double m = 0;
double s = 0;
double e = 0;
double V = 0;
int nSpike_clusters = 0;
int nSpikes = 0;
double up_min = 1.7976931348623158e+308;
double down_max = -1.79769313486231570E+308;
double t_up_min = endid-1;
double t_down_max = t_up_min;
double V_nothr[2] = {0,0}; // 0: nothr_V at the previous step minus nothr_V two steps back; 1: nothr_V at the previous step
bool fired_pre = false;
bool incluster = false;
int dur = 0;
while(curid < endid)
{
m = m*decay1;
s = s*decay2;
//now, V is a tmp
V = input[curid];
if(V != 0)
{
m = m + V;
s = s + V;
}
e = e*decay1;
if (fired_pre)
{
e = e + threshold;
}
// now, fired_pre is fired_cur.
V = m -s - e;
u[curid] = V;
fired_pre = (V > threshold);
if(fired_pre)
{
output[curid] = 1.0;
incluster = true;
dur = 0;
nSpikes++;
}else{
dur++;
}
if(((dur>=interval)||(curid >=(endid-1)))&&(incluster == true)){
nSpike_clusters++;
incluster = false;
}
if((V_nothr[0]>0)&&(V_nothr[1] >V)){
// output[curid] = 1.0;
if((V_nothr[1]>threshold)&&(V_nothr[1]<up_min)){
// output[curid] = 2.0;
up_min = V_nothr[1];
t_up_min = curid-1;
}
else if((V_nothr[1]<=threshold)&&(V_nothr[1]>down_max)){
// output[curid] = 3.0;
down_max = V_nothr[1]; // the local maximum is the previous sample, matching t_down_max = curid-1
t_down_max = curid-1;
}
}
V_nothr[0] = V - V_nothr[1];
V_nothr[1] = V;
curid++;
}
nSpikeClusters[neuron_id] = nSpike_clusters;
if(nSpikes > desired[neuron_id]){
direction[neuron_id] = -1;
t_alter[neuron_id] = t_up_min -endid + T_size+1;//transform to matlab
}
else if(nSpikes < desired[neuron_id]){
direction[neuron_id] = 1;
t_alter[neuron_id] = t_down_max -endid + T_size+1;
}
}
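/* Summary of the error signal computed above: with too many spikes,
 * direction = -1 and t_alter marks the lowest supra-threshold local maximum;
 * with too few, direction = +1 and t_alter marks the highest sub-threshold
 * local maximum (the +1 converts to MATLAB's 1-based time index). */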
| 0051e309ca1c640a722dbed6510d7cc475477307.cu | #include "math_functions.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
//t+ is the lowest local maximum of the potential above the threshold
//t- is the highest local maximum below the threshold, regardless of whether it lies in a spike cluster
__global__ void t_v_alter_multispikev3( double *output, double *u, int *nSpikeClusters,int *t_alter, double *direction,const int *desired,const double * input, const int T_size, const int Numcurlayer, const double decay1, const double decay2, const double threshold, const int interval)
{
int neuron_id = blockIdx.x*blockDim.x +threadIdx.x;
if(neuron_id > Numcurlayer - 1 ) {return;}
int curid = neuron_id*T_size;
int endid = curid + T_size;
double m = 0;
double s = 0;
double e = 0;
double V = 0;
int nSpike_clusters = 0;
int nSpikes = 0;
double up_min = 1.7976931348623158e+308;
double down_max = -1.79769313486231570E+308;
double t_up_min = endid-1;
double t_down_max = t_up_min;
double V_nothr[2] = {0,0}; // 0: nothr_V at the previous step minus nothr_V two steps back; 1: nothr_V at the previous step
bool fired_pre = false;
bool incluster = false;
int dur = 0;
while(curid < endid)
{
m = m*decay1;
s = s*decay2;
//now, V is a tmp
V = input[curid];
if(V != 0)
{
m = m + V;
s = s + V;
}
e = e*decay1;
if (fired_pre)
{
e = e + threshold;
}
// now, fired_pre is fired_cur.
V = m -s - e;
u[curid] = V;
fired_pre = (V > threshold);
if(fired_pre)
{
output[curid] = 1.0;
incluster = true;
dur = 0;
nSpikes++;
}else{
dur++;
}
if(((dur>=interval)||(curid >=(endid-1)))&&(incluster == true)){
nSpike_clusters++;
incluster = false;
}
if((V_nothr[0]>0)&&(V_nothr[1] >V)){
// output[curid] = 1.0;
if((V_nothr[1]>threshold)&&(V_nothr[1]<up_min)){
// output[curid] = 2.0;
up_min = V_nothr[1];
t_up_min = curid-1;
}
else if((V_nothr[1]<=threshold)&&(V_nothr[1]>down_max)){
// output[curid] = 3.0;
down_max = V;
t_down_max = curid-1;
}
}
V_nothr[0] = V - V_nothr[1];
V_nothr[1] = V;
curid++;
}
nSpikeClusters[neuron_id] = nSpike_clusters;
if(nSpikes > desired[neuron_id]){
direction[neuron_id] = -1;
t_alter[neuron_id] = t_up_min -endid + T_size+1;//transform to matlab
}
else if(nSpikes < desired[neuron_id]){
direction[neuron_id] = 1;
t_alter[neuron_id] = t_down_max -endid + T_size+1;
}
}
|
9474a7d7c1f1efdbbe4c15ce750564745ba7edc1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by op2.py
//
//user function
__device__ void bres_calc_gpu( const double *x1, const double *x2, const double *q1,
const double *adt1, double *res1, const int *bound) {
double dx, dy, mu, ri, p1, vol1, p2, vol2, f;
dx = x1[0] - x2[0];
dy = x1[1] - x2[1];
ri = 1.0f / q1[0];
p1 = gm1_cuda * (q1[3] - 0.5f * ri * (q1[1] * q1[1] + q1[2] * q1[2]));
if (*bound == 1) {
res1[1] += +p1 * dy;
res1[2] += -p1 * dx;
} else {
vol1 = ri * (q1[1] * dy - q1[2] * dx);
ri = 1.0f / qinf_cuda[0];
p2 = gm1_cuda * (qinf_cuda[3] - 0.5f * ri * (qinf_cuda[1] * qinf_cuda[1] + qinf_cuda[2] * qinf_cuda[2]));
vol2 = ri * (qinf_cuda[1] * dy - qinf_cuda[2] * dx);
mu = (*adt1) * eps_cuda;
f = 0.5f * (vol1 * q1[0] + vol2 * qinf_cuda[0]) + mu * (q1[0] - qinf_cuda[0]);
res1[0] += f;
f = 0.5f * (vol1 * q1[1] + p1 * dy + vol2 * qinf_cuda[1] + p2 * dy) +
mu * (q1[1] - qinf_cuda[1]);
res1[1] += f;
f = 0.5f * (vol1 * q1[2] - p1 * dx + vol2 * qinf_cuda[2] - p2 * dx) +
mu * (q1[2] - qinf_cuda[2]);
res1[2] += f;
f = 0.5f * (vol1 * (q1[3] + p1) + vol2 * (qinf_cuda[3] + p2)) +
mu * (q1[3] - qinf_cuda[3]);
res1[3] += f;
}
}
// CUDA kernel function
__global__ void op_cuda_bres_calc(
const double *__restrict ind_arg0,
const double *__restrict ind_arg1,
const double *__restrict ind_arg2,
double *__restrict ind_arg3,
const int *__restrict opDat0Map,
const int *__restrict opDat2Map,
const int *__restrict arg5,
int block_offset,
int *blkmap,
int *offset,
int *nelems,
int *ncolors,
int *colors,
int nblocks,
int set_size) {
double arg4_l[4];
__shared__ int nelems2, ncolor;
__shared__ int nelem, offset_b;
extern __shared__ char shared[];
if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) {
return;
}
if (threadIdx.x==0) {
//get sizes and shift pointers and direct-mapped data
int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset];
nelem = nelems[blockId];
offset_b = offset[blockId];
nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x);
ncolor = ncolors[blockId];
}
__syncthreads(); // make sure all of above completed
for ( int n=threadIdx.x; n<nelems2; n+=blockDim.x ){
int col2 = -1;
int map0idx;
int map1idx;
int map2idx;
if (n<nelem) {
//initialise local variables
for ( int d=0; d<4; d++ ){
arg4_l[d] = ZERO_double;
}
map0idx = opDat0Map[n + offset_b + set_size * 0];
map1idx = opDat0Map[n + offset_b + set_size * 1];
map2idx = opDat2Map[n + offset_b + set_size * 0];
//user-supplied kernel call
bres_calc_gpu(ind_arg0+map0idx*2,
ind_arg0+map1idx*2,
ind_arg1+map2idx*4,
ind_arg2+map2idx*1,
arg4_l,
arg5+(n+offset_b)*1);
col2 = colors[n+offset_b];
}
//store local variables
for ( int col=0; col<ncolor; col++ ){
if (col2==col) {
arg4_l[0] += ind_arg3[0+map2idx*4];
arg4_l[1] += ind_arg3[1+map2idx*4];
arg4_l[2] += ind_arg3[2+map2idx*4];
arg4_l[3] += ind_arg3[3+map2idx*4];
ind_arg3[0+map2idx*4] = arg4_l[0];
ind_arg3[1+map2idx*4] = arg4_l[1];
ind_arg3[2+map2idx*4] = arg4_l[2];
ind_arg3[3+map2idx*4] = arg4_l[3];
}
__syncthreads();
}
}
}
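/* The colour loop above serialises the indirect increments: edges were
 * coloured so that no two edges of the same colour touch the same cell, and
 * the __syncthreads() between colours keeps the read-modify-write of
 * ind_arg3 (res) free of races. */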
//host stub function
void op_par_loop_bres_calc(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5){
int nargs = 6;
op_arg args[6];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
args[3] = arg3;
args[4] = arg4;
args[5] = arg5;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(3);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[3].name = name;
OP_kernels[3].count += 1;
int ninds = 4;
int inds[6] = {0,0,1,2,3,-1};
if (OP_diags>2) {
printf(" kernel routine with indirection: bres_calc\n");
}
//get plan
#ifdef OP_PART_SIZE_3
int part_size = OP_PART_SIZE_3;
#else
int part_size = OP_part_size;
#endif
int set_size = op_mpi_halo_exchanges_cuda(set, nargs, args);
if (set->size > 0) {
op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds);
//execute plan
int block_offset = 0;
for ( int col=0; col<Plan->ncolors; col++ ){
if (col==Plan->ncolors_core) {
op_mpi_wait_all_cuda(nargs, args);
}
#ifdef OP_BLOCK_SIZE_3
int nthread = OP_BLOCK_SIZE_3;
#else
int nthread = OP_block_size;
#endif
dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col],
Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1);
if (Plan->ncolblk[col] > 0) {
hipLaunchKernelGGL(( op_cuda_bres_calc), dim3(nblocks),dim3(nthread), 0, 0,
(double *)arg0.data_d,
(double *)arg2.data_d,
(double *)arg3.data_d,
(double *)arg4.data_d,
arg0.map_data_d,
arg2.map_data_d,
(int*)arg5.data_d,
block_offset,
Plan->blkmap,
Plan->offset,
Plan->nelems,
Plan->nthrcol,
Plan->thrcol,
Plan->ncolblk[col],
set->size+set->exec_size);
}
block_offset += Plan->ncolblk[col];
}
OP_kernels[3].transfer += Plan->transfer;
OP_kernels[3].transfer2 += Plan->transfer2;
}
op_mpi_set_dirtybit_cuda(nargs, args);
cutilSafeCall(hipDeviceSynchronize());
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[3].time += wall_t2 - wall_t1;
}
| 9474a7d7c1f1efdbbe4c15ce750564745ba7edc1.cu | //
// auto-generated by op2.py
//
//user function
__device__ void bres_calc_gpu( const double *x1, const double *x2, const double *q1,
const double *adt1, double *res1, const int *bound) {
double dx, dy, mu, ri, p1, vol1, p2, vol2, f;
dx = x1[0] - x2[0];
dy = x1[1] - x2[1];
ri = 1.0f / q1[0];
p1 = gm1_cuda * (q1[3] - 0.5f * ri * (q1[1] * q1[1] + q1[2] * q1[2]));
if (*bound == 1) {
res1[1] += +p1 * dy;
res1[2] += -p1 * dx;
} else {
vol1 = ri * (q1[1] * dy - q1[2] * dx);
ri = 1.0f / qinf_cuda[0];
p2 = gm1_cuda * (qinf_cuda[3] - 0.5f * ri * (qinf_cuda[1] * qinf_cuda[1] + qinf_cuda[2] * qinf_cuda[2]));
vol2 = ri * (qinf_cuda[1] * dy - qinf_cuda[2] * dx);
mu = (*adt1) * eps_cuda;
f = 0.5f * (vol1 * q1[0] + vol2 * qinf_cuda[0]) + mu * (q1[0] - qinf_cuda[0]);
res1[0] += f;
f = 0.5f * (vol1 * q1[1] + p1 * dy + vol2 * qinf_cuda[1] + p2 * dy) +
mu * (q1[1] - qinf_cuda[1]);
res1[1] += f;
f = 0.5f * (vol1 * q1[2] - p1 * dx + vol2 * qinf_cuda[2] - p2 * dx) +
mu * (q1[2] - qinf_cuda[2]);
res1[2] += f;
f = 0.5f * (vol1 * (q1[3] + p1) + vol2 * (qinf_cuda[3] + p2)) +
mu * (q1[3] - qinf_cuda[3]);
res1[3] += f;
}
}
// CUDA kernel function
__global__ void op_cuda_bres_calc(
const double *__restrict ind_arg0,
const double *__restrict ind_arg1,
const double *__restrict ind_arg2,
double *__restrict ind_arg3,
const int *__restrict opDat0Map,
const int *__restrict opDat2Map,
const int *__restrict arg5,
int block_offset,
int *blkmap,
int *offset,
int *nelems,
int *ncolors,
int *colors,
int nblocks,
int set_size) {
double arg4_l[4];
__shared__ int nelems2, ncolor;
__shared__ int nelem, offset_b;
extern __shared__ char shared[];
if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) {
return;
}
if (threadIdx.x==0) {
//get sizes and shift pointers and direct-mapped data
int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset];
nelem = nelems[blockId];
offset_b = offset[blockId];
nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x);
ncolor = ncolors[blockId];
}
__syncthreads(); // make sure all of above completed
for ( int n=threadIdx.x; n<nelems2; n+=blockDim.x ){
int col2 = -1;
int map0idx;
int map1idx;
int map2idx;
if (n<nelem) {
//initialise local variables
for ( int d=0; d<4; d++ ){
arg4_l[d] = ZERO_double;
}
map0idx = opDat0Map[n + offset_b + set_size * 0];
map1idx = opDat0Map[n + offset_b + set_size * 1];
map2idx = opDat2Map[n + offset_b + set_size * 0];
//user-supplied kernel call
bres_calc_gpu(ind_arg0+map0idx*2,
ind_arg0+map1idx*2,
ind_arg1+map2idx*4,
ind_arg2+map2idx*1,
arg4_l,
arg5+(n+offset_b)*1);
col2 = colors[n+offset_b];
}
//store local variables
for ( int col=0; col<ncolor; col++ ){
if (col2==col) {
arg4_l[0] += ind_arg3[0+map2idx*4];
arg4_l[1] += ind_arg3[1+map2idx*4];
arg4_l[2] += ind_arg3[2+map2idx*4];
arg4_l[3] += ind_arg3[3+map2idx*4];
ind_arg3[0+map2idx*4] = arg4_l[0];
ind_arg3[1+map2idx*4] = arg4_l[1];
ind_arg3[2+map2idx*4] = arg4_l[2];
ind_arg3[3+map2idx*4] = arg4_l[3];
}
__syncthreads();
}
}
}
//host stub function
void op_par_loop_bres_calc(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5){
int nargs = 6;
op_arg args[6];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
args[3] = arg3;
args[4] = arg4;
args[5] = arg5;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(3);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[3].name = name;
OP_kernels[3].count += 1;
int ninds = 4;
int inds[6] = {0,0,1,2,3,-1};
if (OP_diags>2) {
printf(" kernel routine with indirection: bres_calc\n");
}
//get plan
#ifdef OP_PART_SIZE_3
int part_size = OP_PART_SIZE_3;
#else
int part_size = OP_part_size;
#endif
int set_size = op_mpi_halo_exchanges_cuda(set, nargs, args);
if (set->size > 0) {
op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds);
//execute plan
int block_offset = 0;
for ( int col=0; col<Plan->ncolors; col++ ){
if (col==Plan->ncolors_core) {
op_mpi_wait_all_cuda(nargs, args);
}
#ifdef OP_BLOCK_SIZE_3
int nthread = OP_BLOCK_SIZE_3;
#else
int nthread = OP_block_size;
#endif
dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col],
Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1);
if (Plan->ncolblk[col] > 0) {
op_cuda_bres_calc<<<nblocks,nthread>>>(
(double *)arg0.data_d,
(double *)arg2.data_d,
(double *)arg3.data_d,
(double *)arg4.data_d,
arg0.map_data_d,
arg2.map_data_d,
(int*)arg5.data_d,
block_offset,
Plan->blkmap,
Plan->offset,
Plan->nelems,
Plan->nthrcol,
Plan->thrcol,
Plan->ncolblk[col],
set->size+set->exec_size);
}
block_offset += Plan->ncolblk[col];
}
OP_kernels[3].transfer += Plan->transfer;
OP_kernels[3].transfer2 += Plan->transfer2;
}
op_mpi_set_dirtybit_cuda(nargs, args);
cutilSafeCall(cudaDeviceSynchronize());
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[3].time += wall_t2 - wall_t1;
}
|
63a1f5b9d9ad0312b31ee535163bcd3a46175b14.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "timer.h"
#define NUM_THREADS 500000
#define ARRAY_SIZE 100
#define BLOCK_WIDTH 500
void print_array(int *array, int size)
{
printf("{ ");
for (int i = 0; i < size; i++) { printf("%d ", array[i]); }
printf("}\n");
}
__global__ void increment_naive(int *g){
// which thread is this?
int i = blockIdx.x * blockDim.x + threadIdx.x;
// each thread to increment consecutive elements, wrapping at ARRAY_SIZE
i = i % ARRAY_SIZE;
g[i] = g[i] + 1;
}
__global__ void increment_atomic(int *g)
{
// which thread is this?
int i = blockIdx.x * blockDim.x + threadIdx.x;
// each thread to increment consecutive elements, wrapping at ARRAY_SIZE
i = i % ARRAY_SIZE;
atomicAdd(& g[i], 1);
}
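// atomicAdd makes the read-modify-write indivisible, so every element ends at
// NUM_THREADS/ARRAY_SIZE = 5000; the naive version above loses most updates
// to write-write races.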
int main(int argc,char **argv){
GpuTimer timer;
printf("%d total threads in %d blocks writing into %d array elements\n",
NUM_THREADS, NUM_THREADS / BLOCK_WIDTH, ARRAY_SIZE);
// declare and allocate host memory
int h_array[ARRAY_SIZE];
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
// declare, allocate, and zero out GPU memory
int * d_array;
hipMalloc((void **) &d_array, ARRAY_BYTES);
hipMemset((void *) d_array, 0, ARRAY_BYTES);
// launch the kernel - comment out one of these
timer.Start();
//increment_naive<<<NUM_THREADS/BLOCK_WIDTH, BLOCK_WIDTH>>>(d_array);
hipLaunchKernelGGL(( increment_atomic), dim3(NUM_THREADS/BLOCK_WIDTH), dim3(BLOCK_WIDTH) , 0, 0, d_array);
// deviceSynchronize is not needed because eventRecord is used in the timer.
// There are two ways to time CUDA kernels: i) eventRecord,
// ii) deviceSynchronize (only reliable when all work runs in a single stream)
timer.Stop();
// copy back the array of sums from GPU and print
hipMemcpy(h_array, d_array, ARRAY_BYTES, hipMemcpyDeviceToHost);
print_array(h_array, ARRAY_SIZE);
printf("Time elapsed = %g ms\n", timer.Elapsed());
// free GPU memory allocation and exit
hipFree(d_array);
return 0;
}
| 63a1f5b9d9ad0312b31ee535163bcd3a46175b14.cu | #include <stdio.h>
#include "timer.h"
#define NUM_THREADS 500000
#define ARRAY_SIZE 100
#define BLOCK_WIDTH 500
void print_array(int *array, int size)
{
printf("{ ");
for (int i = 0; i < size; i++) { printf("%d ", array[i]); }
printf("}\n");
}
__global__ void increment_naive(int *g){
// which thread is this?
int i = blockIdx.x * blockDim.x + threadIdx.x;
// each thread to increment consecutive elements, wrapping at ARRAY_SIZE
i = i % ARRAY_SIZE;
g[i] = g[i] + 1;
}
__global__ void increment_atomic(int *g)
{
// which thread is this?
int i = blockIdx.x * blockDim.x + threadIdx.x;
// each thread to increment consecutive elements, wrapping at ARRAY_SIZE
i = i % ARRAY_SIZE;
atomicAdd(& g[i], 1);
}
int main(int argc,char **argv){
GpuTimer timer;
printf("%d total threads in %d blocks writing into %d array elements\n",
NUM_THREADS, NUM_THREADS / BLOCK_WIDTH, ARRAY_SIZE);
// declare and allocate host memory
int h_array[ARRAY_SIZE];
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
// declare, allocate, and zero out GPU memory
int * d_array;
cudaMalloc((void **) &d_array, ARRAY_BYTES);
cudaMemset((void *) d_array, 0, ARRAY_BYTES);
// launch the kernel - comment out one of these
timer.Start();
//increment_naive<<<NUM_THREADS/BLOCK_WIDTH, BLOCK_WIDTH>>>(d_array);
increment_atomic<<< NUM_THREADS/BLOCK_WIDTH, BLOCK_WIDTH >>>(d_array);
// deviceSynchronize is not needed because eventRecord is used in the timer.
// There are two ways to time CUDA kernels: i) eventRecord,
// ii) deviceSynchronize (only reliable when all work runs in a single stream)
timer.Stop();
// copy back the array of sums from GPU and print
cudaMemcpy(h_array, d_array, ARRAY_BYTES, cudaMemcpyDeviceToHost);
print_array(h_array, ARRAY_SIZE);
printf("Time elapsed = %g ms\n", timer.Elapsed());
// free GPU memory allocation and exit
cudaFree(d_array);
return 0;
}
|
a766f50faa9c3f8ab3e2c800d3324e23e3f8f6d9.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "Sub_V_S.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *a = NULL;
hipMalloc(&a, XSIZE*YSIZE*sizeof(float)); // allocate as floats, not bytes
const float b = 1;
float *out = NULL;
hipMalloc(&out, XSIZE*YSIZE*sizeof(float));
const int n = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
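// Round the problem size up to the next multiple of the block shape so the
// grid divides evenly; this is a ceil-division on each axis.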
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(Sub_V_S, dim3(gridBlock), dim3(threadBlock), 0, 0, a, b, out, n);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(Sub_V_S, dim3(gridBlock), dim3(threadBlock), 0, 0, a, b, out, n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(Sub_V_S, dim3(gridBlock), dim3(threadBlock), 0, 0, a, b, out, n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | a766f50faa9c3f8ab3e2c800d3324e23e3f8f6d9.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "Sub_V_S.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE*sizeof(float)); // allocate as floats, not bytes
const float b = 1;
float *out = NULL;
cudaMalloc(&out, XSIZE*YSIZE*sizeof(float));
const int n = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
Sub_V_S<<<gridBlock,threadBlock>>>(a,b,out,n);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
Sub_V_S<<<gridBlock,threadBlock>>>(a,b,out,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
Sub_V_S<<<gridBlock,threadBlock>>>(a,b,out,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
54463403ea3e48d7478da2d216ad400b19696a61.hip | // !!! This is a file automatically generated by hipify!!!
/**************************************************************************\
|
| Copyright (C) 2009 Marc Stevens
|
| This program is free software: you can redistribute it and/or modify
| it under the terms of the GNU General Public License as published by
| the Free Software Foundation, either version 3 of the License, or
| (at your option) any later version.
|
| This program is distributed in the hope that it will be useful,
| but WITHOUT ANY WARRANTY; without even the implied warranty of
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
| GNU General Public License for more details.
|
| You should have received a copy of the GNU General Public License
| along with this program. If not, see <http://www.gnu.org/licenses/>.
|
\**************************************************************************/
#include <iostream>
#include <vector>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <cutil_inline.h>
#include <cutil.h>
#include <hip/hip_runtime.h>
#include <boost/cstdint.hpp>
using namespace std;
typedef boost::uint32_t uint32;
typedef boost::uint64_t uint64;
#define TRAIL_NOCONSTRUCTOR
#include "birthday_types.hpp"
class cuda_device_detail {
public:
uint32 device;
uint32 blocks;
trail_type* buffer_host;
};
/* We assume that these are _thread specific_ (instead of global) storage managed by the CUDA runtime libraries */
__device__ trail_type working_states2[122880];
__device__ trail_type buffer2[122880];
__constant__ uint32 msg1[16], msg2[16], ihv1[4], ihv2[4], ihv2mod[4];
__constant__ uint32 precomp1[4], precomp2[4];
__constant__ uint32 hybridmask, distinguishedpointmask, maximumpathlength;
/* F, G and H are basic MD5 functions: selection, majority, parity */
#define MD5_F(x, y, z) (((x) & (y)) | ((~x) & (z)))
#define MD5_G(x, y, z) (((x) & (z)) | ((y) & (~z)))
#define MD5_H(x, y, z) ((x) ^ (y) ^ (z))
#define MD5_I(x, y, z) ((y) ^ ((x) | (~z)))
/* ROTATE_LEFT rotates x left n bits */
#define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32-(n))))
/* FF, GG, HH, and II transformations for rounds 1, 2, 3, and 4 */
/* Rotation is separate from addition to prevent recomputation */
#define MD5_FF(a, b, c, d, x, s, ac) \
{(a) += MD5_F ((b), (c), (d)) + (x) + (uint32)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define MD5_GG(a, b, c, d, x, s, ac) \
{(a) += MD5_G ((b), (c), (d)) + (x) + (uint32)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define MD5_HH(a, b, c, d, x, s, ac) \
{(a) += MD5_H ((b), (c), (d)) + (x) + (uint32)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define MD5_II(a, b, c, d, x, s, ac) \
{(a) += MD5_I ((b), (c), (d)) + (x) + (uint32)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
__global__ void cuda_md5_init()
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
working_states2[idx].len = 0;
buffer2[idx].len = 0;
}
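/* The work kernels below iterate an MD5-based pseudo-random walk over
 * (x, y, z) triples; a trail ends when the distinguishedpointmask bits of x
 * are all zero, and its (start, end, len) record is written to buffer2 for
 * the host to collect and match against other trails. */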
bool cuda_device::init(uint32 device, const uint32 ihv1b[4], const uint32 ihv2b[4], const uint32 ihv2modb[4], const uint32 msg1b[16], const uint32 msg2b[16], uint32 hmask, uint32 dpmask, uint32 maxlen)
{
detail = new cuda_device_detail;
detail->device = device;
int deviceCount;
CUDA_SAFE_CALL( hipGetDeviceCount(&deviceCount) );
if (deviceCount == 0) {
cout << "There is no device supporting CUDA!" << endl;
return false;
}
hipDeviceProp_t deviceProp;
CUDA_SAFE_CALL( hipGetDeviceProperties(&deviceProp, device) );
if (deviceProp.major == 9999) {
cout << "Emulation device found." << endl;
return false;
}
cout << "CUDA device " << device << ": " << deviceProp.name << " (" << 8 * deviceProp.multiProcessorCount << " cores)" << endl;
detail->blocks = 16 * deviceProp.multiProcessorCount;
CUDA_SAFE_CALL( hipSetDevice(device) );
CUDA_SAFE_CALL( hipSetDeviceFlags( hipDeviceScheduleBlockingSync ) );
CUDA_SAFE_CALL( hipHostMalloc( (void**)(&(detail->buffer_host)), 122880 * sizeof(trail_type) ) );
uint32 pc1[4], pc2[4];
uint32 a = ihv1b[0], b = ihv1b[1], c = ihv1b[2], d = ihv1b[3];
MD5_FF ( a, b, c, d, msg1b[ 0], 7, 3614090360); /* 1 */
MD5_FF ( d, a, b, c, msg1b[ 1], 12, 3905402710); /* 2 */
MD5_FF ( c, d, a, b, msg1b[ 2], 17, 606105819); /* 3 */
MD5_FF ( b, c, d, a, msg1b[ 3], 22, 3250441966); /* 4 */
MD5_FF ( a, b, c, d, msg1b[ 4], 7, 4118548399); /* 5 */
MD5_FF ( d, a, b, c, msg1b[ 5], 12, 1200080426); /* 6 */
MD5_FF ( c, d, a, b, msg1b[ 6], 17, 2821735955); /* 7 */
MD5_FF ( b, c, d, a, msg1b[ 7], 22, 4249261313); /* 8 */
MD5_FF ( a, b, c, d, msg1b[ 8], 7, 1770035416); /* 9 */
MD5_FF ( d, a, b, c, msg1b[ 9], 12, 2336552879); /* 10 */
MD5_FF ( c, d, a, b, msg1b[10], 17, 4294925233); /* 11 */
MD5_FF ( b, c, d, a, msg1b[11], 22, 2304563134); /* 12 */
MD5_FF ( a, b, c, d, msg1b[12], 7, 1804603682); /* 13 */
pc1[0] = a; pc1[1] = b; pc1[2] = c; pc1[3] = d;
a = ihv2b[0]; b = ihv2b[1]; c = ihv2b[2]; d = ihv2b[3];
MD5_FF ( a, b, c, d, msg2b[ 0], 7, 3614090360); /* 1 */
MD5_FF ( d, a, b, c, msg2b[ 1], 12, 3905402710); /* 2 */
MD5_FF ( c, d, a, b, msg2b[ 2], 17, 606105819); /* 3 */
MD5_FF ( b, c, d, a, msg2b[ 3], 22, 3250441966); /* 4 */
MD5_FF ( a, b, c, d, msg2b[ 4], 7, 4118548399); /* 5 */
MD5_FF ( d, a, b, c, msg2b[ 5], 12, 1200080426); /* 6 */
MD5_FF ( c, d, a, b, msg2b[ 6], 17, 2821735955); /* 7 */
MD5_FF ( b, c, d, a, msg2b[ 7], 22, 4249261313); /* 8 */
MD5_FF ( a, b, c, d, msg2b[ 8], 7, 1770035416); /* 9 */
MD5_FF ( d, a, b, c, msg2b[ 9], 12, 2336552879); /* 10 */
MD5_FF ( c, d, a, b, msg2b[10], 17, 4294925233); /* 11 */
MD5_FF ( b, c, d, a, msg2b[11], 22, 2304563134); /* 12 */
MD5_FF ( a, b, c, d, msg2b[12], 7, 1804603682); /* 13 */
pc2[0] = a; pc2[1] = b; pc2[2] = c; pc2[3] = d;
CUDA_SAFE_CALL( hipMemcpyToSymbol(msg1, msg1b, sizeof(msg1)) );
CUDA_SAFE_CALL( hipMemcpyToSymbol(msg2, msg2b, sizeof(msg2)) );
CUDA_SAFE_CALL( hipMemcpyToSymbol(ihv1, ihv1b, sizeof(ihv1)) );
CUDA_SAFE_CALL( hipMemcpyToSymbol(ihv2, ihv2b, sizeof(ihv2)) );
CUDA_SAFE_CALL( hipMemcpyToSymbol(ihv2mod, ihv2modb, sizeof(ihv2mod)) );
CUDA_SAFE_CALL( hipMemcpyToSymbol(precomp1, pc1, sizeof(pc1)) );
CUDA_SAFE_CALL( hipMemcpyToSymbol(precomp2, pc2, sizeof(pc2)) );
CUDA_SAFE_CALL( hipMemcpyToSymbol(hybridmask, &hmask, sizeof(hmask)) );
CUDA_SAFE_CALL( hipMemcpyToSymbol(distinguishedpointmask, &dpmask, sizeof(dpmask)) );
CUDA_SAFE_CALL( hipMemcpyToSymbol(maximumpathlength, &maxlen, sizeof(maxlen)) );
hipLaunchKernelGGL(( cuda_md5_init), dim3(detail->blocks), dim3(256), 0, 0, );
return true;
}
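/* MD5 steps 1-13 depend only on the fixed message words, so they are
 * precomputed on the host (precomp1/precomp2); the kernels resume at step 14,
 * injecting the walk state (z, x, y) as message words 13-15. */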
__global__ void cuda_md5_work(uint64 seed)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
buffer2[idx].len = 0;
uint32 len = working_states2[idx].len;
uint32 x = working_states2[idx].end[0];
uint32 y = working_states2[idx].end[1];
uint32 z = working_states2[idx].end[2];
if (len >= maximumpathlength || len == 0) {
x = uint32(seed>>32) ^ threadIdx.x;
y = uint32(seed) ^ blockIdx.x;
z = 0;
working_states2[idx].start[0] = x;
working_states2[idx].start[1] = y;
working_states2[idx].start[2] = z;
len = 0;
}
__syncthreads();
for (unsigned j = 0; j < 0x100; ++j)
{
{
uint32* in = msg1;
uint32 a = precomp1[0], b = precomp1[1], c = precomp1[2], d = precomp1[3];
if (x > y) {
in = msg2;
a = precomp2[0]; b = precomp2[1]; c = precomp2[2]; d = precomp2[3];
}
MD5_FF ( d, a, b, c, z, 12, 4254626195); /* 14 */
MD5_FF ( c, d, a, b, x, 17, 2792965006); /* 15 */
MD5_FF ( b, c, d, a, y, 22, 1236535329); /* 16 */
MD5_GG ( a, b, c, d, in[ 1], 5, 4129170786); /* 17 */
MD5_GG ( d, a, b, c, in[ 6], 9, 3225465664); /* 18 */
MD5_GG ( c, d, a, b, in[11], 14, 643717713); /* 19 */
MD5_GG ( b, c, d, a, in[ 0], 20, 3921069994); /* 20 */
MD5_GG ( a, b, c, d, in[ 5], 5, 3593408605); /* 21 */
MD5_GG ( d, a, b, c, in[10], 9, 38016083); /* 22 */
MD5_GG ( c, d, a, b, y, 14, 3634488961); /* 23 */
MD5_GG ( b, c, d, a, in[ 4], 20, 3889429448); /* 24 */
MD5_GG ( a, b, c, d, in[ 9], 5, 568446438); /* 25 */
MD5_GG ( d, a, b, c, x, 9, 3275163606); /* 26 */
MD5_GG ( c, d, a, b, in[ 3], 14, 4107603335); /* 27 */
MD5_GG ( b, c, d, a, in[ 8], 20, 1163531501); /* 28 */
MD5_GG ( a, b, c, d, z, 5, 2850285829); /* 29 */
MD5_GG ( d, a, b, c, in[ 2], 9, 4243563512); /* 30 */
MD5_GG ( c, d, a, b, in[ 7], 14, 1735328473); /* 31 */
MD5_GG ( b, c, d, a, in[12], 20, 2368359562); /* 32 */
MD5_HH ( a, b, c, d, in[ 5], 4, 4294588738); /* 33 */
MD5_HH ( d, a, b, c, in[ 8], 11, 2272392833); /* 34 */
MD5_HH ( c, d, a, b, in[11], 16, 1839030562); /* 35 */
MD5_HH ( b, c, d, a, x, 23, 4259657740); /* 36 */
MD5_HH ( a, b, c, d, in[ 1], 4, 2763975236); /* 37 */
MD5_HH ( d, a, b, c, in[ 4], 11, 1272893353); /* 38 */
MD5_HH ( c, d, a, b, in[ 7], 16, 4139469664); /* 39 */
MD5_HH ( b, c, d, a, in[10], 23, 3200236656); /* 40 */
MD5_HH ( a, b, c, d, z, 4, 681279174); /* 41 */
MD5_HH ( d, a, b, c, in[ 0], 11, 3936430074); /* 42 */
MD5_HH ( c, d, a, b, in[ 3], 16, 3572445317); /* 43 */
MD5_HH ( b, c, d, a, in[ 6], 23, 76029189); /* 44 */
MD5_HH ( a, b, c, d, in[ 9], 4, 3654602809); /* 45 */
MD5_HH ( d, a, b, c, in[12], 11, 3873151461); /* 46 */
MD5_HH ( c, d, a, b, y, 16, 530742520); /* 47 */
MD5_HH ( b, c, d, a, in[ 2], 23, 3299628645); /* 48 */
MD5_II ( a, b, c, d, in[ 0], 6, 4096336452); /* 49 */
MD5_II ( d, a, b, c, in[ 7], 10, 1126891415); /* 50 */
MD5_II ( c, d, a, b, x, 15, 2878612391); /* 51 */
MD5_II ( b, c, d, a, in[ 5], 21, 4237533241); /* 52 */
MD5_II ( a, b, c, d, in[12], 6, 1700485571); /* 53 */
MD5_II ( d, a, b, c, in[ 3], 10, 2399980690); /* 54 */
MD5_II ( c, d, a, b, in[10], 15, 4293915773); /* 55 */
MD5_II ( b, c, d, a, in[ 1], 21, 2240044497); /* 56 */
MD5_II ( a, b, c, d, in[ 8], 6, 1873313359); /* 57 */
MD5_II ( d, a, b, c, y, 10, 4264355552); /* 58 */
MD5_II ( c, d, a, b, in[ 6], 15, 2734768916); /* 59 */
MD5_II ( b, c, d, a, z, 21, 1309151649); /* 60 */
MD5_II ( a, b, c, d, in[ 4], 6, 4149444226); /* 61 */
MD5_II ( d, a, b, c, in[11], 10, 3174756917); /* 62 */
MD5_II ( c, d, a, b, in[ 2], 15, 718787259); /* 63 */
MD5_II ( b, c, d, a, in[ 9], 21, 3951481745); /* 64 */
if (x <= y) {
a += ihv1[0];
b += ihv1[1];
c += ihv1[2];
d += ihv1[3];
} else {
a += ihv2mod[0];
b += ihv2mod[1];
c += ihv2mod[2];
d += ihv2mod[3];
}
x = a;
y = d - c;
z = (d - b) & hybridmask;
++len;
}
{
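// A trail ends at a "distinguished point" (the masked bits of x are all zero):
// flush its start/end/length to the output buffer and reseed this thread.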
if (0 == (x & distinguishedpointmask)) {
buffer2[idx].end[0] = x;
buffer2[idx].end[1] = y;
buffer2[idx].end[2] = z;
buffer2[idx].len = len;
buffer2[idx].start[0] = working_states2[idx].start[0];
buffer2[idx].start[1] = working_states2[idx].start[1];
buffer2[idx].start[2] = working_states2[idx].start[2];
x = uint32(seed>>32) ^ ((threadIdx.x<<16) + len);
y = uint32(seed) ^ blockIdx.x;
z = 0;
len = 0;
working_states2[idx].start[0] = x;
working_states2[idx].start[1] = y;
working_states2[idx].start[2] = z;
}
}
__syncthreads();
}
working_states2[idx].end[0] = x;
working_states2[idx].end[1] = y;
working_states2[idx].end[2] = z;
working_states2[idx].len = len;
}
__global__ void cuda_md5_workmod(uint64 seed)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
buffer2[idx].len = 0;
uint32 len = working_states2[idx].len;
uint32 x = working_states2[idx].end[0];
uint32 y = working_states2[idx].end[1];
uint32 z = working_states2[idx].end[2];
if (len >= maximumpathlength || len == 0) {
x = uint32(seed>>32) ^ threadIdx.x;
y = uint32(seed) ^ blockIdx.x;
z = 0;
working_states2[idx].start[0] = x;
working_states2[idx].start[1] = y;
working_states2[idx].start[2] = z;
len = 0;
}
__syncthreads();
for (unsigned j = 0; j < 0x100; ++j)
{
{
uint32* in = msg1;
uint32 a = precomp1[0], b = precomp1[1], c = precomp1[2], d = precomp1[3];
if (x > y) {
in = msg2;
a = precomp2[0]; b = precomp2[1]; c = precomp2[2]; d = precomp2[3];
}
MD5_FF ( d, a, b, c, z, 12, 4254626195); /* 14 */
MD5_FF ( c, d, a, b, x, 17, 2792965006); /* 15 */
MD5_FF ( b, c, d, a, y, 22, 1236535329); /* 16 */
MD5_GG ( a, b, c, d, in[ 1], 5, 4129170786); /* 17 */
MD5_GG ( d, a, b, c, in[ 6], 9, 3225465664); /* 18 */
MD5_GG ( c, d, a, b, in[11], 14, 643717713); /* 19 */
MD5_GG ( b, c, d, a, in[ 0], 20, 3921069994); /* 20 */
MD5_GG ( a, b, c, d, in[ 5], 5, 3593408605); /* 21 */
MD5_GG ( d, a, b, c, in[10], 9, 38016083); /* 22 */
MD5_GG ( c, d, a, b, y, 14, 3634488961); /* 23 */
MD5_GG ( b, c, d, a, in[ 4], 20, 3889429448); /* 24 */
MD5_GG ( a, b, c, d, in[ 9], 5, 568446438); /* 25 */
MD5_GG ( d, a, b, c, x, 9, 3275163606); /* 26 */
MD5_GG ( c, d, a, b, in[ 3], 14, 4107603335); /* 27 */
MD5_GG ( b, c, d, a, in[ 8], 20, 1163531501); /* 28 */
MD5_GG ( a, b, c, d, z, 5, 2850285829); /* 29 */
MD5_GG ( d, a, b, c, in[ 2], 9, 4243563512); /* 30 */
MD5_GG ( c, d, a, b, in[ 7], 14, 1735328473); /* 31 */
MD5_GG ( b, c, d, a, in[12], 20, 2368359562); /* 32 */
MD5_HH ( a, b, c, d, in[ 5], 4, 4294588738); /* 33 */
MD5_HH ( d, a, b, c, in[ 8], 11, 2272392833); /* 34 */
MD5_HH ( c, d, a, b, in[11], 16, 1839030562); /* 35 */
MD5_HH ( b, c, d, a, x, 23, 4259657740); /* 36 */
MD5_HH ( a, b, c, d, in[ 1], 4, 2763975236); /* 37 */
MD5_HH ( d, a, b, c, in[ 4], 11, 1272893353); /* 38 */
MD5_HH ( c, d, a, b, in[ 7], 16, 4139469664); /* 39 */
MD5_HH ( b, c, d, a, in[10], 23, 3200236656); /* 40 */
MD5_HH ( a, b, c, d, z, 4, 681279174); /* 41 */
MD5_HH ( d, a, b, c, in[ 0], 11, 3936430074); /* 42 */
MD5_HH ( c, d, a, b, in[ 3], 16, 3572445317); /* 43 */
MD5_HH ( b, c, d, a, in[ 6], 23, 76029189); /* 44 */
MD5_HH ( a, b, c, d, in[ 9], 4, 3654602809); /* 45 */
MD5_HH ( d, a, b, c, in[12], 11, 3873151461); /* 46 */
MD5_HH ( c, d, a, b, y, 16, 530742520); /* 47 */
MD5_HH ( b, c, d, a, in[ 2], 23, 3299628645); /* 48 */
MD5_II ( a, b, c, d, in[ 0], 6, 4096336452); /* 49 */
MD5_II ( d, a, b, c, in[ 7], 10, 1126891415); /* 50 */
MD5_II ( c, d, a, b, x, 15, 2878612391); /* 51 */
MD5_II ( b, c, d, a, in[ 5], 21, 4237533241); /* 52 */
MD5_II ( a, b, c, d, in[12], 6, 1700485571); /* 53 */
MD5_II ( d, a, b, c, in[ 3], 10, 2399980690); /* 54 */
MD5_II ( c, d, a, b, in[10], 15, 4293915773); /* 55 */
MD5_II ( b, c, d, a, in[ 1], 21, 2240044497); /* 56 */
MD5_II ( a, b, c, d, in[ 8], 6, 1873313359); /* 57 */
MD5_II ( d, a, b, c, y, 10, 4264355552); /* 58 */
MD5_II ( c, d, a, b, in[ 6], 15, 2734768916); /* 59 */
MD5_II ( b, c, d, a, z, 21, 1309151649); /* 60 */
MD5_II ( a, b, c, d, in[ 4], 6, 4149444226); /* 61 */
MD5_II ( d, a, b, c, in[11], 10, 3174756917); /* 62 */
MD5_II ( c, d, a, b, in[ 2], 15, 718787259); /* 63 */
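// Step 64 is omitted here: it would only update b, which the combination
// below never reads (only a, d and c are used).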
if (x <= y) {
x = a + ihv1[0];
y = d + ihv1[3];
z = (c + ihv1[2]) & hybridmask;
} else {
x = a + ihv2mod[0];
y = d + ihv2mod[3];
z = (c + ihv2mod[2]) & hybridmask;
}
++len;
}
{
if (0 == (x & distinguishedpointmask)) {
buffer2[idx].end[0] = x;
buffer2[idx].end[1] = y;
buffer2[idx].end[2] = z;
buffer2[idx].len = len;
buffer2[idx].start[0] = working_states2[idx].start[0];
buffer2[idx].start[1] = working_states2[idx].start[1];
buffer2[idx].start[2] = working_states2[idx].start[2];
x = uint32(seed>>32) ^ ((threadIdx.x<<16) + len);
y = uint32(seed) ^ blockIdx.x;
z = 0;
len = 0;
working_states2[idx].start[0] = x;
working_states2[idx].start[1] = y;
working_states2[idx].start[2] = z;
}
}
__syncthreads();
}
working_states2[idx].end[0] = x;
working_states2[idx].end[1] = y;
working_states2[idx].end[2] = z;
working_states2[idx].len = len;
}
void cuda_device::cuda_fill_trail_buffer(uint32 id, uint64 seed,
vector<trail_type>& buf,
vector< pair<trail_type,trail_type> >& collisions, bool mod)
{
// transfer results
hipMemcpyFromSymbol(detail->buffer_host, buffer2, sizeof(trail_type)*detail->blocks*256);
// start new cuda computation
if (!mod)
hipLaunchKernelGGL(( cuda_md5_work), dim3(detail->blocks), dim3(256), 0, 0, seed);
else
hipLaunchKernelGGL(( cuda_md5_workmod), dim3(detail->blocks), dim3(256), 0, 0, seed);
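// The launches above are asynchronous, so the host filters the previous
// round's trails (already copied into buffer_host) while the GPU computes
// the next round.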
// process and return results
buf.clear();
for (unsigned i = 0; i < detail->blocks*256; ++i)
if (detail->buffer_host[i].len)
buf.push_back(detail->buffer_host[i]);
}
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/time.h>
#endif
class timer_detail;
class timer {
public:
timer(bool direct_start = false);
~timer();
void start();
void stop();
double time() const; // get time between start and stop (or now if still running) in seconds
bool isrunning() const { return running; } // check if timer is running
private:
timer_detail* detail;
bool running;
};
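// Hypothetical usage sketch (not part of the original sources; run_workload is
// a placeholder):
// timer sw(true); // construct and start
// run_workload();
// sw.stop();
// std::cout << sw.time() << " seconds" << std::endl;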
class timer_detail {
public:
#ifdef _WIN32
LARGE_INTEGER tstart, tend;
double freq;
#else
struct timeval tstart, tend;
struct timezone tz;
#endif
};
timer::~timer()
{
delete detail;
}
timer::timer(bool direct_start): running(false)
{
detail = new timer_detail;
#ifdef _WIN32
LARGE_INTEGER tmp_freq;
QueryPerformanceFrequency(&tmp_freq);
detail->freq = double(tmp_freq.QuadPart);
#endif
if (direct_start)
start();
}
#ifdef _WIN32
void timer::start()
{
running = true;
QueryPerformanceCounter(&detail->tstart);
}
void timer::stop()
{
QueryPerformanceCounter(&detail->tend);
running = false;
}
double timer::time() const
{
if (running)
{
LARGE_INTEGER tmp_end;
QueryPerformanceCounter(&tmp_end);
return (double(tmp_end.QuadPart) - double(detail->tstart.QuadPart))/detail->freq;
} else
return (double(detail->tend.QuadPart) - double(detail->tstart.QuadPart))/detail->freq;
}
#else
void timer::start()
{
running = true;
gettimeofday(&detail->tstart, &detail->tz);
}
void timer::stop()
{
gettimeofday(&detail->tend, &detail->tz);
running = false;
}
double timer::time() const
{
double t1 = double(detail->tstart.tv_sec) + (double(detail->tstart.tv_usec)/1e6);
if (running)
{
struct timeval tmp_end;
gettimeofday(&tmp_end, &detail->tz);
return double(tmp_end.tv_sec) + (double(tmp_end.tv_usec)/1e6) - t1;
} else
return double(detail->tend.tv_sec) + (double(detail->tend.tv_usec)/1e6) - t1;
}
#endif
void cuda_device::benchmark()
{
timer sw;
for (int blocksize = 4; blocksize <= 256; ++blocksize)
for (int threadsize = 250; threadsize <= 257; ++threadsize)
{
sw.start();
uint64 work = 0;
while (sw.time() < 10) {
hipLaunchKernelGGL(( cuda_md5_work), dim3(blocksize), dim3(threadsize), 0, 0, 0);
hipMemcpyFromSymbol(detail->buffer_host, buffer2, sizeof(trail_type)*blocksize*threadsize);
++work;
}
uint64 ow = work;
work *= 0x400 * blocksize * threadsize;
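// NOTE: the work kernels above iterate 0x100 times per launch, so the 0x400
// factor here appears to overstate the step count by 4x (possibly a holdover
// from an earlier kernel version).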
cout << blocksize << "x" << threadsize << ":\t" << work << " (" << ow << ")" << endl;
}
}
int get_num_cuda_devices()
{
int deviceCount = 0;
hipGetDeviceCount(&deviceCount);
return deviceCount;
}
void cuda_device_query()
{
int deviceCount;
cutilSafeCall(hipGetDeviceCount(&deviceCount));
if (deviceCount == 0)
printf("There is no device supporting CUDA\n");
int dev;
for (dev = 0; dev < deviceCount; ++dev) {
hipDeviceProp_t deviceProp;
cutilSafeCall(hipGetDeviceProperties(&deviceProp, dev));
if (dev == 0) {
if (deviceProp.major == 9999 && deviceProp.minor == 9999)
printf("There is no device supporting CUDA.\n");
else if (deviceCount == 1)
printf("There is 1 device supporting CUDA\n");
else
printf("There are %d devices supporting CUDA\n", deviceCount);
}
printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name);
printf(" Major revision number: %d\n",
deviceProp.major);
printf(" Minor revision number: %d\n",
deviceProp.minor);
printf(" Total amount of global memory: %u bytes\n",
deviceProp.totalGlobalMem);
#if CUDART_VERSION >= 2000
printf(" Number of multiprocessors: %d\n",
deviceProp.multiProcessorCount);
printf(" Number of cores: %d\n",
8 * deviceProp.multiProcessorCount);
#endif
printf(" Total amount of constant memory: %u bytes\n",
deviceProp.totalConstMem);
printf(" Total amount of shared memory per block: %u bytes\n",
deviceProp.sharedMemPerBlock);
printf(" Total number of registers available per block: %d\n",
deviceProp.regsPerBlock);
printf(" Warp size: %d\n",
deviceProp.warpSize);
printf(" Maximum number of threads per block: %d\n",
deviceProp.maxThreadsPerBlock);
printf(" Maximum sizes of each dimension of a block: %d x %d x %d\n",
deviceProp.maxThreadsDim[0],
deviceProp.maxThreadsDim[1],
deviceProp.maxThreadsDim[2]);
printf(" Maximum sizes of each dimension of a grid: %d x %d x %d\n",
deviceProp.maxGridSize[0],
deviceProp.maxGridSize[1],
deviceProp.maxGridSize[2]);
printf(" Maximum memory pitch: %u bytes\n",
deviceProp.memPitch);
printf(" Texture alignment: %u bytes\n",
deviceProp.textureAlignment);
printf(" Clock rate: %.2f GHz\n",
deviceProp.clockRate * 1e-6f);
#if CUDART_VERSION >= 2000
printf(" Concurrent copy and execution: %s\n",
deviceProp.deviceOverlap ? "Yes" : "No");
#endif
}
}
| 54463403ea3e48d7478da2d216ad400b19696a61.cu | /**************************************************************************\
|
| Copyright (C) 2009 Marc Stevens
|
| This program is free software: you can redistribute it and/or modify
| it under the terms of the GNU General Public License as published by
| the Free Software Foundation, either version 3 of the License, or
| (at your option) any later version.
|
| This program is distributed in the hope that it will be useful,
| but WITHOUT ANY WARRANTY; without even the implied warranty of
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
| GNU General Public License for more details.
|
| You should have received a copy of the GNU General Public License
| along with this program. If not, see <http://www.gnu.org/licenses/>.
|
\**************************************************************************/
#include <iostream>
#include <vector>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <cutil_inline.h>
#include <cutil.h>
#include <cuda.h>
#include <boost/cstdint.hpp>
using namespace std;
typedef boost::uint32_t uint32;
typedef boost::uint64_t uint64;
#define TRAIL_NOCONSTRUCTOR
#include "birthday_types.hpp"
class cuda_device_detail {
public:
uint32 device;
uint32 blocks;
trail_type* buffer_host;
};
/* We assume that these are _thread specific_ (instead of global) storage managed by the cuda runtime libraries */
__device__ trail_type working_states2[122880];
__device__ trail_type buffer2[122880];
__constant__ uint32 msg1[16], msg2[16], ihv1[4], ihv2[4], ihv2mod[4];
__constant__ uint32 precomp1[4], precomp2[4];
__constant__ uint32 hybridmask, distinguishedpointmask, maximumpathlength;
/* F, G and H are basic MD5 functions: selection, majority, parity */
#define MD5_F(x, y, z) (((x) & (y)) | ((~x) & (z)))
#define MD5_G(x, y, z) (((x) & (z)) | ((y) & (~z)))
#define MD5_H(x, y, z) ((x) ^ (y) ^ (z))
#define MD5_I(x, y, z) ((y) ^ ((x) | (~z)))
/* ROTATE_LEFT rotates x left n bits */
#define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32-(n))))
/* FF, GG, HH, and II transformations for rounds 1, 2, 3, and 4 */
/* Rotation is separate from addition to prevent recomputation */
#define MD5_FF(a, b, c, d, x, s, ac) \
{(a) += MD5_F ((b), (c), (d)) + (x) + (uint32)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define MD5_GG(a, b, c, d, x, s, ac) \
{(a) += MD5_G ((b), (c), (d)) + (x) + (uint32)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define MD5_HH(a, b, c, d, x, s, ac) \
{(a) += MD5_H ((b), (c), (d)) + (x) + (uint32)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define MD5_II(a, b, c, d, x, s, ac) \
{(a) += MD5_I ((b), (c), (d)) + (x) + (uint32)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
__global__ void cuda_md5_init()
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
working_states2[idx].len = 0;
buffer2[idx].len = 0;
}
bool cuda_device::init(uint32 device, const uint32 ihv1b[4], const uint32 ihv2b[4], const uint32 ihv2modb[4], const uint32 msg1b[16], const uint32 msg2b[16], uint32 hmask, uint32 dpmask, uint32 maxlen)
{
detail = new cuda_device_detail;
detail->device = device;
int deviceCount;
CUDA_SAFE_CALL( cudaGetDeviceCount(&deviceCount) );
if (deviceCount == 0) {
cout << "There is no device supporting CUDA!" << endl;
return false;
}
cudaDeviceProp deviceProp;
CUDA_SAFE_CALL( cudaGetDeviceProperties(&deviceProp, device) );
if (deviceProp.major == 9999) {
cout << "Emulation device found." << endl;
return false;
}
cout << "CUDA device " << device << ": " << deviceProp.name << " (" << 8 * deviceProp.multiProcessorCount << " cores)" << endl;
detail->blocks = 16 * deviceProp.multiProcessorCount;
CUDA_SAFE_CALL( cudaSetDevice(device) );
CUDA_SAFE_CALL( cudaSetDeviceFlags( cudaDeviceBlockingSync ) );
CUDA_SAFE_CALL( cudaMallocHost( (void**)(&(detail->buffer_host)), 122880 * sizeof(trail_type) ) );
uint32 pc1[4], pc2[4];
uint32 a = ihv1b[0], b = ihv1b[1], c = ihv1b[2], d = ihv1b[3];
MD5_FF ( a, b, c, d, msg1b[ 0], 7, 3614090360); /* 1 */
MD5_FF ( d, a, b, c, msg1b[ 1], 12, 3905402710); /* 2 */
MD5_FF ( c, d, a, b, msg1b[ 2], 17, 606105819); /* 3 */
MD5_FF ( b, c, d, a, msg1b[ 3], 22, 3250441966); /* 4 */
MD5_FF ( a, b, c, d, msg1b[ 4], 7, 4118548399); /* 5 */
MD5_FF ( d, a, b, c, msg1b[ 5], 12, 1200080426); /* 6 */
MD5_FF ( c, d, a, b, msg1b[ 6], 17, 2821735955); /* 7 */
MD5_FF ( b, c, d, a, msg1b[ 7], 22, 4249261313); /* 8 */
MD5_FF ( a, b, c, d, msg1b[ 8], 7, 1770035416); /* 9 */
MD5_FF ( d, a, b, c, msg1b[ 9], 12, 2336552879); /* 10 */
MD5_FF ( c, d, a, b, msg1b[10], 17, 4294925233); /* 11 */
MD5_FF ( b, c, d, a, msg1b[11], 22, 2304563134); /* 12 */
MD5_FF ( a, b, c, d, msg1b[12], 7, 1804603682); /* 13 */
pc1[0] = a; pc1[1] = b; pc1[2] = c; pc1[3] = d;
a = ihv2b[0]; b = ihv2b[1]; c = ihv2b[2]; d = ihv2b[3];
MD5_FF ( a, b, c, d, msg2b[ 0], 7, 3614090360); /* 1 */
MD5_FF ( d, a, b, c, msg2b[ 1], 12, 3905402710); /* 2 */
MD5_FF ( c, d, a, b, msg2b[ 2], 17, 606105819); /* 3 */
MD5_FF ( b, c, d, a, msg2b[ 3], 22, 3250441966); /* 4 */
MD5_FF ( a, b, c, d, msg2b[ 4], 7, 4118548399); /* 5 */
MD5_FF ( d, a, b, c, msg2b[ 5], 12, 1200080426); /* 6 */
MD5_FF ( c, d, a, b, msg2b[ 6], 17, 2821735955); /* 7 */
MD5_FF ( b, c, d, a, msg2b[ 7], 22, 4249261313); /* 8 */
MD5_FF ( a, b, c, d, msg2b[ 8], 7, 1770035416); /* 9 */
MD5_FF ( d, a, b, c, msg2b[ 9], 12, 2336552879); /* 10 */
MD5_FF ( c, d, a, b, msg2b[10], 17, 4294925233); /* 11 */
MD5_FF ( b, c, d, a, msg2b[11], 22, 2304563134); /* 12 */
MD5_FF ( a, b, c, d, msg2b[12], 7, 1804603682); /* 13 */
pc2[0] = a; pc2[1] = b; pc2[2] = c; pc2[3] = d;
CUDA_SAFE_CALL( cudaMemcpyToSymbol(msg1, msg1b, sizeof(msg1)) );
CUDA_SAFE_CALL( cudaMemcpyToSymbol(msg2, msg2b, sizeof(msg2)) );
CUDA_SAFE_CALL( cudaMemcpyToSymbol(ihv1, ihv1b, sizeof(ihv1)) );
CUDA_SAFE_CALL( cudaMemcpyToSymbol(ihv2, ihv2b, sizeof(ihv2)) );
CUDA_SAFE_CALL( cudaMemcpyToSymbol(ihv2mod, ihv2modb, sizeof(ihv2mod)) );
CUDA_SAFE_CALL( cudaMemcpyToSymbol(precomp1, pc1, sizeof(pc1)) );
CUDA_SAFE_CALL( cudaMemcpyToSymbol(precomp2, pc2, sizeof(pc2)) );
CUDA_SAFE_CALL( cudaMemcpyToSymbol(hybridmask, &hmask, sizeof(hmask)) );
CUDA_SAFE_CALL( cudaMemcpyToSymbol(distinguishedpointmask, &dpmask, sizeof(dpmask)) );
CUDA_SAFE_CALL( cudaMemcpyToSymbol(maximumpathlength, &maxlen, sizeof(maxlen)) );
cuda_md5_init<<<detail->blocks, 256>>>();
return true;
}
__global__ void cuda_md5_work(uint64 seed)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
buffer2[idx].len = 0;
uint32 len = working_states2[idx].len;
uint32 x = working_states2[idx].end[0];
uint32 y = working_states2[idx].end[1];
uint32 z = working_states2[idx].end[2];
if (len >= maximumpathlength || len == 0) {
x = uint32(seed>>32) ^ threadIdx.x;
y = uint32(seed) ^ blockIdx.x;
z = 0;
working_states2[idx].start[0] = x;
working_states2[idx].start[1] = y;
working_states2[idx].start[2] = z;
len = 0;
}
__syncthreads();
for (unsigned j = 0; j < 0x100; ++j)
{
{
uint32* in = msg1;
uint32 a = precomp1[0], b = precomp1[1], c = precomp1[2], d = precomp1[3];
if (x > y) {
in = msg2;
a = precomp2[0]; b = precomp2[1]; c = precomp2[2]; d = precomp2[3];
}
MD5_FF ( d, a, b, c, z, 12, 4254626195); /* 14 */
MD5_FF ( c, d, a, b, x, 17, 2792965006); /* 15 */
MD5_FF ( b, c, d, a, y, 22, 1236535329); /* 16 */
MD5_GG ( a, b, c, d, in[ 1], 5, 4129170786); /* 17 */
MD5_GG ( d, a, b, c, in[ 6], 9, 3225465664); /* 18 */
MD5_GG ( c, d, a, b, in[11], 14, 643717713); /* 19 */
MD5_GG ( b, c, d, a, in[ 0], 20, 3921069994); /* 20 */
MD5_GG ( a, b, c, d, in[ 5], 5, 3593408605); /* 21 */
MD5_GG ( d, a, b, c, in[10], 9, 38016083); /* 22 */
MD5_GG ( c, d, a, b, y, 14, 3634488961); /* 23 */
MD5_GG ( b, c, d, a, in[ 4], 20, 3889429448); /* 24 */
MD5_GG ( a, b, c, d, in[ 9], 5, 568446438); /* 25 */
MD5_GG ( d, a, b, c, x, 9, 3275163606); /* 26 */
MD5_GG ( c, d, a, b, in[ 3], 14, 4107603335); /* 27 */
MD5_GG ( b, c, d, a, in[ 8], 20, 1163531501); /* 28 */
MD5_GG ( a, b, c, d, z, 5, 2850285829); /* 29 */
MD5_GG ( d, a, b, c, in[ 2], 9, 4243563512); /* 30 */
MD5_GG ( c, d, a, b, in[ 7], 14, 1735328473); /* 31 */
MD5_GG ( b, c, d, a, in[12], 20, 2368359562); /* 32 */
MD5_HH ( a, b, c, d, in[ 5], 4, 4294588738); /* 33 */
MD5_HH ( d, a, b, c, in[ 8], 11, 2272392833); /* 34 */
MD5_HH ( c, d, a, b, in[11], 16, 1839030562); /* 35 */
MD5_HH ( b, c, d, a, x, 23, 4259657740); /* 36 */
MD5_HH ( a, b, c, d, in[ 1], 4, 2763975236); /* 37 */
MD5_HH ( d, a, b, c, in[ 4], 11, 1272893353); /* 38 */
MD5_HH ( c, d, a, b, in[ 7], 16, 4139469664); /* 39 */
MD5_HH ( b, c, d, a, in[10], 23, 3200236656); /* 40 */
MD5_HH ( a, b, c, d, z, 4, 681279174); /* 41 */
MD5_HH ( d, a, b, c, in[ 0], 11, 3936430074); /* 42 */
MD5_HH ( c, d, a, b, in[ 3], 16, 3572445317); /* 43 */
MD5_HH ( b, c, d, a, in[ 6], 23, 76029189); /* 44 */
MD5_HH ( a, b, c, d, in[ 9], 4, 3654602809); /* 45 */
MD5_HH ( d, a, b, c, in[12], 11, 3873151461); /* 46 */
MD5_HH ( c, d, a, b, y, 16, 530742520); /* 47 */
MD5_HH ( b, c, d, a, in[ 2], 23, 3299628645); /* 48 */
MD5_II ( a, b, c, d, in[ 0], 6, 4096336452); /* 49 */
MD5_II ( d, a, b, c, in[ 7], 10, 1126891415); /* 50 */
MD5_II ( c, d, a, b, x, 15, 2878612391); /* 51 */
MD5_II ( b, c, d, a, in[ 5], 21, 4237533241); /* 52 */
MD5_II ( a, b, c, d, in[12], 6, 1700485571); /* 53 */
MD5_II ( d, a, b, c, in[ 3], 10, 2399980690); /* 54 */
MD5_II ( c, d, a, b, in[10], 15, 4293915773); /* 55 */
MD5_II ( b, c, d, a, in[ 1], 21, 2240044497); /* 56 */
MD5_II ( a, b, c, d, in[ 8], 6, 1873313359); /* 57 */
MD5_II ( d, a, b, c, y, 10, 4264355552); /* 58 */
MD5_II ( c, d, a, b, in[ 6], 15, 2734768916); /* 59 */
MD5_II ( b, c, d, a, z, 21, 1309151649); /* 60 */
MD5_II ( a, b, c, d, in[ 4], 6, 4149444226); /* 61 */
MD5_II ( d, a, b, c, in[11], 10, 3174756917); /* 62 */
MD5_II ( c, d, a, b, in[ 2], 15, 718787259); /* 63 */
MD5_II ( b, c, d, a, in[ 9], 21, 3951481745); /* 64 */
if (x <= y) {
a += ihv1[0];
b += ihv1[1];
c += ihv1[2];
d += ihv1[3];
} else {
a += ihv2mod[0];
b += ihv2mod[1];
c += ihv2mod[2];
d += ihv2mod[3];
}
x = a;
y = d - c;
z = (d - b) & hybridmask;
++len;
}
{
if (0 == (x & distinguishedpointmask)) {
buffer2[idx].end[0] = x;
buffer2[idx].end[1] = y;
buffer2[idx].end[2] = z;
buffer2[idx].len = len;
buffer2[idx].start[0] = working_states2[idx].start[0];
buffer2[idx].start[1] = working_states2[idx].start[1];
buffer2[idx].start[2] = working_states2[idx].start[2];
x = uint32(seed>>32) ^ ((threadIdx.x<<16) + len);
y = uint32(seed) ^ blockIdx.x;
z = 0;
len = 0;
working_states2[idx].start[0] = x;
working_states2[idx].start[1] = y;
working_states2[idx].start[2] = z;
}
}
__syncthreads();
}
working_states2[idx].end[0] = x;
working_states2[idx].end[1] = y;
working_states2[idx].end[2] = z;
working_states2[idx].len = len;
}
__global__ void cuda_md5_workmod(uint64 seed)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
buffer2[idx].len = 0;
uint32 len = working_states2[idx].len;
uint32 x = working_states2[idx].end[0];
uint32 y = working_states2[idx].end[1];
uint32 z = working_states2[idx].end[2];
if (len >= maximumpathlength || len == 0) {
x = uint32(seed>>32) ^ threadIdx.x;
y = uint32(seed) ^ blockIdx.x;
z = 0;
working_states2[idx].start[0] = x;
working_states2[idx].start[1] = y;
working_states2[idx].start[2] = z;
len = 0;
}
__syncthreads();
for (unsigned j = 0; j < 0x100; ++j)
{
{
uint32* in = msg1;
uint32 a = precomp1[0], b = precomp1[1], c = precomp1[2], d = precomp1[3];
if (x > y) {
in = msg2;
a = precomp2[0]; b = precomp2[1]; c = precomp2[2]; d = precomp2[3];
}
MD5_FF ( d, a, b, c, z, 12, 4254626195); /* 14 */
MD5_FF ( c, d, a, b, x, 17, 2792965006); /* 15 */
MD5_FF ( b, c, d, a, y, 22, 1236535329); /* 16 */
MD5_GG ( a, b, c, d, in[ 1], 5, 4129170786); /* 17 */
MD5_GG ( d, a, b, c, in[ 6], 9, 3225465664); /* 18 */
MD5_GG ( c, d, a, b, in[11], 14, 643717713); /* 19 */
MD5_GG ( b, c, d, a, in[ 0], 20, 3921069994); /* 20 */
MD5_GG ( a, b, c, d, in[ 5], 5, 3593408605); /* 21 */
MD5_GG ( d, a, b, c, in[10], 9, 38016083); /* 22 */
MD5_GG ( c, d, a, b, y, 14, 3634488961); /* 23 */
MD5_GG ( b, c, d, a, in[ 4], 20, 3889429448); /* 24 */
MD5_GG ( a, b, c, d, in[ 9], 5, 568446438); /* 25 */
MD5_GG ( d, a, b, c, x, 9, 3275163606); /* 26 */
MD5_GG ( c, d, a, b, in[ 3], 14, 4107603335); /* 27 */
MD5_GG ( b, c, d, a, in[ 8], 20, 1163531501); /* 28 */
MD5_GG ( a, b, c, d, z, 5, 2850285829); /* 29 */
MD5_GG ( d, a, b, c, in[ 2], 9, 4243563512); /* 30 */
MD5_GG ( c, d, a, b, in[ 7], 14, 1735328473); /* 31 */
MD5_GG ( b, c, d, a, in[12], 20, 2368359562); /* 32 */
MD5_HH ( a, b, c, d, in[ 5], 4, 4294588738); /* 33 */
MD5_HH ( d, a, b, c, in[ 8], 11, 2272392833); /* 34 */
MD5_HH ( c, d, a, b, in[11], 16, 1839030562); /* 35 */
MD5_HH ( b, c, d, a, x, 23, 4259657740); /* 36 */
MD5_HH ( a, b, c, d, in[ 1], 4, 2763975236); /* 37 */
MD5_HH ( d, a, b, c, in[ 4], 11, 1272893353); /* 38 */
MD5_HH ( c, d, a, b, in[ 7], 16, 4139469664); /* 39 */
MD5_HH ( b, c, d, a, in[10], 23, 3200236656); /* 40 */
MD5_HH ( a, b, c, d, z, 4, 681279174); /* 41 */
MD5_HH ( d, a, b, c, in[ 0], 11, 3936430074); /* 42 */
MD5_HH ( c, d, a, b, in[ 3], 16, 3572445317); /* 43 */
MD5_HH ( b, c, d, a, in[ 6], 23, 76029189); /* 44 */
MD5_HH ( a, b, c, d, in[ 9], 4, 3654602809); /* 45 */
MD5_HH ( d, a, b, c, in[12], 11, 3873151461); /* 46 */
MD5_HH ( c, d, a, b, y, 16, 530742520); /* 47 */
MD5_HH ( b, c, d, a, in[ 2], 23, 3299628645); /* 48 */
MD5_II ( a, b, c, d, in[ 0], 6, 4096336452); /* 49 */
MD5_II ( d, a, b, c, in[ 7], 10, 1126891415); /* 50 */
MD5_II ( c, d, a, b, x, 15, 2878612391); /* 51 */
MD5_II ( b, c, d, a, in[ 5], 21, 4237533241); /* 52 */
MD5_II ( a, b, c, d, in[12], 6, 1700485571); /* 53 */
MD5_II ( d, a, b, c, in[ 3], 10, 2399980690); /* 54 */
MD5_II ( c, d, a, b, in[10], 15, 4293915773); /* 55 */
MD5_II ( b, c, d, a, in[ 1], 21, 2240044497); /* 56 */
MD5_II ( a, b, c, d, in[ 8], 6, 1873313359); /* 57 */
MD5_II ( d, a, b, c, y, 10, 4264355552); /* 58 */
MD5_II ( c, d, a, b, in[ 6], 15, 2734768916); /* 59 */
MD5_II ( b, c, d, a, z, 21, 1309151649); /* 60 */
MD5_II ( a, b, c, d, in[ 4], 6, 4149444226); /* 61 */
MD5_II ( d, a, b, c, in[11], 10, 3174756917); /* 62 */
MD5_II ( c, d, a, b, in[ 2], 15, 718787259); /* 63 */
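// Step 64 is omitted here: it would only update b, which the combination
// below never reads (only a, d and c are used).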
if (x <= y) {
x = a + ihv1[0];
y = d + ihv1[3];
z = (c + ihv1[2]) & hybridmask;
} else {
x = a + ihv2mod[0];
y = d + ihv2mod[3];
z = (c + ihv2mod[2]) & hybridmask;
}
++len;
}
{
if (0 == (x & distinguishedpointmask)) {
buffer2[idx].end[0] = x;
buffer2[idx].end[1] = y;
buffer2[idx].end[2] = z;
buffer2[idx].len = len;
buffer2[idx].start[0] = working_states2[idx].start[0];
buffer2[idx].start[1] = working_states2[idx].start[1];
buffer2[idx].start[2] = working_states2[idx].start[2];
x = uint32(seed>>32) ^ ((threadIdx.x<<16) + len);
y = uint32(seed) ^ blockIdx.x;
z = 0;
len = 0;
working_states2[idx].start[0] = x;
working_states2[idx].start[1] = y;
working_states2[idx].start[2] = z;
}
}
__syncthreads();
}
working_states2[idx].end[0] = x;
working_states2[idx].end[1] = y;
working_states2[idx].end[2] = z;
working_states2[idx].len = len;
}
void cuda_device::cuda_fill_trail_buffer(uint32 id, uint64 seed,
vector<trail_type>& buf,
vector< pair<trail_type,trail_type> >& collisions, bool mod)
{
// transfer results
cudaMemcpyFromSymbol(detail->buffer_host, buffer2, sizeof(trail_type)*detail->blocks*256);
// start new cuda computation
if (!mod)
cuda_md5_work<<<detail->blocks, 256>>>(seed);
else
cuda_md5_workmod<<<detail->blocks, 256>>>(seed);
// process and return results
buf.clear();
for (unsigned i = 0; i < detail->blocks*256; ++i)
if (detail->buffer_host[i].len)
buf.push_back(detail->buffer_host[i]);
}
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/time.h>
#endif
class timer_detail;
class timer {
public:
timer(bool direct_start = false);
~timer();
void start();
void stop();
double time() const; // get time between start and stop (or now if still running) in seconds
bool isrunning() const { return running; } // check if timer is running
private:
timer_detail* detail;
bool running;
};
class timer_detail {
public:
#ifdef _WIN32
LARGE_INTEGER tstart, tend;
double freq;
#else
struct timeval tstart, tend;
struct timezone tz;
#endif
};
timer::~timer()
{
delete detail;
}
timer::timer(bool direct_start): running(false)
{
detail = new timer_detail;
#ifdef _WIN32
LARGE_INTEGER tmp_freq;
QueryPerformanceFrequency(&tmp_freq);
detail->freq = double(tmp_freq.QuadPart);
#endif
if (direct_start)
start();
}
#ifdef _WIN32
void timer::start()
{
running = true;
QueryPerformanceCounter(&detail->tstart);
}
void timer::stop()
{
QueryPerformanceCounter(&detail->tend);
running = false;
}
double timer::time() const
{
if (running)
{
LARGE_INTEGER tmp_end;
QueryPerformanceCounter(&tmp_end);
return (double(tmp_end.QuadPart) - double(detail->tstart.QuadPart))/detail->freq;
} else
return (double(detail->tend.QuadPart) - double(detail->tstart.QuadPart))/detail->freq;
}
#else
void timer::start()
{
running = true;
gettimeofday(&detail->tstart, &detail->tz);
}
void timer::stop()
{
gettimeofday(&detail->tend, &detail->tz);
running = false;
}
double timer::time() const
{
double t1 = double(detail->tstart.tv_sec) + (double(detail->tstart.tv_usec)/1e6);
if (running)
{
struct timeval tmp_end;
gettimeofday(&tmp_end, &detail->tz);
return double(tmp_end.tv_sec) + (double(tmp_end.tv_usec)/1e6) - t1;
} else
return double(detail->tend.tv_sec) + (double(detail->tend.tv_usec)/1e6) - t1;
}
#endif
void cuda_device::benchmark()
{
timer sw;
for (int blocksize = 4; blocksize <= 256; ++blocksize)
for (int threadsize = 250; threadsize <= 257; ++threadsize)
{
sw.start();
uint64 work = 0;
while (sw.time() < 10) {
cuda_md5_work<<<blocksize, threadsize>>>(0);
cudaMemcpyFromSymbol(detail->buffer_host, buffer2, sizeof(trail_type)*blocksize*threadsize);
++work;
}
uint64 ow = work;
work *= 0x400 * blocksize * threadsize;
cout << blocksize << "x" << threadsize << ":\t" << work << " (" << ow << ")" << endl;
}
}
int get_num_cuda_devices()
{
int deviceCount = 0;
cudaGetDeviceCount(&deviceCount);
return deviceCount;
}
void cuda_device_query()
{
int deviceCount;
cutilSafeCall(cudaGetDeviceCount(&deviceCount));
if (deviceCount == 0)
printf("There is no device supporting CUDA\n");
int dev;
for (dev = 0; dev < deviceCount; ++dev) {
cudaDeviceProp deviceProp;
cutilSafeCall(cudaGetDeviceProperties(&deviceProp, dev));
if (dev == 0) {
if (deviceProp.major == 9999 && deviceProp.minor == 9999)
printf("There is no device supporting CUDA.\n");
else if (deviceCount == 1)
printf("There is 1 device supporting CUDA\n");
else
printf("There are %d devices supporting CUDA\n", deviceCount);
}
printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name);
printf(" Major revision number: %d\n",
deviceProp.major);
printf(" Minor revision number: %d\n",
deviceProp.minor);
printf(" Total amount of global memory: %u bytes\n",
deviceProp.totalGlobalMem);
#if CUDART_VERSION >= 2000
printf(" Number of multiprocessors: %d\n",
deviceProp.multiProcessorCount);
printf(" Number of cores: %d\n",
8 * deviceProp.multiProcessorCount);
#endif
printf(" Total amount of constant memory: %u bytes\n",
deviceProp.totalConstMem);
printf(" Total amount of shared memory per block: %u bytes\n",
deviceProp.sharedMemPerBlock);
printf(" Total number of registers available per block: %d\n",
deviceProp.regsPerBlock);
printf(" Warp size: %d\n",
deviceProp.warpSize);
printf(" Maximum number of threads per block: %d\n",
deviceProp.maxThreadsPerBlock);
printf(" Maximum sizes of each dimension of a block: %d x %d x %d\n",
deviceProp.maxThreadsDim[0],
deviceProp.maxThreadsDim[1],
deviceProp.maxThreadsDim[2]);
printf(" Maximum sizes of each dimension of a grid: %d x %d x %d\n",
deviceProp.maxGridSize[0],
deviceProp.maxGridSize[1],
deviceProp.maxGridSize[2]);
printf(" Maximum memory pitch: %u bytes\n",
deviceProp.memPitch);
printf(" Texture alignment: %u bytes\n",
deviceProp.textureAlignment);
printf(" Clock rate: %.2f GHz\n",
deviceProp.clockRate * 1e-6f);
#if CUDART_VERSION >= 2000
printf(" Concurrent copy and execution: %s\n",
deviceProp.deviceOverlap ? "Yes" : "No");
#endif
}
}
|
259c155f8fbee57b9c3c348792f24f15d09a0468.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/VolumetricAveragePooling.cu"
#else
static inline void THNN_(VolumetricAveragePooling_shapeCheck)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
int kT, int kW, int kH,
int dT, int dW, int dH,
int padT, int padW, int padH,
bool ceil_mode)
{
int inputSlices;
int inputTime;
int inputHeight;
int inputWidth;
int ndim = input->dim();
int dimN = 0;
int dimt = 1;
int dimh = 2;
int dimw = 3;
if (input->dim() == 5)
{
dimN++;
dimt++;
dimh++;
dimw++;
}
if (!input->is_empty() && THCTensor_(nDimensionLegacyNoScalars)(state, input) == 4)
{
THArgCheck(input->size(dimw) >= kW && input->size(dimh) >= kH
&& input->size(dimt) >= kT, 2,
"input image (T: %d H: %d W: %d) smaller than "
"kernel size (kT: %d kH: %d kW: %d)",
input->size(dimt), input->size(dimh), input->size(dimw),
kT, kH, kW);
/* sizes */
inputSlices = THCTensor_(size)(state, input, 0);
inputTime = THCTensor_(size)(state, input, 1);
inputHeight = THCTensor_(size)(state, input, 2);
inputWidth = THCTensor_(size)(state, input, 3);
}
else if (!input->is_empty() && THCTensor_(nDimensionLegacyNoScalars)(state, input) == 5)
{
THArgCheck(input->size(dimw) >= kW && input->size(dimh) >= kH
&& input->size(dimt) >= kT, 2,
"input image (T: %d H: %d W: %d) smaller than "
"kernel size (kT: %d kH: %d kW: %d)",
input->size(dimt), input->size(dimh), input->size(dimw),
kT, kH, kW);
/* sizes */
inputSlices = THCTensor_(size)(state, input, 1);
inputTime = THCTensor_(size)(state, input, 2);
inputHeight = THCTensor_(size)(state, input, 3);
inputWidth = THCTensor_(size)(state, input, 4);
}
else
{
AT_ERROR("non-empty 4D or 5D tensor expected, but got size: ", input->sizes());
}
// The second argument is the index of padH.
THArgCheck(kT/2 >= padT && kW/2 >= padW && kH/2 >= padH, 11,
"pad should not be greater than half of kernel size, but got "
"padT = %d, padW = %d, padH = %d, kT = %d, kW = %d, kH = %d",
padT, padW, padH, kT, kW, kH);
int outputTime;
int outputHeight;
int outputWidth;
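// Standard pooling output size: out = ((in + 2*pad - k) / stride) + 1, rounded
// up in ceil mode and down otherwise; e.g. in=6, k=3, pad=0, stride=2 gives
// floor(3/2) + 1 = 2.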
if (ceil_mode)
{
outputTime = ceil(float(inputTime - kT + 2*padT) / float(dT)) + 1;
outputHeight = ceil(float(inputHeight - kH + 2*padH) / float(dH)) + 1;
outputWidth = ceil(float(inputWidth - kW + 2*padW) / float(dW)) + 1;
}
else
{
outputTime = floor(float(inputTime - kT + 2*padT) / float(dT)) + 1;
outputHeight = floor(float(inputHeight - kH + 2*padH) / float(dH)) + 1;
outputWidth = floor(float(inputWidth - kW + 2*padW) / float(dW)) + 1;
}
if (padT || padW || padH)
{
// ensure that the last pooling starts inside the image
// needed to avoid problems in ceil mode
if ((outputTime - 1)*dT >= inputTime + padT)
--outputTime;
if ((outputHeight - 1)*dH >= inputHeight + padH)
--outputHeight;
if ((outputWidth - 1)*dW >= inputWidth + padW)
--outputWidth;
}
if (gradOutput != NULL)
{
THCUNN_check_dim_size(state, gradOutput, ndim, dimN, inputSlices);
THCUNN_check_dim_size(state, gradOutput, ndim, dimt, outputTime);
THCUNN_check_dim_size(state, gradOutput, ndim, dimh, outputHeight);
THCUNN_check_dim_size(state, gradOutput, ndim, dimw, outputWidth);
}
}
void THNN_(VolumetricAveragePooling_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
int kT, int kW, int kH,
int dT, int dW, int dH,
int padT, int padW, int padH,
bool ceil_mode,
bool count_include_pad)
{
int batchSize;
int inputSlices;
int inputTime;
int inputHeight;
int inputWidth;
int dimt = 1;
int dimh = 2;
int dimw = 3;
int fiveDimensionalInput = THCTensor_(nDimensionLegacyNoScalars)(state, input) == 5;
if (fiveDimensionalInput)
{
dimt++;
dimh++;
dimw++;
}
THNN_(VolumetricAveragePooling_shapeCheck)
(state, input, NULL, kT, kW, kH, dT, dW, dH,
padT, padW, padH, ceil_mode);
if (!fiveDimensionalInput) /* 4D */
{
/* sizes */
batchSize = 1;
inputSlices = THCTensor_(size)(state, input, 0);
inputTime = THCTensor_(size)(state, input, 1);
inputHeight = THCTensor_(size)(state, input, 2);
inputWidth = THCTensor_(size)(state, input, 3);
}
else /* 5D */
{
/* sizes */
batchSize = THCTensor_(size)(state, input, 0);
inputSlices = THCTensor_(size)(state, input, 1);
inputTime = THCTensor_(size)(state, input, 2);
inputHeight = THCTensor_(size)(state, input, 3);
inputWidth = THCTensor_(size)(state, input, 4);
}
int outputTime;
int outputHeight;
int outputWidth;
if (ceil_mode)
{
outputTime = ceil(float(inputTime - kT + 2*padT) / float(dT)) + 1;
outputHeight = ceil(float(inputHeight - kH + 2*padH) / float(dH)) + 1;
outputWidth = ceil(float(inputWidth - kW + 2*padW) / float(dW)) + 1;
}
else
{
outputTime = floor(float(inputTime - kT + 2*padT) / float(dT)) + 1;
outputHeight = floor(float(inputHeight - kH + 2*padH) / float(dH)) + 1;
outputWidth = floor(float(inputWidth - kW + 2*padW) / float(dW)) + 1;
}
if (padT || padH || padW)
{
// ensure that the last pooling starts inside the image
// needed to avoid problems in ceil mode
if ((outputTime - 1)*dT >= inputTime + padT)
--outputTime;
if ((outputHeight - 1)*dH >= inputHeight + padH)
--outputHeight;
if ((outputWidth - 1)*dW >= inputWidth + padW)
--outputWidth;
}
if (!fiveDimensionalInput) /* 4D */
{
/* resize output */
THCTensor_(resize4d)(state, output, inputSlices,
outputTime, outputHeight, outputWidth);
}
else /* 5D */
{
THCTensor_(resize5d)(state, output, batchSize, inputSlices,
outputTime, outputHeight, outputWidth);
}
input = THCTensor_(newContiguous)(state, input);
if (fiveDimensionalInput) {
// Collapse batch and feature dimensions
output = THCTensor_(newFoldBatchDim)(state, output);
THCTensor *old_input = input;
input = THCTensor_(newFoldBatchDim)(state, input);
THCTensor_(free)(state, old_input);
} else {
THCTensor_(retain)(state, output);
}
THCDeviceTensor<real, 4> cudaInput;
THCDeviceTensor<real, 4> cudaOutput;
cudaInput = toDeviceTensor<real, 4>(state, input);
cudaOutput = toDeviceTensor<real, 4>(state, output);
int totalZ = outputTime * inputSlices * batchSize;
int offsetZ = 0;
dim3 block(32, 8);
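// gridDim.z is capped at 65535, so the combined (time, slice, batch) axis is
// processed in chunks; offsetZ tells the kernel where to resume.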
while (totalZ > 0) {
dim3 grid(THCCeilDiv(outputWidth, static_cast<int>(block.x)),
THCCeilDiv(outputHeight, static_cast<int>(block.y)),
totalZ > 65535 ? 65535 : totalZ);
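// kW in 1..7 dispatches to width-specialized kernels via the macro (defined
// elsewhere in this file); any other width falls through to the generic kernel.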
switch (kW)
{
LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(1);
LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(2);
LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(3);
LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(4);
LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(5);
LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(6);
LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(7);
default:
hipLaunchKernelGGL(( cuda_VolumetricAveragePooling_updateOutput<real, accreal>)
, dim3(grid), dim3(block), 0, THCState_getCurrentStream(state),
cudaInput,
cudaOutput,
kT, kH, kW,
dT, dH, dW,
padT, padH, padW,
count_include_pad,
offsetZ);
break;
}
totalZ -= 65535;
offsetZ += 65535;
THCudaCheck(hipGetLastError());
}
THCTensor_(free)(state, input);
THCTensor_(free)(state, output);
}
void THNN_(VolumetricAveragePooling_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
int kT, int kW, int kH,
int dT, int dW, int dH,
int padT, int padW, int padH,
bool ceil_mode,
bool count_include_pad)
{
THNN_(VolumetricAveragePooling_shapeCheck)
(state, input, gradOutput, kT, kW, kH, dT, dW, dH,
padT, padW, padH, ceil_mode);
bool kernelsOverlap = (dT < kT) || (dH < kH) || (dW < kW);
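// Overlapping windows mean several output cells contribute to the same input
// cell, so the gradient must be accumulated with atomicAdd; non-overlapping
// windows can scatter without atomics (see the branch below).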
// Resize and initialize result tensor.
THCTensor_(resizeAs)(state, gradInput, input);
THCTensor_(zero)(state, gradInput);
int batchSize;
int inputSlices;
int inputTime;
int inputHeight;
int inputWidth;
int outputTime;
int outputHeight;
int outputWidth;
int fiveDimensionalInput = THCTensor_(nDimensionLegacyNoScalars)(state, input) == 5;
if (!fiveDimensionalInput) /* 4D */
{
batchSize = 1;
inputSlices = THCTensor_(size)(state, input, 0);
inputTime = THCTensor_(size)(state, input, 1);
inputHeight = THCTensor_(size)(state, input, 2);
inputWidth = THCTensor_(size)(state, input, 3);
outputTime = THCTensor_(size)(state, gradOutput, 1);
outputHeight = THCTensor_(size)(state, gradOutput, 2);
outputWidth = THCTensor_(size)(state, gradOutput, 3);
}
else
{
batchSize = THCTensor_(size)(state, input, 0);
inputSlices = THCTensor_(size)(state, input, 1);
inputTime = THCTensor_(size)(state, input, 2);
inputHeight = THCTensor_(size)(state, input, 3);
inputWidth = THCTensor_(size)(state, input, 4);
outputTime = THCTensor_(size)(state, gradOutput, 2);
outputHeight = THCTensor_(size)(state, gradOutput, 3);
outputWidth = THCTensor_(size)(state, gradOutput, 4);
}
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
if (fiveDimensionalInput) {
// Collapse batch and feature dimensions
gradInput = THCTensor_(newFoldBatchDim)(state, gradInput);
THCTensor *old_gradOutput = gradOutput;
gradOutput = THCTensor_(newFoldBatchDim)(state, gradOutput);
THCTensor_(free)(state, old_gradOutput);
} else {
THCTensor_(retain)(state, gradInput);
}
THCDeviceTensor<real, 4> cudaGradInput;
THCDeviceTensor<real, 4> cudaGradOutput;
cudaGradInput = toDeviceTensor<real, 4>(state, gradInput);
cudaGradOutput = toDeviceTensor<real, 4>(state, gradOutput);
dim3 block(32, 8);
// Optimizing for stride 1 is probably only of limited value, but this
// specialization yields 3x speedup over the atomicAdd implementation.
// Padding must be 0, otherwise, pool size may change.
if (dT == 1 && dH == 1 && dW == 1 && padT == 0 && padH == 0 && padW == 0)
{
int totalZ = inputTime * inputSlices * batchSize;
int offsetZ = 0;
while (totalZ > 0) {
dim3 grid(THCCeilDiv(inputWidth, static_cast<int>(block.x)),
THCCeilDiv(inputHeight, static_cast<int>(block.y)),
totalZ > 65535 ? 65535 : totalZ);
hipLaunchKernelGGL(( cuda_VolumetricAveragePooling_updateGradInput_Stride1<real, accreal>)
, dim3(grid), dim3(block), 0, THCState_getCurrentStream(state),
cudaGradOutput, cudaGradInput, kT, kH, kW, 1.0f/(kT * kH * kW), offsetZ);
THCudaCheck(hipGetLastError());
totalZ -= 65535;
offsetZ += 65535;
}
}
else
{
int totalZ = outputTime * inputSlices * batchSize;
int offsetZ = 0;
while (totalZ > 0) {
dim3 grid(THCCeilDiv(outputWidth, static_cast<int>(block.x)),
THCCeilDiv(outputHeight, static_cast<int>(block.y)),
totalZ > 65535 ? 65535 : totalZ);
if (kernelsOverlap)
{
hipLaunchKernelGGL(( cuda_VolumetricAveragePooling_updateGradInput_atomicAdd<real, accreal>)
, dim3(grid), dim3(block), 0, THCState_getCurrentStream(state),
cudaGradOutput, cudaGradInput, kT, kH, kW, dT, dH, dW,
padT, padH, padW, count_include_pad, offsetZ);
}
else
{
hipLaunchKernelGGL(( cuda_VolumetricAveragePooling_updateGradInput<real, accreal>)
, dim3(grid), dim3(block), 0, THCState_getCurrentStream(state),
cudaGradOutput, cudaGradInput, kT, kH, kW, dT, dH, dW,
padT, padH, padW, count_include_pad, offsetZ);
}
THCudaCheck(hipGetLastError());
totalZ -= 65535;
offsetZ += 65535;
}
}
THCTensor_(free)(state, gradInput);
THCTensor_(free)(state, gradOutput);
}
#endif
| 259c155f8fbee57b9c3c348792f24f15d09a0468.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/VolumetricAveragePooling.cu"
#else
static inline void THNN_(VolumetricAveragePooling_shapeCheck)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
int kT, int kW, int kH,
int dT, int dW, int dH,
int padT, int padW, int padH,
bool ceil_mode)
{
int inputSlices;
int inputTime;
int inputHeight;
int inputWidth;
int ndim = input->dim();
int dimN = 0;
int dimt = 1;
int dimh = 2;
int dimw = 3;
if (input->dim() == 5)
{
dimN++;
dimt++;
dimh++;
dimw++;
}
if (!input->is_empty() && THCTensor_(nDimensionLegacyNoScalars)(state, input) == 4)
{
THArgCheck(input->size(dimw) >= kW && input->size(dimh) >= kH
&& input->size(dimt) >= kT, 2,
"input image (T: %d H: %d W: %d) smaller than "
"kernel size (kT: %d kH: %d kW: %d)",
input->size(dimt), input->size(dimh), input->size(dimw),
kT, kH, kW);
/* sizes */
inputSlices = THCTensor_(size)(state, input, 0);
inputTime = THCTensor_(size)(state, input, 1);
inputHeight = THCTensor_(size)(state, input, 2);
inputWidth = THCTensor_(size)(state, input, 3);
}
else if (!input->is_empty() && THCTensor_(nDimensionLegacyNoScalars)(state, input) == 5)
{
THArgCheck(input->size(dimw) >= kW && input->size(dimh) >= kH
&& input->size(dimt) >= kT, 2,
"input image (T: %d H: %d W: %d) smaller than "
"kernel size (kT: %d kH: %d kW: %d)",
input->size(dimt), input->size(dimh), input->size(dimw),
kT, kH, kW);
/* sizes */
inputSlices = THCTensor_(size)(state, input, 1);
inputTime = THCTensor_(size)(state, input, 2);
inputHeight = THCTensor_(size)(state, input, 3);
inputWidth = THCTensor_(size)(state, input, 4);
}
else
{
AT_ERROR("non-empty 4D or 5D tensor expected, but got size: ", input->sizes());
}
// The second argument is the index of padH.
THArgCheck(kT/2 >= padT && kW/2 >= padW && kH/2 >= padH, 11,
"pad should not be greater than half of kernel size, but got "
"padT = %d, padW = %d, padH = %d, kT = %d, kW = %d, kH = %d",
padT, padW, padH, kT, kW, kH);
int outputTime;
int outputHeight;
int outputWidth;
if (ceil_mode)
{
outputTime = ceil(float(inputTime - kT + 2*padT) / float(dT)) + 1;
outputHeight = ceil(float(inputHeight - kH + 2*padH) / float(dH)) + 1;
outputWidth = ceil(float(inputWidth - kW + 2*padW) / float(dW)) + 1;
}
else
{
outputTime = floor(float(inputTime - kT + 2*padT) / float(dT)) + 1;
outputHeight = floor(float(inputHeight - kH + 2*padH) / float(dH)) + 1;
outputWidth = floor(float(inputWidth - kW + 2*padW) / float(dW)) + 1;
}
if (padT || padW || padH)
{
// ensure that the last pooling starts inside the image
// needed to avoid problems in ceil mode
if ((outputTime - 1)*dT >= inputTime + padT)
--outputTime;
if ((outputHeight - 1)*dH >= inputHeight + padH)
--outputHeight;
if ((outputWidth - 1)*dW >= inputWidth + padW)
--outputWidth;
}
if (gradOutput != NULL)
{
THCUNN_check_dim_size(state, gradOutput, ndim, dimN, inputSlices);
THCUNN_check_dim_size(state, gradOutput, ndim, dimt, outputTime);
THCUNN_check_dim_size(state, gradOutput, ndim, dimh, outputHeight);
THCUNN_check_dim_size(state, gradOutput, ndim, dimw, outputWidth);
}
}
void THNN_(VolumetricAveragePooling_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
int kT, int kW, int kH,
int dT, int dW, int dH,
int padT, int padW, int padH,
bool ceil_mode,
bool count_include_pad)
{
int batchSize;
int inputSlices;
int inputTime;
int inputHeight;
int inputWidth;
int dimt = 1;
int dimh = 2;
int dimw = 3;
int fiveDimensionalInput = THCTensor_(nDimensionLegacyNoScalars)(state, input) == 5;
if (fiveDimensionalInput)
{
dimt++;
dimh++;
dimw++;
}
THNN_(VolumetricAveragePooling_shapeCheck)
(state, input, NULL, kT, kW, kH, dT, dW, dH,
padT, padW, padH, ceil_mode);
if (!fiveDimensionalInput) /* 4D */
{
/* sizes */
batchSize = 1;
inputSlices = THCTensor_(size)(state, input, 0);
inputTime = THCTensor_(size)(state, input, 1);
inputHeight = THCTensor_(size)(state, input, 2);
inputWidth = THCTensor_(size)(state, input, 3);
}
else /* 5D */
{
/* sizes */
batchSize = THCTensor_(size)(state, input, 0);
inputSlices = THCTensor_(size)(state, input, 1);
inputTime = THCTensor_(size)(state, input, 2);
inputHeight = THCTensor_(size)(state, input, 3);
inputWidth = THCTensor_(size)(state, input, 4);
}
int outputTime;
int outputHeight;
int outputWidth;
if (ceil_mode)
{
outputTime = ceil(float(inputTime - kT + 2*padT) / float(dT)) + 1;
outputHeight = ceil(float(inputHeight - kH + 2*padH) / float(dH)) + 1;
outputWidth = ceil(float(inputWidth - kW + 2*padW) / float(dW)) + 1;
}
else
{
outputTime = floor(float(inputTime - kT + 2*padT) / float(dT)) + 1;
outputHeight = floor(float(inputHeight - kH + 2*padH) / float(dH)) + 1;
outputWidth = floor(float(inputWidth - kW + 2*padW) / float(dW)) + 1;
}
if (padT || padH || padW)
{
// ensure that the last pooling starts inside the image
// needed to avoid problems in ceil mode
if ((outputTime - 1)*dT >= inputTime + padT)
--outputTime;
if ((outputHeight - 1)*dH >= inputHeight + padH)
--outputHeight;
if ((outputWidth - 1)*dW >= inputWidth + padW)
--outputWidth;
}
if (!fiveDimensionalInput) /* 4D */
{
/* resize output */
THCTensor_(resize4d)(state, output, inputSlices,
outputTime, outputHeight, outputWidth);
}
else /* 5D */
{
THCTensor_(resize5d)(state, output, batchSize, inputSlices,
outputTime, outputHeight, outputWidth);
}
input = THCTensor_(newContiguous)(state, input);
if (fiveDimensionalInput) {
// Collapse batch and feature dimensions
output = THCTensor_(newFoldBatchDim)(state, output);
THCTensor *old_input = input;
input = THCTensor_(newFoldBatchDim)(state, input);
THCTensor_(free)(state, old_input);
} else {
THCTensor_(retain)(state, output);
}
THCDeviceTensor<real, 4> cudaInput;
THCDeviceTensor<real, 4> cudaOutput;
cudaInput = toDeviceTensor<real, 4>(state, input);
cudaOutput = toDeviceTensor<real, 4>(state, output);
int totalZ = outputTime * inputSlices * batchSize;
int offsetZ = 0;
dim3 block(32, 8);
while (totalZ > 0) {
dim3 grid(THCCeilDiv(outputWidth, static_cast<int>(block.x)),
THCCeilDiv(outputHeight, static_cast<int>(block.y)),
totalZ > 65535 ? 65535 : totalZ);
switch (kW)
{
LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(1);
LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(2);
LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(3);
LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(4);
LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(5);
LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(6);
LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(7);
default:
cuda_VolumetricAveragePooling_updateOutput<real, accreal>
<<<grid, block, 0, THCState_getCurrentStream(state)>>>(
cudaInput,
cudaOutput,
kT, kH, kW,
dT, dH, dW,
padT, padH, padW,
count_include_pad,
offsetZ);
break;
}
totalZ -= 65535;
offsetZ += 65535;
THCudaCheck(cudaGetLastError());
}
THCTensor_(free)(state, input);
THCTensor_(free)(state, output);
}
void THNN_(VolumetricAveragePooling_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
int kT, int kW, int kH,
int dT, int dW, int dH,
int padT, int padW, int padH,
bool ceil_mode,
bool count_include_pad)
{
THNN_(VolumetricAveragePooling_shapeCheck)
(state, input, gradOutput, kT, kW, kH, dT, dW, dH,
padT, padW, padH, ceil_mode);
bool kernelsOverlap = (dT < kT) || (dH < kH) || (dW < kW);
// Resize and initialize result tensor.
THCTensor_(resizeAs)(state, gradInput, input);
THCTensor_(zero)(state, gradInput);
int batchSize;
int inputSlices;
int inputTime;
int inputHeight;
int inputWidth;
int outputTime;
int outputHeight;
int outputWidth;
int fiveDimensionalInput = THCTensor_(nDimensionLegacyNoScalars)(state, input) == 5;
if (!fiveDimensionalInput) /* 4D */
{
batchSize = 1;
inputSlices = THCTensor_(size)(state, input, 0);
inputTime = THCTensor_(size)(state, input, 1);
inputHeight = THCTensor_(size)(state, input, 2);
inputWidth = THCTensor_(size)(state, input, 3);
outputTime = THCTensor_(size)(state, gradOutput, 1);
outputHeight = THCTensor_(size)(state, gradOutput, 2);
outputWidth = THCTensor_(size)(state, gradOutput, 3);
}
else
{
batchSize = THCTensor_(size)(state, input, 0);
inputSlices = THCTensor_(size)(state, input, 1);
inputTime = THCTensor_(size)(state, input, 2);
inputHeight = THCTensor_(size)(state, input, 3);
inputWidth = THCTensor_(size)(state, input, 4);
outputTime = THCTensor_(size)(state, gradOutput, 2);
outputHeight = THCTensor_(size)(state, gradOutput, 3);
outputWidth = THCTensor_(size)(state, gradOutput, 4);
}
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
if (fiveDimensionalInput) {
// Collapse batch and feature dimensions
gradInput = THCTensor_(newFoldBatchDim)(state, gradInput);
THCTensor *old_gradOutput = gradOutput;
gradOutput = THCTensor_(newFoldBatchDim)(state, gradOutput);
THCTensor_(free)(state, old_gradOutput);
} else {
THCTensor_(retain)(state, gradInput);
}
THCDeviceTensor<real, 4> cudaGradInput;
THCDeviceTensor<real, 4> cudaGradOutput;
cudaGradInput = toDeviceTensor<real, 4>(state, gradInput);
cudaGradOutput = toDeviceTensor<real, 4>(state, gradOutput);
dim3 block(32, 8);
// Optimizing for stride 1 is probably only of limited value, but this
// specialization yields 3x speedup over the atomicAdd implementation.
// Padding must be 0, otherwise, pool size may change.
if (dT == 1 && dH == 1 && dW == 1 && padT == 0 && padH == 0 && padW == 0)
{
int totalZ = inputTime * inputSlices * batchSize;
int offsetZ = 0;
while (totalZ > 0) {
dim3 grid(THCCeilDiv(inputWidth, static_cast<int>(block.x)),
THCCeilDiv(inputHeight, static_cast<int>(block.y)),
totalZ > 65535 ? 65535 : totalZ);
cuda_VolumetricAveragePooling_updateGradInput_Stride1<real, accreal>
<<<grid, block, 0, THCState_getCurrentStream(state)>>>(
cudaGradOutput, cudaGradInput, kT, kH, kW, 1.0f/(kT * kH * kW), offsetZ);
THCudaCheck(cudaGetLastError());
totalZ -= 65535;
offsetZ += 65535;
}
}
else
{
int totalZ = outputTime * inputSlices * batchSize;
int offsetZ = 0;
while (totalZ > 0) {
dim3 grid(THCCeilDiv(outputWidth, static_cast<int>(block.x)),
THCCeilDiv(outputHeight, static_cast<int>(block.y)),
totalZ > 65535 ? 65535 : totalZ);
if (kernelsOverlap)
{
cuda_VolumetricAveragePooling_updateGradInput_atomicAdd<real, accreal>
<<<grid, block, 0, THCState_getCurrentStream(state)>>>(
cudaGradOutput, cudaGradInput, kT, kH, kW, dT, dH, dW,
padT, padH, padW, count_include_pad, offsetZ);
}
else
{
cuda_VolumetricAveragePooling_updateGradInput<real, accreal>
<<<grid, block, 0, THCState_getCurrentStream(state)>>>(
cudaGradOutput, cudaGradInput, kT, kH, kW, dT, dH, dW,
padT, padH, padW, count_include_pad, offsetZ);
}
THCudaCheck(cudaGetLastError());
totalZ -= 65535;
offsetZ += 65535;
}
}
THCTensor_(free)(state, gradInput);
THCTensor_(free)(state, gradOutput);
}
#endif
|
d5bd136190eaeaa6c3c838d883effea7b82f30b1.hip | // !!! This is a file automatically generated by hipify!!!
#include "chainerx/cuda/cuda_device.h"
#include <cmath>
#include <cstdint>
#include <hip/hip_runtime.h>
#include "chainerx/array.h"
#include "chainerx/cuda/cuda_runtime.h"
#include "chainerx/cuda/cuda_set_device_scope.h"
#include "chainerx/cuda/elementwise.cuh"
#include "chainerx/cuda/numeric.cuh"
#include "chainerx/device.h"
#include "chainerx/dtype.h"
namespace chainerx {
namespace cuda {
namespace {
template <typename T>
struct ExpImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x, CudaType& out) { out = cuda::Exp(x); }
};
} // namespace
void CudaDevice::Exp(const Array& x, const Array& out) {
CheckDevicesCompatible(x, out);
CudaSetDeviceScope scope{index()};
const Array& x_cast = x.dtype() == out.dtype() ? x : x.AsType(out.dtype());
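// VisitFloatingPointDtype dispatches on the runtime dtype and instantiates the
// elementwise kernel with the matching static type T.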
VisitFloatingPointDtype(out.dtype(), [&x_cast, &out](auto pt) {
using T = typename decltype(pt)::type;
Elementwise<const T, T>(ExpImpl<T>{}, x_cast, out);
});
}
namespace {
template <typename T>
struct LogImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x, CudaType& out) { out = cuda::Log(x); }
};
} // namespace
void CudaDevice::Log(const Array& x, const Array& out) {
CheckDevicesCompatible(x, out);
CudaSetDeviceScope scope{index()};
const Array& x_cast = x.dtype() == out.dtype() ? x : x.AsType(out.dtype());
VisitFloatingPointDtype(out.dtype(), [&x_cast, &out](auto pt) {
using T = typename decltype(pt)::type;
Elementwise<const T, T>(LogImpl<T>{}, x_cast, out);
});
}
} // namespace cuda
} // namespace chainerx
| d5bd136190eaeaa6c3c838d883effea7b82f30b1.cu | #include "chainerx/cuda/cuda_device.h"
#include <cmath>
#include <cstdint>
#include <cuda_runtime.h>
#include "chainerx/array.h"
#include "chainerx/cuda/cuda_runtime.h"
#include "chainerx/cuda/cuda_set_device_scope.h"
#include "chainerx/cuda/elementwise.cuh"
#include "chainerx/cuda/numeric.cuh"
#include "chainerx/device.h"
#include "chainerx/dtype.h"
namespace chainerx {
namespace cuda {
namespace {
template <typename T>
struct ExpImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x, CudaType& out) { out = cuda::Exp(x); }
};
} // namespace
void CudaDevice::Exp(const Array& x, const Array& out) {
CheckDevicesCompatible(x, out);
CudaSetDeviceScope scope{index()};
const Array& x_cast = x.dtype() == out.dtype() ? x : x.AsType(out.dtype());
VisitFloatingPointDtype(out.dtype(), [&x_cast, &out](auto pt) {
using T = typename decltype(pt)::type;
Elementwise<const T, T>(ExpImpl<T>{}, x_cast, out);
});
}
namespace {
template <typename T>
struct LogImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x, CudaType& out) { out = cuda::Log(x); }
};
} // namespace
void CudaDevice::Log(const Array& x, const Array& out) {
CheckDevicesCompatible(x, out);
CudaSetDeviceScope scope{index()};
const Array& x_cast = x.dtype() == out.dtype() ? x : x.AsType(out.dtype());
VisitFloatingPointDtype(out.dtype(), [&x_cast, &out](auto pt) {
using T = typename decltype(pt)::type;
Elementwise<const T, T>(LogImpl<T>{}, x_cast, out);
});
}
} // namespace cuda
} // namespace chainerx
|
d2f1c49425b48629db93ebf89f378c3c42e6b27c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//******************************************************************************
//
// File: ModCubeRoot.cu
//
// This CUDA C file is the kernel function for the GPU to try and break the cipher
// key
//
//******************************************************************************
// Number of threads per block.
#define NT 1024
// Overall counter variable in global memory.
__device__ unsigned int incrementNumber;
/**
* This kernel is used to break the RSA
* @param N the RSA modulus; here limited to what fits in an unsigned long long
* (real 2048-bit moduli would need multi-precision arithmetic).
* @param C cipher key.
* @param possibleValues array of possible answers.
*
* @author Nikhil Keswaney
* @version 11-15-2018
*/
extern "C" __global__ void DoBruteForce
(unsigned long long int N, unsigned long long int C, unsigned long long int* possibleValues)
{
unsigned long long int thr, size, rank;
unsigned long long int m;
// Determine number of threads and this thread's rank.
thr = threadIdx.x;
size = gridDim.x*NT;
rank = blockIdx.x*NT + thr;
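// Grid-stride loop: each thread tests the candidates rank, rank + size,
// rank + 2*size, ... so the whole range [0, N) is covered by the grid.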
for (unsigned long long int i = rank; i < N; i += size)
{
m = (((i * i) % N) * i) % N;
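// i^3 mod N is computed as ((i*i) % N) * i % N to keep intermediates small;
// i*i still wraps modulo 2^64, so the result is only valid for N up to ~2^32.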
if(m == C){
possibleValues[atomicAdd(&incrementNumber, 1)] = i;
}
}
}
| d2f1c49425b48629db93ebf89f378c3c42e6b27c.cu | //******************************************************************************
//
// File: ModCubeRoot.cu
//
// This CUDA C file is the kernel function for the GPU to try and break the cipher
// key
//
//******************************************************************************
// Number of threads per block.
#define NT 1024
// Overall counter variable in global memory.
__device__ unsigned int incrementNumber;
/**
* This kernel is used to break the RSA
* @param N the RSA modulus; here limited to what fits in an unsigned long long
* (real 2048-bit moduli would need multi-precision arithmetic).
* @param C cipher key.
* @param possibleValues array of possible answers.
*
* @author Nikhil Keswaney
* @version 11-15-2018
*/
extern "C" __global__ void DoBruteForce
(unsigned long long int N, unsigned long long int C, unsigned long long int* possibleValues)
{
unsigned long long int thr, size, rank;
unsigned long long int m;
// Determine number of threads and this thread's rank.
thr = threadIdx.x;
size = gridDim.x*NT;
rank = blockIdx.x*NT + thr;
for (unsigned long long int i = rank; i < N; i += size)
{
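// Candidate plaintext cubed modulo N: m = i^3 mod N.
// Note: i * i must fit in an unsigned 64-bit value for this to be exact.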
m = (((i * i) % N) * i) % N;
if(m == C){
possibleValues[atomicAdd(&incrementNumber, 1)] = i;
}
}
}
|
a98b81a0bcd462b690f6968216455a4c38ce3667.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Utilities.cuh"
#include "InputOutput.cuh"
#define BLOCKSIZE 128
/*******************/
/* KERNEL FUNCTION */
/*******************/
template<class T>
__global__ void kernelFunction(T * __restrict__ d_data, const unsigned int NperGPU) {
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < NperGPU) for (int k = 0; k < 1000; k++) d_data[tid] = d_data[tid] * d_data[tid];
}
/******************/
/* PLAN STRUCTURE */
/******************/
template<class T>
struct plan {
T *d_data;
T *h_data;
};
/*********************/
/* SVD PLAN CREATION */
/*********************/
template<class T>
void createPlan(plan<T>& plan, unsigned int NperGPU, unsigned int gpuID) {
// --- Device allocation
gpuErrchk(hipSetDevice(gpuID));
gpuErrchk(hipMalloc(&(plan.d_data), NperGPU * sizeof(T)));
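// Page-locked host allocation so the hipMemcpyAsync calls below can run asynchronously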
gpuErrchk(hipHostMalloc((void **)&plan.h_data, NperGPU * sizeof(T)));
}
/********/
/* MAIN */
/********/
int main() {
const int numGPUs = 4;
const int NperGPU = 500000;
const int N = NperGPU * numGPUs;
plan<double> plan[numGPUs];
for (int k = 0; k < numGPUs; k++) createPlan(plan[k], NperGPU, k);
// --- "Depth-first" approach - no stream
for (int k = 0; k < numGPUs; k++)
{
gpuErrchk(hipSetDevice(k));
gpuErrchk(hipMemcpyAsync(plan[k].d_data, plan[k].h_data, NperGPU * sizeof(double), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( kernelFunction), dim3(iDivUp(NperGPU, BLOCKSIZE)), dim3(BLOCKSIZE), 0, 0, plan[k].d_data, NperGPU);
gpuErrchk(hipMemcpyAsync(plan[k].h_data, plan[k].d_data, NperGPU * sizeof(double), hipMemcpyDeviceToHost));
}
gpuErrchk(hipDeviceReset());
}
| a98b81a0bcd462b690f6968216455a4c38ce3667.cu | #include "Utilities.cuh"
#include "InputOutput.cuh"
#define BLOCKSIZE 128
/*******************/
/* KERNEL FUNCTION */
/*******************/
template<class T>
__global__ void kernelFunction(T * __restrict__ d_data, const unsigned int NperGPU) {
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < NperGPU) for (int k = 0; k < 1000; k++) d_data[tid] = d_data[tid] * d_data[tid];
}
/******************/
/* PLAN STRUCTURE */
/******************/
template<class T>
struct plan {
T *d_data;
T *h_data;
};
/*********************/
/* SVD PLAN CREATION */
/*********************/
template<class T>
void createPlan(plan<T>& plan, unsigned int NperGPU, unsigned int gpuID) {
// --- Device allocation
gpuErrchk(cudaSetDevice(gpuID));
gpuErrchk(cudaMalloc(&(plan.d_data), NperGPU * sizeof(T)));
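// Page-locked host allocation so the cudaMemcpyAsync calls below can run asynchronously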
gpuErrchk(cudaMallocHost((void **)&plan.h_data, NperGPU * sizeof(T)));
}
/********/
/* MAIN */
/********/
int main() {
const int numGPUs = 4;
const int NperGPU = 500000;
const int N = NperGPU * numGPUs;
plan<double> plan[numGPUs];
for (int k = 0; k < numGPUs; k++) createPlan(plan[k], NperGPU, k);
// --- "Depth-first" approach - no stream
for (int k = 0; k < numGPUs; k++)
{
gpuErrchk(cudaSetDevice(k));
gpuErrchk(cudaMemcpyAsync(plan[k].d_data, plan[k].h_data, NperGPU * sizeof(double), cudaMemcpyHostToDevice));
kernelFunction<<<iDivUp(NperGPU, BLOCKSIZE), BLOCKSIZE>>>(plan[k].d_data, NperGPU);
gpuErrchk(cudaMemcpyAsync(plan[k].h_data, plan[k].d_data, NperGPU * sizeof(double), cudaMemcpyDeviceToHost));
}
gpuErrchk(cudaDeviceReset());
}
|
8af2c585e93870417f71bf08fc1b985578ea61c7.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/impl/Distance.cuh>
#include <faiss/gpu/impl/FlatIndex.cuh>
#include <faiss/gpu/impl/L2Norm.cuh>
#include <faiss/gpu/impl/VectorResidual.cuh>
#include <faiss/gpu/utils/ConversionOperators.cuh>
#include <faiss/gpu/utils/CopyUtils.cuh>
#include <faiss/gpu/utils/Transpose.cuh>
namespace faiss {
namespace gpu {
FlatIndex::FlatIndex(
GpuResources* res,
int dim,
bool useFloat16,
MemorySpace space)
: resources_(res),
dim_(dim),
useFloat16_(useFloat16),
space_(space),
num_(0),
rawData32_(
res,
AllocInfo(
AllocType::FlatData,
getCurrentDevice(),
space,
res->getDefaultStreamCurrentDevice())),
rawData16_(
res,
AllocInfo(
AllocType::FlatData,
getCurrentDevice(),
space,
res->getDefaultStreamCurrentDevice())) {}
bool FlatIndex::getUseFloat16() const {
return useFloat16_;
}
/// Returns the number of vectors we contain
int FlatIndex::getSize() const {
if (useFloat16_) {
return vectorsHalf_.getSize(0);
} else {
return vectors_.getSize(0);
}
}
int FlatIndex::getDim() const {
return dim_;
}
void FlatIndex::reserve(size_t numVecs, hipStream_t stream) {
if (useFloat16_) {
rawData16_.reserve(numVecs * dim_ * sizeof(half), stream);
} else {
rawData32_.reserve(numVecs * dim_ * sizeof(float), stream);
}
// The above may have caused a reallocation, we need to update the vector
// types
if (useFloat16_) {
DeviceTensor<half, 2, true> vectors16(
(half*)rawData16_.data(), {num_, dim_});
vectorsHalf_ = std::move(vectors16);
} else {
DeviceTensor<float, 2, true> vectors32(
(float*)rawData32_.data(), {num_, dim_});
vectors_ = std::move(vectors32);
}
}
Tensor<float, 2, true>& FlatIndex::getVectorsFloat32Ref() {
// Should not call this unless we are in float32 mode
FAISS_ASSERT(!useFloat16_);
return vectors_;
}
Tensor<half, 2, true>& FlatIndex::getVectorsFloat16Ref() {
// Should not call this unless we are in float16 mode
FAISS_ASSERT(useFloat16_);
return vectorsHalf_;
}
void FlatIndex::query(
Tensor<float, 2, true>& input,
int k,
faiss::MetricType metric,
float metricArg,
Tensor<float, 2, true>& outDistances,
Tensor<int, 2, true>& outIndices,
bool exactDistance) {
auto stream = resources_->getDefaultStreamCurrentDevice();
if (useFloat16_) {
// We need to convert the input to float16 for comparison to ourselves
auto inputHalf = convertTensorTemporary<float, half, 2>(
resources_, stream, input);
query(inputHalf,
k,
metric,
metricArg,
outDistances,
outIndices,
exactDistance);
} else {
bfKnnOnDevice(
resources_,
getCurrentDevice(),
stream,
vectors_,
true, // is vectors row major?
&norms_,
input,
true, // input is row major
k,
metric,
metricArg,
outDistances,
outIndices,
!exactDistance);
}
}
void FlatIndex::query(
Tensor<half, 2, true>& input,
int k,
faiss::MetricType metric,
float metricArg,
Tensor<float, 2, true>& outDistances,
Tensor<int, 2, true>& outIndices,
bool exactDistance) {
FAISS_ASSERT(useFloat16_);
bfKnnOnDevice(
resources_,
getCurrentDevice(),
resources_->getDefaultStreamCurrentDevice(),
vectorsHalf_,
true, // is vectors row major?
&norms_,
input,
true, // input is row major
k,
metric,
metricArg,
outDistances,
outIndices,
!exactDistance);
}
void FlatIndex::computeResidual(
Tensor<float, 2, true>& vecs,
Tensor<idx_t, 1, true>& ids,
Tensor<float, 2, true>& residuals) {
if (useFloat16_) {
runCalcResidual(
vecs,
getVectorsFloat16Ref(),
ids,
residuals,
resources_->getDefaultStreamCurrentDevice());
} else {
runCalcResidual(
vecs,
getVectorsFloat32Ref(),
ids,
residuals,
resources_->getDefaultStreamCurrentDevice());
}
}
void FlatIndex::reconstruct(
idx_t start,
idx_t num,
Tensor<float, 2, true>& vecs) {
auto stream = resources_->getDefaultStreamCurrentDevice();
FAISS_ASSERT(vecs.getSize(0) == num);
FAISS_ASSERT(vecs.getSize(1) == dim_);
if (useFloat16_) {
runReconstruct(start, num, getVectorsFloat16Ref(), vecs, stream);
} else {
runReconstruct(start, num, getVectorsFloat32Ref(), vecs, stream);
}
}
void FlatIndex::reconstruct(
Tensor<idx_t, 1, true>& ids,
Tensor<float, 2, true>& vecs) {
auto stream = resources_->getDefaultStreamCurrentDevice();
FAISS_ASSERT(vecs.getSize(0) == ids.getSize(0));
FAISS_ASSERT(vecs.getSize(1) == dim_);
if (useFloat16_) {
runReconstruct(ids, getVectorsFloat16Ref(), vecs, stream);
} else {
runReconstruct(ids, getVectorsFloat32Ref(), vecs, stream);
}
}
void FlatIndex::add(const float* data, int numVecs, hipStream_t stream) {
if (numVecs == 0) {
return;
}
// convert and add to float16 data if needed
if (useFloat16_) {
// Make sure that `data` is on our device; we'll run the
// conversion on our device
auto devData = toDeviceTemporary<float, 2>(
resources_,
getCurrentDevice(),
(float*)data,
stream,
{numVecs, dim_});
auto devDataHalf = convertTensorTemporary<float, half, 2>(
resources_, stream, devData);
rawData16_.append(
(char*)devDataHalf.data(),
devDataHalf.getSizeInBytes(),
stream,
true /* reserve exactly */);
} else {
// add to float32 data
rawData32_.append(
(char*)data,
(size_t)dim_ * numVecs * sizeof(float),
stream,
true /* reserve exactly */);
}
num_ += numVecs;
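// The appends above may have reallocated the raw data; rebuild the typed
// tensor views over the new storage.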
if (useFloat16_) {
DeviceTensor<half, 2, true> vectors16(
(half*)rawData16_.data(), {(int)num_, dim_});
vectorsHalf_ = std::move(vectors16);
} else {
DeviceTensor<float, 2, true> vectors32(
(float*)rawData32_.data(), {(int)num_, dim_});
vectors_ = std::move(vectors32);
}
// Precompute L2 norms of our database
if (useFloat16_) {
DeviceTensor<float, 1, true> norms(
resources_,
makeSpaceAlloc(AllocType::FlatData, space_, stream),
{(int)num_});
runL2Norm(vectorsHalf_, true, norms, true, stream);
norms_ = std::move(norms);
} else {
DeviceTensor<float, 1, true> norms(
resources_,
makeSpaceAlloc(AllocType::FlatData, space_, stream),
{(int)num_});
runL2Norm(vectors_, true, norms, true, stream);
norms_ = std::move(norms);
}
}
void FlatIndex::reset() {
rawData32_.clear();
rawData16_.clear();
vectors_ = DeviceTensor<float, 2, true>();
vectorsHalf_ = DeviceTensor<half, 2, true>();
norms_ = DeviceTensor<float, 1, true>();
num_ = 0;
}
} // namespace gpu
} // namespace faiss
| 8af2c585e93870417f71bf08fc1b985578ea61c7.cu | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/impl/Distance.cuh>
#include <faiss/gpu/impl/FlatIndex.cuh>
#include <faiss/gpu/impl/L2Norm.cuh>
#include <faiss/gpu/impl/VectorResidual.cuh>
#include <faiss/gpu/utils/ConversionOperators.cuh>
#include <faiss/gpu/utils/CopyUtils.cuh>
#include <faiss/gpu/utils/Transpose.cuh>
namespace faiss {
namespace gpu {
FlatIndex::FlatIndex(
GpuResources* res,
int dim,
bool useFloat16,
MemorySpace space)
: resources_(res),
dim_(dim),
useFloat16_(useFloat16),
space_(space),
num_(0),
rawData32_(
res,
AllocInfo(
AllocType::FlatData,
getCurrentDevice(),
space,
res->getDefaultStreamCurrentDevice())),
rawData16_(
res,
AllocInfo(
AllocType::FlatData,
getCurrentDevice(),
space,
res->getDefaultStreamCurrentDevice())) {}
bool FlatIndex::getUseFloat16() const {
return useFloat16_;
}
/// Returns the number of vectors we contain
int FlatIndex::getSize() const {
if (useFloat16_) {
return vectorsHalf_.getSize(0);
} else {
return vectors_.getSize(0);
}
}
int FlatIndex::getDim() const {
return dim_;
}
void FlatIndex::reserve(size_t numVecs, cudaStream_t stream) {
if (useFloat16_) {
rawData16_.reserve(numVecs * dim_ * sizeof(half), stream);
} else {
rawData32_.reserve(numVecs * dim_ * sizeof(float), stream);
}
// The above may have caused a reallocation, we need to update the vector
// types
if (useFloat16_) {
DeviceTensor<half, 2, true> vectors16(
(half*)rawData16_.data(), {num_, dim_});
vectorsHalf_ = std::move(vectors16);
} else {
DeviceTensor<float, 2, true> vectors32(
(float*)rawData32_.data(), {num_, dim_});
vectors_ = std::move(vectors32);
}
}
Tensor<float, 2, true>& FlatIndex::getVectorsFloat32Ref() {
// Should not call this unless we are in float32 mode
FAISS_ASSERT(!useFloat16_);
return vectors_;
}
Tensor<half, 2, true>& FlatIndex::getVectorsFloat16Ref() {
// Should not call this unless we are in float16 mode
FAISS_ASSERT(useFloat16_);
return vectorsHalf_;
}
void FlatIndex::query(
Tensor<float, 2, true>& input,
int k,
faiss::MetricType metric,
float metricArg,
Tensor<float, 2, true>& outDistances,
Tensor<int, 2, true>& outIndices,
bool exactDistance) {
auto stream = resources_->getDefaultStreamCurrentDevice();
if (useFloat16_) {
// We need to convert the input to float16 for comparison to ourselves
auto inputHalf = convertTensorTemporary<float, half, 2>(
resources_, stream, input);
query(inputHalf,
k,
metric,
metricArg,
outDistances,
outIndices,
exactDistance);
} else {
bfKnnOnDevice(
resources_,
getCurrentDevice(),
stream,
vectors_,
true, // is vectors row major?
&norms_,
input,
true, // input is row major
k,
metric,
metricArg,
outDistances,
outIndices,
!exactDistance);
}
}
void FlatIndex::query(
Tensor<half, 2, true>& input,
int k,
faiss::MetricType metric,
float metricArg,
Tensor<float, 2, true>& outDistances,
Tensor<int, 2, true>& outIndices,
bool exactDistance) {
FAISS_ASSERT(useFloat16_);
bfKnnOnDevice(
resources_,
getCurrentDevice(),
resources_->getDefaultStreamCurrentDevice(),
vectorsHalf_,
true, // is vectors row major?
&norms_,
input,
true, // input is row major
k,
metric,
metricArg,
outDistances,
outIndices,
!exactDistance);
}
void FlatIndex::computeResidual(
Tensor<float, 2, true>& vecs,
Tensor<idx_t, 1, true>& ids,
Tensor<float, 2, true>& residuals) {
if (useFloat16_) {
runCalcResidual(
vecs,
getVectorsFloat16Ref(),
ids,
residuals,
resources_->getDefaultStreamCurrentDevice());
} else {
runCalcResidual(
vecs,
getVectorsFloat32Ref(),
ids,
residuals,
resources_->getDefaultStreamCurrentDevice());
}
}
void FlatIndex::reconstruct(
idx_t start,
idx_t num,
Tensor<float, 2, true>& vecs) {
auto stream = resources_->getDefaultStreamCurrentDevice();
FAISS_ASSERT(vecs.getSize(0) == num);
FAISS_ASSERT(vecs.getSize(1) == dim_);
if (useFloat16_) {
runReconstruct(start, num, getVectorsFloat16Ref(), vecs, stream);
} else {
runReconstruct(start, num, getVectorsFloat32Ref(), vecs, stream);
}
}
void FlatIndex::reconstruct(
Tensor<idx_t, 1, true>& ids,
Tensor<float, 2, true>& vecs) {
auto stream = resources_->getDefaultStreamCurrentDevice();
FAISS_ASSERT(vecs.getSize(0) == ids.getSize(0));
FAISS_ASSERT(vecs.getSize(1) == dim_);
if (useFloat16_) {
runReconstruct(ids, getVectorsFloat16Ref(), vecs, stream);
} else {
runReconstruct(ids, getVectorsFloat32Ref(), vecs, stream);
}
}
void FlatIndex::add(const float* data, int numVecs, cudaStream_t stream) {
if (numVecs == 0) {
return;
}
// convert and add to float16 data if needed
if (useFloat16_) {
// Make sure that `data` is on our device; we'll run the
// conversion on our device
auto devData = toDeviceTemporary<float, 2>(
resources_,
getCurrentDevice(),
(float*)data,
stream,
{numVecs, dim_});
auto devDataHalf = convertTensorTemporary<float, half, 2>(
resources_, stream, devData);
rawData16_.append(
(char*)devDataHalf.data(),
devDataHalf.getSizeInBytes(),
stream,
true /* reserve exactly */);
} else {
// add to float32 data
rawData32_.append(
(char*)data,
(size_t)dim_ * numVecs * sizeof(float),
stream,
true /* reserve exactly */);
}
num_ += numVecs;
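// The appends above may have reallocated the raw data; rebuild the typed
// tensor views over the new storage.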
if (useFloat16_) {
DeviceTensor<half, 2, true> vectors16(
(half*)rawData16_.data(), {(int)num_, dim_});
vectorsHalf_ = std::move(vectors16);
} else {
DeviceTensor<float, 2, true> vectors32(
(float*)rawData32_.data(), {(int)num_, dim_});
vectors_ = std::move(vectors32);
}
// Precompute L2 norms of our database
if (useFloat16_) {
DeviceTensor<float, 1, true> norms(
resources_,
makeSpaceAlloc(AllocType::FlatData, space_, stream),
{(int)num_});
runL2Norm(vectorsHalf_, true, norms, true, stream);
norms_ = std::move(norms);
} else {
DeviceTensor<float, 1, true> norms(
resources_,
makeSpaceAlloc(AllocType::FlatData, space_, stream),
{(int)num_});
runL2Norm(vectors_, true, norms, true, stream);
norms_ = std::move(norms);
}
}
void FlatIndex::reset() {
rawData32_.clear();
rawData16_.clear();
vectors_ = DeviceTensor<float, 2, true>();
vectorsHalf_ = DeviceTensor<half, 2, true>();
norms_ = DeviceTensor<float, 1, true>();
num_ = 0;
}
} // namespace gpu
} // namespace faiss
|
da787dea259ba94abdcb8986521c3d46337a2629.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "l2_norm_dVector_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
double *partial_sum = NULL;
hipMalloc(&partial_sum, XSIZE*YSIZE);
int n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((l2_norm_dVector_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, a, partial_sum, n);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((l2_norm_dVector_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, a, partial_sum, n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((l2_norm_dVector_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, a, partial_sum, n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | da787dea259ba94abdcb8986521c3d46337a2629.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "l2_norm_dVector_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
double *partial_sum = NULL;
cudaMalloc(&partial_sum, XSIZE*YSIZE);
int n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
l2_norm_dVector_kernel<<<gridBlock,threadBlock>>>(a,partial_sum,n);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
l2_norm_dVector_kernel<<<gridBlock,threadBlock>>>(a,partial_sum,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
l2_norm_dVector_kernel<<<gridBlock,threadBlock>>>(a,partial_sum,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
3ac66280c8fc1da12a459304fdadf9dae4c65bdb.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <ctime>
#include <vector>
#include <algorithm>
#include <stdlib.h>
// utilities
#include <helper_cuda.h>
#include <time.h>
#include <sys/time.h>
#include <hip/hip_cooperative_groups.h>
#include <hip/hip_runtime_api.h>
using namespace cooperative_groups;
/////////////////////////////L1 is enabled. "ALL_CCFLAGS += -Xptxas -dlcm=ca"
//////////////large vs small data.
/////test fault counts for concurrent vs. looped accesses: concurrent warps vs. concurrent threads.
/////more: concurrent warps on the same core vs. on different cores (observation:)
//////nvprof --profile-from-start off --print-gpu-trace --log-file 4warpsall.txt --csv ./fault_group_test15
void init_cpu_data(long long int* A, long long int size, double stride){
for (long long int i = 0; i < size; i++){
A[i]=1;
}
/*
for (long long int i = 0; i < size - stride; i++){
A[i]=(i + stride);
}
for (long long int i = size - stride; i < size; i++){
A[i]=0;
}
*/
}
__global__ void gpu_initialization(long long int *A, double data_stride, long long int data_size){
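// Grid-stride loop: each thread initializes every thread_num-th element.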
long long int index = (blockIdx.x * blockDim.x + threadIdx.x);
long long int thread_num = gridDim.x * blockDim.x;
for(long long int it = 0; it < data_size; it = it + thread_num){
A[index + it]=23;
}
}
long long unsigned time_diff(timespec start, timespec end){
struct timespec temp;
if ((end.tv_nsec - start.tv_nsec) < 0){
temp.tv_sec = end.tv_sec - start.tv_sec - 1;
temp.tv_nsec = 1000000000 + end.tv_nsec - start.tv_nsec;
}
else{
temp.tv_sec = end.tv_sec - start.tv_sec;
temp.tv_nsec = end.tv_nsec - start.tv_nsec;
}
long long unsigned time_interval_ns = temp.tv_nsec;
long long unsigned time_interval_s = temp.tv_sec;
time_interval_s = time_interval_s * 1000000000;
return time_interval_s + time_interval_ns;
}
/*
///////////////512(4k), 1024(8k), 8192(64k), 16384(128k), 262144 (2m), 4194304 (32m), 8388608 (64m),
__global__ void page_visitor(long long int *A1, long long int *B1, double data_stride, long long int clock_count){////long
long long int warp_id = (threadIdx.x + blockIdx.x * blockDim.x) >> 5;
double temp = (warp_id * 32 + (threadIdx.x % 32) ) * stride;
//if(warp_id == 27){
// temp = (1 * 32 + (threadIdx.x % 32) ) * stride;
//}
//double temp = (blockIdx.x * blockDim.x + threadIdx.x) * stride;
//double temp = ((blockIdx.x * blockDim.x + threadIdx.x) % 32) * 2 + blockIdx.x * 1;
long long int index = __double2ll_rd(temp);
long long int value1;
//if(warp_id == 0 || warp_id == 27){
if(threadIdx.x % 32 <= clock_count){
value1 = A1[index];
B1[index] = value1;
}
//}
}
*/
/*
__global__ void page_visitor2(long long int *A1, long long int *B1, double data_stride, long long int clock_count){///mixed
int warps_per_grid = (blockDim.x * gridDim.x) >> 5;
long long int warp_id = (threadIdx.x + blockIdx.x * blockDim.x) >> 5;
double temp = (threadIdx.x % 32) * stride * warps_per_grid + warp_id * stride;
long long int index = __double2ll_rd(temp);
long long int value1;
if(threadIdx.x % 32 <= clock_count){
value1 = A1[index];
B1[index] = value1;
}
}
*/
#define stride 1
///////////////512(4k), 1024(8k), 8192(64k), 16384(128k), 262144 (2m), 4194304 (32m), 8388608 (64m),
__global__ void page_visitor4(long long int *A1, long long int *B1, double data_stride, long long int clock_count){////1 thread 1 data / 1 warp 1 data
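// One element per thread at unit stride (stride == 1 here), so accesses within a warp are contiguous.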
long long int warp_id = (threadIdx.x + blockIdx.x * blockDim.x) >> 5;
//double temp = (warp_id * 32 + (threadIdx.x % 32) ) * stride;
double temp = (threadIdx.x + blockIdx.x * blockDim.x) * stride;
long long int index = __double2ll_rd(temp);
long long int value1;
//if(threadIdx.x <= clock_count){
value1 = A1[index];
B1[index] = value1;
//}
}
__global__ void page_visitor5(long long int *A1, long long int *B1, double data_stride, long long int clock_count){///1 thread all data
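// Only thread 0 runs the loop, serially touching clock_count + 1 consecutive elements.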
int warps_per_grid = (blockDim.x * gridDim.x) >> 5;
long long int warp_id = (threadIdx.x + blockIdx.x * blockDim.x) >> 5;
//double temp = (threadIdx.x % 32) * stride * warps_per_grid + warp_id * stride;
double temp = (threadIdx.x + blockIdx.x * blockDim.x) * stride;
long long int index = __double2ll_rd(temp);
long long int value1;
for(long long int i = 0; i <= clock_count; i++){
if(threadIdx.x == 0){
value1 = A1[index];
B1[index] = value1;
}
index+=stride;
}
}
__global__ void page_visitor6(long long int *A1, long long int *B1, double data_stride, long long int clock_count){///1 warp all data
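// The whole warp walks the array together: each lane reads one element, then
// the warp advances by a full warp width (32 * stride).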
int warps_per_grid = (blockDim.x * gridDim.x) >> 5;
long long int warp_id = (threadIdx.x + blockIdx.x * blockDim.x) >> 5;
//double temp = (threadIdx.x % 32) * stride * warps_per_grid + warp_id * stride;
double temp = (threadIdx.x + blockIdx.x * blockDim.x) * stride;
long long int index = __double2ll_rd(temp);
long long int value1;
for(long long int i = 0; i <= clock_count; i++){
value1 = A1[index];
B1[index] = value1;
index += 32 *stride;
}
}
///////////long 0 - 31 same core
///////////long 0 - 64 same core
///////////long 0 - 64 different core
///////////mixed 0 - 64 same core
///////////mixed 0 - 64 different core
int main(int argc, char **argv)
{
printf("\n");
// set device
hipDeviceProp_t device_prop;
//long long int dev_id = findCudaDevice(argc, (const char **) argv);
long long int dev_id = 0;
checkCudaErrors(hipGetDeviceProperties(&device_prop, dev_id));
//int peak_clk = 1;//kHz
//checkCudaErrors(hipDeviceGetAttribute(&peak_clk, hipDeviceAttributeClockRate, dev_id));
//float clock_rate = (float) peak_clk;
//printf("clock_rate:%f\n", clock_rate);
if (!device_prop.managedMemory) {
// This sample requires being run on a device that supports Unified Memory
fprintf(stderr, "Unified Memory not supported on this device\n");
exit(EXIT_WAIVED);
}
if (device_prop.computeMode == hipComputeModeProhibited)
{
// This sample requires being run with a default or process exclusive mode
fprintf(stderr, "This sample requires a device in either default or process exclusive mode\n");
exit(EXIT_WAIVED);
}
/*
if (device_prop.concurrentManagedAccess == 1){
printf("This device supports concurrent Managed Access.\n");
}else{
printf("This device does not support concurrent Managed Access.\n");
}
*/
int value1 = 1;
checkCudaErrors(hipDeviceGetAttribute(&value1, hipDeviceAttributeConcurrentManagedAccess, dev_id));
//printf("hipDeviceAttributeConcurrentManagedAccess = %d\n", value1);
//long long int num_thread = 256;
//long long int size_of_data = 524288;
///*
//printf("############approach\n");
for(long long int time = 0; time <= 0; time = time + 1){
//printf("\n####################time: %llu\n", time);
//long long int coverage2 = 0;
for(long long int coverage = 1; coverage <= 1; coverage = coverage * 2){///////////////8192 is 2m.
//coverage2++;
//if(coverage2 == 2){
// coverage = 1;
//}
//printf("############coverage: %llu\n", coverage);
for(long long int rate = 1; rate <= 1; rate = rate * 2){
//printf("############rate: %llu\n", rate);
//long long int offset2 = 0;
//for(long long int offset = 0; offset <= 0; offset = offset * 2){///////8
for(long long int offset = 0; offset <= 0; offset = offset + 8){
//offset2++;
//if(offset2 == 2){
// offset = 1;
//}
//printf("############offset: %llu\n", offset);
for(long long int factor = 1; factor <= 1; factor = factor * 2){/////////////16384 (128k) max
//printf("####################factor: %llu\n", factor);
for(double data_stride = 1 * 1 * 1 * factor; data_stride <= 1 * 1 * 1 * factor; data_stride = data_stride * 2){///134217728 = 1gb, 268435456 = 2gb, 536870912 = 4gb, 1073741824 = 8gb, 2147483648 = 16gb, 4294967296 = 32gb, 8589934592 = 64gb. (index)
//printf("\n");
for(long long int clock_count = 31; clock_count <= 31; clock_count = clock_count + 1){
///long long int time2 = time;
//if(time2 > clock_count){
// time2 = clock_count;
//}
///////////////////////////////////////////////////////////////////CPU data begin
double temp = data_stride * 512;
long long int data_size = (long long int) temp;
//data_size = data_size * 8192 * 512 / factor;
data_size = data_size * 8192 * 128 / factor;
long long int *CPU_data_in1;
checkCudaErrors(hipMallocManaged(&CPU_data_in1, sizeof(long long int) * data_size));/////////////using unified memory
///////////////////////////////////////////////////////////////////CPU data end
long long int *GPU_data_out1;
checkCudaErrors(hipMallocManaged(&GPU_data_out1, sizeof(long long int) * data_size));/////////////using unified memory
///////////////////////////////////////////////////////////////////GPU data out end
if(1){
double scale = 1;
if(data_stride < 1){
scale = data_stride;/////////make sure threadIdx is smaller than data_size in the initialization
}
hipLaunchKernelGGL(( gpu_initialization), dim3(8192 * 128 * scale / factor), dim3(512), 0, 0, GPU_data_out1, data_stride, data_size);///1024 per block max
hipDeviceSynchronize();
if(0){
hipLaunchKernelGGL(( gpu_initialization), dim3(8192 * 128 * scale / factor), dim3(512), 0, 0, CPU_data_in1, data_stride, data_size);///1024 per block max
hipDeviceSynchronize();
}else{
init_cpu_data(CPU_data_in1, data_size, data_stride);
}
}else{
init_cpu_data(GPU_data_out1, data_size, data_stride);
init_cpu_data(CPU_data_in1, data_size, data_stride);
}
hipProfilerStart();////////////////////////////////start
/////////////////////////////////time
struct timespec ts1;
clock_gettime(CLOCK_REALTIME, &ts1);
int block_num = 1;
hipLaunchKernelGGL(( page_visitor4), dim3(block_num), dim3(32), 0, 0, CPU_data_in1, GPU_data_out1, data_stride, clock_count);////////1 thread 1 data
hipDeviceSynchronize();
/////////////////////////////////time
struct timespec ts2;
clock_gettime(CLOCK_REALTIME, &ts2);
//printf("###################data_stride%lld#########################clock_count:%lld\n", data_stride, clock_count);
//printf("*\n*\n*\nruntime: %lluns\n", time_diff(ts1, ts2));
printf("%llu ", time_diff(ts1, ts2));
fflush(stdout);
checkCudaErrors(hipFree(CPU_data_in1));
checkCudaErrors(hipFree(GPU_data_out1));
hipProfilerStop();/////////////////////////////////stop
}
}
}
}
}
}
}
printf("\n");
for(long long int time = 0; time <= 0; time = time + 1){
//printf("\n####################time: %llu\n", time);
//long long int coverage2 = 0;
for(long long int coverage = 1; coverage <= 1; coverage = coverage * 2){///////////////8192 is 2m.
//coverage2++;
//if(coverage2 == 2){
// coverage = 1;
//}
//printf("############coverage: %llu\n", coverage);
for(long long int rate = 1; rate <= 1; rate = rate * 2){
//printf("############rate: %llu\n", rate);
//long long int offset2 = 0;
//for(long long int offset = 0; offset <= 0; offset = offset * 2){///////8
for(long long int offset = 0; offset <= 0; offset = offset + 8){
//offset2++;
//if(offset2 == 2){
// offset = 1;
//}
//printf("############offset: %llu\n", offset);
for(long long int factor = 1; factor <= 1; factor = factor * 2){/////////////16384 (128k) max
//printf("####################factor: %llu\n", factor);
for(double data_stride = 1 * 1 * 1 * factor; data_stride <= 1 * 1 * 1 * factor; data_stride = data_stride * 2){///134217728 = 1gb, 268435456 = 2gb, 536870912 = 4gb, 1073741824 = 8gb, 2147483648 = 16gb, 4294967296 = 32gb, 8589934592 = 64gb. (index)
//printf("\n");
for(long long int clock_count = 31; clock_count <= 31; clock_count = clock_count + 1){
///long long int time2 = time;
//if(time2 > clock_count){
// time2 = clock_count;
//}
///////////////////////////////////////////////////////////////////CPU data begin
double temp = data_stride * 512;
long long int data_size = (long long int) temp;
//data_size = data_size * 8192 * 512 / factor;
data_size = data_size * 8192 * 128 / factor;
long long int *CPU_data_in1;
checkCudaErrors(hipMallocManaged(&CPU_data_in1, sizeof(long long int) * data_size));/////////////using unified memory
///////////////////////////////////////////////////////////////////CPU data end
long long int *GPU_data_out1;
checkCudaErrors(hipMallocManaged(&GPU_data_out1, sizeof(long long int) * data_size));/////////////using unified memory
///////////////////////////////////////////////////////////////////GPU data out end
if(1){
double scale = 1;
if(data_stride < 1){
scale = data_stride;/////////make sure threadIdx is smaller than data_size in the initialization
}
hipLaunchKernelGGL(( gpu_initialization), dim3(8192 * 128 * scale / factor), dim3(512), 0, 0, GPU_data_out1, data_stride, data_size);///1024 per block max
hipDeviceSynchronize();
if(0){
hipLaunchKernelGGL(( gpu_initialization), dim3(8192 * 128 * scale / factor), dim3(512), 0, 0, CPU_data_in1, data_stride, data_size);///1024 per block max
hipDeviceSynchronize();
}else{
init_cpu_data(CPU_data_in1, data_size, data_stride);
}
}else{
init_cpu_data(GPU_data_out1, data_size, data_stride);
init_cpu_data(CPU_data_in1, data_size, data_stride);
}
hipProfilerStart();////////////////////////////////start
/////////////////////////////////time
struct timespec ts1;
clock_gettime(CLOCK_REALTIME, &ts1);
int block_num = 1;
hipLaunchKernelGGL(( page_visitor5), dim3(block_num), dim3(32), 0, 0, CPU_data_in1, GPU_data_out1, data_stride, clock_count);///////1 thread all data
hipDeviceSynchronize();
/////////////////////////////////time
struct timespec ts2;
clock_gettime(CLOCK_REALTIME, &ts2);
//printf("###################data_stride%lld#########################clock_count:%lld\n", data_stride, clock_count);
//printf("*\n*\n*\nruntime: %lluns\n", time_diff(ts1, ts2));
printf("%llu ", time_diff(ts1, ts2));
fflush(stdout);
checkCudaErrors(hipFree(CPU_data_in1));
checkCudaErrors(hipFree(GPU_data_out1));
hipProfilerStop();/////////////////////////////////stop
}
}
}
}
}
}
}
printf("\n");
for(long long int time = 0; time <= 0; time = time + 1){
//printf("\n####################time: %llu\n", time);
//long long int coverage2 = 0;
for(long long int coverage = 1; coverage <= 1; coverage = coverage * 2){///////////////8192 is 2m.
//coverage2++;
//if(coverage2 == 2){
// coverage = 1;
//}
//printf("############coverage: %llu\n", coverage);
for(long long int rate = 1; rate <= 1; rate = rate * 2){
//printf("############rate: %llu\n", rate);
//long long int offset2 = 0;
//for(long long int offset = 0; offset <= 0; offset = offset * 2){///////8
for(long long int offset = 0; offset <= 0; offset = offset + 8){
//offset2++;
//if(offset2 == 2){
// offset = 1;
//}
//printf("############offset: %llu\n", offset);
for(long long int factor = 1; factor <= 1; factor = factor * 2){/////////////16384 (128k) max
//printf("####################factor: %llu\n", factor);
for(double data_stride = 1 * 1 * 1 * factor; data_stride <= 1 * 1 * 1 * factor; data_stride = data_stride * 2){///134217728 = 1gb, 268435456 = 2gb, 536870912 = 4gb, 1073741824 = 8gb, 2147483648 = 16gb, 4294967296 = 32gb, 8589934592 = 64gb. (index)
//printf("\n");
for(long long int clock_count = 31; clock_count <= 31; clock_count = clock_count + 1){
///long long int time2 = time;
//if(time2 > clock_count){
// time2 = clock_count;
//}
///////////////////////////////////////////////////////////////////CPU data begin
double temp = data_stride * 512;
long long int data_size = (long long int) temp;
//data_size = data_size * 8192 * 512 / factor;
data_size = data_size * 8192 * 128 / factor;
long long int *CPU_data_in1;
checkCudaErrors(hipMallocManaged(&CPU_data_in1, sizeof(long long int) * data_size));/////////////using unified memory
///////////////////////////////////////////////////////////////////CPU data end
long long int *GPU_data_out1;
checkCudaErrors(hipMallocManaged(&GPU_data_out1, sizeof(long long int) * data_size));/////////////using unified memory
///////////////////////////////////////////////////////////////////GPU data out end
if(1){
double scale = 1;
if(data_stride < 1){
scale = data_stride;/////////make sure threadIdx is smaller than data_size in the initialization
}
hipLaunchKernelGGL(( gpu_initialization), dim3(8192 * 128 * scale / factor), dim3(512), 0, 0, GPU_data_out1, data_stride, data_size);///1024 per block max
hipDeviceSynchronize();
if(0){
hipLaunchKernelGGL(( gpu_initialization), dim3(8192 * 128 * scale / factor), dim3(512), 0, 0, CPU_data_in1, data_stride, data_size);///1024 per block max
hipDeviceSynchronize();
}else{
init_cpu_data(CPU_data_in1, data_size, data_stride);
}
}else{
init_cpu_data(GPU_data_out1, data_size, data_stride);
init_cpu_data(CPU_data_in1, data_size, data_stride);
}
hipProfilerStart();////////////////////////////////start
/////////////////////////////////time
struct timespec ts1;
clock_gettime(CLOCK_REALTIME, &ts1);
int block_num = 1;
hipLaunchKernelGGL(( page_visitor4), dim3(block_num), dim3(128), 0, 0, CPU_data_in1, GPU_data_out1, data_stride, clock_count);///////1 warp 1 data same core
hipDeviceSynchronize();
/////////////////////////////////time
struct timespec ts2;
clock_gettime(CLOCK_REALTIME, &ts2);
//printf("###################data_stride%lld#########################clock_count:%lld\n", data_stride, clock_count);
//printf("*\n*\n*\nruntime: %lluns\n", time_diff(ts1, ts2));
printf("%llu ", time_diff(ts1, ts2));
fflush(stdout);
checkCudaErrors(hipFree(CPU_data_in1));
checkCudaErrors(hipFree(GPU_data_out1));
hipProfilerStop();/////////////////////////////////stop
}
}
}
}
}
}
}
printf("\n");
for(long long int time = 0; time <= 0; time = time + 1){
//printf("\n####################time: %llu\n", time);
//long long int coverage2 = 0;
for(long long int coverage = 1; coverage <= 1; coverage = coverage * 2){///////////////8192 is 2m.
//coverage2++;
//if(coverage2 == 2){
// coverage = 1;
//}
//printf("############coverage: %llu\n", coverage);
for(long long int rate = 1; rate <= 1; rate = rate * 2){
//printf("############rate: %llu\n", rate);
//long long int offset2 = 0;
//for(long long int offset = 0; offset <= 0; offset = offset * 2){///////8
for(long long int offset = 0; offset <= 0; offset = offset + 8){
//offset2++;
//if(offset2 == 2){
// offset = 1;
//}
//printf("############offset: %llu\n", offset);
for(long long int factor = 1; factor <= 1; factor = factor * 2){/////////////16384 (128k) max
//printf("####################factor: %llu\n", factor);
for(double data_stride = 1 * 1 * 1 * factor; data_stride <= 1 * 1 * 1 * factor; data_stride = data_stride * 2){///134217728 = 1gb, 268435456 = 2gb, 536870912 = 4gb, 1073741824 = 8gb, 2147483648 = 16gb, 4294967296 = 32gb, 8589934592 = 64gb. (index)
//printf("\n");
for(long long int clock_count = 31; clock_count <= 31; clock_count = clock_count + 1){
///long long int time2 = time;
//if(time2 > clock_count){
// time2 = clock_count;
//}
///////////////////////////////////////////////////////////////////CPU data begin
double temp = data_stride * 512;
long long int data_size = (long long int) temp;
//data_size = data_size * 8192 * 512 / factor;
data_size = data_size * 8192 * 128 / factor;
long long int *CPU_data_in1;
checkCudaErrors(hipMallocManaged(&CPU_data_in1, sizeof(long long int) * data_size));/////////////using unified memory
///////////////////////////////////////////////////////////////////CPU data end
long long int *GPU_data_out1;
checkCudaErrors(hipMallocManaged(&GPU_data_out1, sizeof(long long int) * data_size));/////////////using unified memory
///////////////////////////////////////////////////////////////////GPU data out end
if(1){
double scale = 1;
if(data_stride < 1){
scale = data_stride;/////////make sure threadIdx is smaller than data_size in the initialization
}
hipLaunchKernelGGL(( gpu_initialization), dim3(8192 * 128 * scale / factor), dim3(512), 0, 0, GPU_data_out1, data_stride, data_size);///1024 per block max
hipDeviceSynchronize();
if(0){
hipLaunchKernelGGL(( gpu_initialization), dim3(8192 * 128 * scale / factor), dim3(512), 0, 0, CPU_data_in1, data_stride, data_size);///1024 per block max
hipDeviceSynchronize();
}else{
init_cpu_data(CPU_data_in1, data_size, data_stride);
}
}else{
init_cpu_data(GPU_data_out1, data_size, data_stride);
init_cpu_data(CPU_data_in1, data_size, data_stride);
}
hipProfilerStart();////////////////////////////////start
/////////////////////////////////time
struct timespec ts1;
clock_gettime(CLOCK_REALTIME, &ts1);
int block_num = 4;
hipLaunchKernelGGL(( page_visitor4), dim3(block_num), dim3(32), 0, 0, CPU_data_in1, GPU_data_out1, data_stride, clock_count);///////1 warp 1 data dif cores
hipDeviceSynchronize();
/////////////////////////////////time
struct timespec ts2;
clock_gettime(CLOCK_REALTIME, &ts2);
//printf("###################data_stride%lld#########################clock_count:%lld\n", data_stride, clock_count);
//printf("*\n*\n*\nruntime: %lluns\n", time_diff(ts1, ts2));
printf("%llu ", time_diff(ts1, ts2));
fflush(stdout);
checkCudaErrors(hipFree(CPU_data_in1));
checkCudaErrors(hipFree(GPU_data_out1));
hipProfilerStop();/////////////////////////////////stop
}
}
}
}
}
}
}
printf("\n");
for(long long int time = 0; time <= 0; time = time + 1){
//printf("\n####################time: %llu\n", time);
//long long int coverage2 = 0;
for(long long int coverage = 1; coverage <= 1; coverage = coverage * 2){///////////////8192 is 2m.
//coverage2++;
//if(coverage2 == 2){
// coverage = 1;
//}
//printf("############coverage: %llu\n", coverage);
for(long long int rate = 1; rate <= 1; rate = rate * 2){
//printf("############rate: %llu\n", rate);
//long long int offset2 = 0;
//for(long long int offset = 0; offset <= 0; offset = offset * 2){///////8
for(long long int offset = 0; offset <= 0; offset = offset + 8){
//offset2++;
//if(offset2 == 2){
// offset = 1;
//}
//printf("############offset: %llu\n", offset);
for(long long int factor = 1; factor <= 1; factor = factor * 2){/////////////16384 (128k) max
//printf("####################factor: %llu\n", factor);
for(double data_stride = 1 * 1 * 1 * factor; data_stride <= 1 * 1 * 1 * factor; data_stride = data_stride * 2){///134217728 = 1gb, 268435456 = 2gb, 536870912 = 4gb, 1073741824 = 8gb, 2147483648 = 16gb, 4294967296 = 32gb, 8589934592 = 64gb. (index)
//printf("\n");
for(long long int clock_count = 3; clock_count <= 3; clock_count = clock_count + 1){
///long long int time2 = time;
//if(time2 > clock_count){
// time2 = clock_count;
//}
///////////////////////////////////////////////////////////////////CPU data begin
double temp = data_stride * 512;
long long int data_size = (long long int) temp;
//data_size = data_size * 8192 * 512 / factor;
data_size = data_size * 8192 * 128 / factor;
long long int *CPU_data_in1;
checkCudaErrors(hipMallocManaged(&CPU_data_in1, sizeof(long long int) * data_size));/////////////using unified memory
///////////////////////////////////////////////////////////////////CPU data end
long long int *GPU_data_out1;
checkCudaErrors(hipMallocManaged(&GPU_data_out1, sizeof(long long int) * data_size));/////////////using unified memory
///////////////////////////////////////////////////////////////////GPU data out end
if(1){
double scale = 1;
if(data_stride < 1){
scale = data_stride;/////////make sure threadIdx is smaller than data_size in the initialization
}
hipLaunchKernelGGL(( gpu_initialization), dim3(8192 * 128 * scale / factor), dim3(512), 0, 0, GPU_data_out1, data_stride, data_size);///1024 per block max
hipDeviceSynchronize();
if(0){
hipLaunchKernelGGL(( gpu_initialization), dim3(8192 * 128 * scale / factor), dim3(512), 0, 0, CPU_data_in1, data_stride, data_size);///1024 per block max
hipDeviceSynchronize();
}else{
init_cpu_data(CPU_data_in1, data_size, data_stride);
}
}else{
init_cpu_data(GPU_data_out1, data_size, data_stride);
init_cpu_data(CPU_data_in1, data_size, data_stride);
}
hipProfilerStart();////////////////////////////////start
/////////////////////////////////time
struct timespec ts1;
clock_gettime(CLOCK_REALTIME, &ts1);
long long int block_num = 1;
hipLaunchKernelGGL(( page_visitor6), dim3(block_num), dim3(32), 0, 0, CPU_data_in1, GPU_data_out1, data_stride, clock_count);///////1 warp all data
hipDeviceSynchronize();
/////////////////////////////////time
struct timespec ts2;
clock_gettime(CLOCK_REALTIME, &ts2);
//printf("###################data_stride%lld#########################clock_count:%lld\n", data_stride, clock_count);
//printf("*\n*\n*\nruntime: %lluns\n", time_diff(ts1, ts2));
printf("%llu ", time_diff(ts1, ts2));
fflush(stdout);
checkCudaErrors(hipFree(CPU_data_in1));
checkCudaErrors(hipFree(GPU_data_out1));
hipProfilerStop();/////////////////////////////////stop
}
}
}
}
}
}
}
printf("\n");
exit(EXIT_SUCCESS);
} | 3ac66280c8fc1da12a459304fdadf9dae4c65bdb.cu | #include <cstdio>
#include <ctime>
#include <vector>
#include <algorithm>
#include <stdlib.h>
// utilities
#include <helper_cuda.h>
#include <time.h>
#include <sys/time.h>
#include <cooperative_groups.h>
#include <cuda_profiler_api.h>
using namespace cooperative_groups;
/////////////////////////////L1 is enabled. "ALL_CCFLAGS += -Xptxas -dlcm=ca"
//////////////large vs small data.
/////test fault counts for concurrent vs. looped accesses: concurrent warps vs. concurrent threads.
/////more: concurrent warps on the same core vs. on different cores (observation:)
//////nvprof --profile-from-start off --print-gpu-trace --log-file 4warpsall.txt --csv ./fault_group_test15
void init_cpu_data(long long int* A, long long int size, double stride){
for (long long int i = 0; i < size; i++){
A[i]=1;
}
/*
for (long long int i = 0; i < size - stride; i++){
A[i]=(i + stride);
}
for (long long int i = size - stride; i < size; i++){
A[i]=0;
}
*/
}
__global__ void gpu_initialization(long long int *A, double data_stride, long long int data_size){
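// Grid-stride loop: each thread initializes every thread_num-th element.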
long long int index = (blockIdx.x * blockDim.x + threadIdx.x);
long long int thread_num = gridDim.x * blockDim.x;
for(long long int it = 0; it < data_size; it = it + thread_num){
A[index + it]=23;
}
}
long long unsigned time_diff(timespec start, timespec end){
struct timespec temp;
if ((end.tv_nsec - start.tv_nsec) < 0){
temp.tv_sec = end.tv_sec - start.tv_sec - 1;
temp.tv_nsec = 1000000000 + end.tv_nsec - start.tv_nsec;
}
else{
temp.tv_sec = end.tv_sec - start.tv_sec;
temp.tv_nsec = end.tv_nsec - start.tv_nsec;
}
long long unsigned time_interval_ns = temp.tv_nsec;
long long unsigned time_interval_s = temp.tv_sec;
time_interval_s = time_interval_s * 1000000000;
return time_interval_s + time_interval_ns;
}
/*
///////////////512(4k), 1024(8k), 8192(64k), 16384(128k), 262144 (2m), 4194304 (32m), 8388608 (64m),
__global__ void page_visitor(long long int *A1, long long int *B1, double data_stride, long long int clock_count){////long
long long int warp_id = (threadIdx.x + blockIdx.x * blockDim.x) >> 5;
double temp = (warp_id * 32 + (threadIdx.x % 32) ) * stride;
//if(warp_id == 27){
// temp = (1 * 32 + (threadIdx.x % 32) ) * stride;
//}
//double temp = (blockIdx.x * blockDim.x + threadIdx.x) * stride;
//double temp = ((blockIdx.x * blockDim.x + threadIdx.x) % 32) * 2 + blockIdx.x * 1;
long long int index = __double2ll_rd(temp);
long long int value1;
//if(warp_id == 0 || warp_id == 27){
if(threadIdx.x % 32 <= clock_count){
value1 = A1[index];
B1[index] = value1;
}
//}
}
*/
/*
__global__ void page_visitor2(long long int *A1, long long int *B1, double data_stride, long long int clock_count){///mixed
int warps_per_grid = (blockDim.x * gridDim.x) >> 5;
long long int warp_id = (threadIdx.x + blockIdx.x * blockDim.x) >> 5;
double temp = (threadIdx.x % 32) * stride * warps_per_grid + warp_id * stride;
long long int index = __double2ll_rd(temp);
long long int value1;
if(threadIdx.x % 32 <= clock_count){
value1 = A1[index];
B1[index] = value1;
}
}
*/
#define stride 1
///////////////512(4k), 1024(8k), 8192(64k), 16384(128k), 262144 (2m), 4194304 (32m), 8388608 (64m),
__global__ void page_visitor4(long long int *A1, long long int *B1, double data_stride, long long int clock_count){////1 thread 1 data / 1 warp 1 data
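// One element per thread at unit stride (stride == 1 here), so accesses within a warp are contiguous.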
long long int warp_id = (threadIdx.x + blockIdx.x * blockDim.x) >> 5;
//double temp = (warp_id * 32 + (threadIdx.x % 32) ) * stride;
double temp = (threadIdx.x + blockIdx.x * blockDim.x) * stride;
long long int index = __double2ll_rd(temp);
long long int value1;
//if(threadIdx.x <= clock_count){
value1 = A1[index];
B1[index] = value1;
//}
}
__global__ void page_visitor5(long long int *A1, long long int *B1, double data_stride, long long int clock_count){///1 thread all data
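// Only thread 0 runs the loop, serially touching clock_count + 1 consecutive elements.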
int warps_per_grid = (blockDim.x * gridDim.x) >> 5;
long long int warp_id = (threadIdx.x + blockIdx.x * blockDim.x) >> 5;
//double temp = (threadIdx.x % 32) * stride * warps_per_grid + warp_id * stride;
double temp = (threadIdx.x + blockIdx.x * blockDim.x) * stride;
long long int index = __double2ll_rd(temp);
long long int value1;
for(long long int i = 0; i <= clock_count; i++){
if(threadIdx.x == 0){
value1 = A1[index];
B1[index] = value1;
}
index+=stride;
}
}
__global__ void page_visitor6(long long int *A1, long long int *B1, double data_stride, long long int clock_count){///1 warp all data
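// The whole warp walks the array together: each lane reads one element, then
// the warp advances by a full warp width (32 * stride).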
int warps_per_grid = (blockDim.x * gridDim.x) >> 5;
long long int warp_id = (threadIdx.x + blockIdx.x * blockDim.x) >> 5;
//double temp = (threadIdx.x % 32) * stride * warps_per_grid + warp_id * stride;
double temp = (threadIdx.x + blockIdx.x * blockDim.x) * stride;
long long int index = __double2ll_rd(temp);
long long int value1;
for(long long int i = 0; i <= clock_count; i++){
value1 = A1[index];
B1[index] = value1;
index += 32 *stride;
}
}
///////////long 0 - 31 same core
///////////long 0 - 64 same core
///////////long 0 - 64 different core
///////////mixed 0 - 64 same core
///////////mixed 0 - 64 different core
int main(int argc, char **argv)
{
printf("\n");
// set device
cudaDeviceProp device_prop;
//long long int dev_id = findCudaDevice(argc, (const char **) argv);
long long int dev_id = 0;
checkCudaErrors(cudaGetDeviceProperties(&device_prop, dev_id));
//int peak_clk = 1;//kHz
//checkCudaErrors(cudaDeviceGetAttribute(&peak_clk, cudaDevAttrClockRate, dev_id));
//float clock_rate = (float) peak_clk;
//printf("clock_rate:%f\n", clock_rate);
if (!device_prop.managedMemory) {
// This sample requires being run on a device that supports Unified Memory
fprintf(stderr, "Unified Memory not supported on this device\n");
exit(EXIT_WAIVED);
}
if (device_prop.computeMode == cudaComputeModeProhibited)
{
// This sample requires being run with a default or process exclusive mode
fprintf(stderr, "This sample requires a device in either default or process exclusive mode\n");
exit(EXIT_WAIVED);
}
/*
if (device_prop.concurrentManagedAccess == 1){
printf("This device supports concurrent Managed Access.\n");
}else{
printf("This device does not support concurrent Managed Access.\n");
}
*/
int value1 = 1;
checkCudaErrors(cudaDeviceGetAttribute(&value1, cudaDevAttrConcurrentManagedAccess, dev_id));
//printf("cudaDevAttrConcurrentManagedAccess = %d\n", value1);
//long long int num_thread = 256;
//long long int size_of_data = 524288;
///*
//printf("############approach\n");
for(long long int time = 0; time <= 0; time = time + 1){
//printf("\n####################time: %llu\n", time);
//long long int coverage2 = 0;
for(long long int coverage = 1; coverage <= 1; coverage = coverage * 2){///////////////8192 is 2m.
//coverage2++;
//if(coverage2 == 2){
// coverage = 1;
//}
//printf("############coverage: %llu\n", coverage);
for(long long int rate = 1; rate <= 1; rate = rate * 2){
//printf("############rate: %llu\n", rate);
//long long int offset2 = 0;
//for(long long int offset = 0; offset <= 0; offset = offset * 2){///////8
for(long long int offset = 0; offset <= 0; offset = offset + 8){
//offset2++;
//if(offset2 == 2){
// offset = 1;
//}
//printf("############offset: %llu\n", offset);
for(long long int factor = 1; factor <= 1; factor = factor * 2){/////////////16384 (128k) max
//printf("####################factor: %llu\n", factor);
for(double data_stride = 1 * 1 * 1 * factor; data_stride <= 1 * 1 * 1 * factor; data_stride = data_stride * 2){///134217728 = 1gb, 268435456 = 2gb, 536870912 = 4gb, 1073741824 = 8gb, 2147483648 = 16gb, 4294967296 = 32gb, 8589934592 = 64gb. (index)
//printf("\n");
for(long long int clock_count = 31; clock_count <= 31; clock_count = clock_count + 1){
///long long int time2 = time;
//if(time2 > clock_count){
// time2 = clock_count;
//}
///////////////////////////////////////////////////////////////////CPU data begin
double temp = data_stride * 512;
long long int data_size = (long long int) temp;
//data_size = data_size * 8192 * 512 / factor;
data_size = data_size * 8192 * 128 / factor;
long long int *CPU_data_in1;
checkCudaErrors(cudaMallocManaged(&CPU_data_in1, sizeof(long long int) * data_size));/////////////using unified memory
///////////////////////////////////////////////////////////////////CPU data end
long long int *GPU_data_out1;
checkCudaErrors(cudaMallocManaged(&GPU_data_out1, sizeof(long long int) * data_size));/////////////using unified memory
///////////////////////////////////////////////////////////////////GPU data out end
if(1){
double scale = 1;
if(data_stride < 1){
scale = data_stride;/////////make sure threadIdx is smaller than data_size in the initialization
}
gpu_initialization<<<8192 * 128 * scale / factor, 512>>>(GPU_data_out1, data_stride, data_size);///1024 per block max
cudaDeviceSynchronize();
if(0){
gpu_initialization<<<8192 * 128 * scale / factor, 512>>>(CPU_data_in1, data_stride, data_size);///1024 per block max
cudaDeviceSynchronize();
}else{
init_cpu_data(CPU_data_in1, data_size, data_stride);
}
}else{
init_cpu_data(GPU_data_out1, data_size, data_stride);
init_cpu_data(CPU_data_in1, data_size, data_stride);
}
cudaProfilerStart();////////////////////////////////start
/////////////////////////////////time
struct timespec ts1;
clock_gettime(CLOCK_REALTIME, &ts1);
int block_num = 1;
page_visitor4<<<block_num, 32>>>(CPU_data_in1, GPU_data_out1, data_stride, clock_count);////////1 thread 1 data
cudaDeviceSynchronize();
/////////////////////////////////time
struct timespec ts2;
clock_gettime(CLOCK_REALTIME, &ts2);
//printf("###################data_stride%lld#########################clock_count:%lld\n", data_stride, clock_count);
//printf("*\n*\n*\nruntime: %lluns\n", time_diff(ts1, ts2));
printf("%llu ", time_diff(ts1, ts2));
fflush(stdout);
checkCudaErrors(cudaFree(CPU_data_in1));
checkCudaErrors(cudaFree(GPU_data_out1));
cudaProfilerStop();/////////////////////////////////stop
}
}
}
}
}
}
}
printf("\n");
for(long long int time = 0; time <= 0; time = time + 1){
//printf("\n####################time: %llu\n", time);
//long long int coverage2 = 0;
for(long long int coverage = 1; coverage <= 1; coverage = coverage * 2){///////////////8192 is 2m.
//coverage2++;
//if(coverage2 == 2){
// coverage = 1;
//}
//printf("############coverage: %llu\n", coverage);
for(long long int rate = 1; rate <= 1; rate = rate * 2){
//printf("############rate: %llu\n", rate);
//long long int offset2 = 0;
//for(long long int offset = 0; offset <= 0; offset = offset * 2){///////8
for(long long int offset = 0; offset <= 0; offset = offset + 8){
//offset2++;
//if(offset2 == 2){
// offset = 1;
//}
//printf("############offset: %llu\n", offset);
for(long long int factor = 1; factor <= 1; factor = factor * 2){/////////////16384 (128k) max
//printf("####################factor: %llu\n", factor);
for(double data_stride = 1 * 1 * 1 * factor; data_stride <= 1 * 1 * 1 * factor; data_stride = data_stride * 2){///134217728 = 1gb, 268435456 = 2gb, 536870912 = 4gb, 1073741824 = 8gb, 2147483648 = 16gb, 4294967296 = 32gb, 8589934592 = 64gb. (index)
//printf("\n");
for(long long int clock_count = 31; clock_count <= 31; clock_count = clock_count + 1){
///long long int time2 = time;
//if(time2 > clock_count){
// time2 = clock_count;
//}
///////////////////////////////////////////////////////////////////CPU data begin
double temp = data_stride * 512;
long long int data_size = (long long int) temp;
//data_size = data_size * 8192 * 512 / factor;
data_size = data_size * 8192 * 128 / factor;
long long int *CPU_data_in1;
checkCudaErrors(cudaMallocManaged(&CPU_data_in1, sizeof(long long int) * data_size));/////////////using unified memory
///////////////////////////////////////////////////////////////////CPU data end
long long int *GPU_data_out1;
checkCudaErrors(cudaMallocManaged(&GPU_data_out1, sizeof(long long int) * data_size));/////////////using unified memory
///////////////////////////////////////////////////////////////////GPU data out end
if(1){
double scale = 1;
if(data_stride < 1){
scale = data_stride;/////////make sure threadIdx is smaller than data_size in the initialization
}
gpu_initialization<<<8192 * 128 * scale / factor, 512>>>(GPU_data_out1, data_stride, data_size);///1024 per block max
cudaDeviceSynchronize();
if(0){
gpu_initialization<<<8192 * 128 * scale / factor, 512>>>(CPU_data_in1, data_stride, data_size);///1024 per block max
cudaDeviceSynchronize();
}else{
init_cpu_data(CPU_data_in1, data_size, data_stride);
}
}else{
init_cpu_data(GPU_data_out1, data_size, data_stride);
init_cpu_data(CPU_data_in1, data_size, data_stride);
}
cudaProfilerStart();////////////////////////////////start
/////////////////////////////////time
struct timespec ts1;
clock_gettime(CLOCK_REALTIME, &ts1);
int block_num = 1;
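							// Same single-warp launch, but per the "1 thread all data" label below,
							// one thread walks the whole buffer, serializing every access.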
page_visitor5<<<block_num, 32>>>(CPU_data_in1, GPU_data_out1, data_stride, clock_count);///////1 thread all data
cudaDeviceSynchronize();
/////////////////////////////////time
struct timespec ts2;
clock_gettime(CLOCK_REALTIME, &ts2);
//printf("###################data_stride%lld#########################clock_count:%lld\n", data_stride, clock_count);
//printf("*\n*\n*\nruntime: %lluns\n", time_diff(ts1, ts2));
printf("%llu ", time_diff(ts1, ts2));
fflush(stdout);
checkCudaErrors(cudaFree(CPU_data_in1));
checkCudaErrors(cudaFree(GPU_data_out1));
cudaProfilerStop();/////////////////////////////////stop
}
}
}
}
}
}
}
printf("\n");
for(long long int time = 0; time <= 0; time = time + 1){
//printf("\n####################time: %llu\n", time);
//long long int coverage2 = 0;
for(long long int coverage = 1; coverage <= 1; coverage = coverage * 2){///////////////8192 is 2m.
//coverage2++;
//if(coverage2 == 2){
// coverage = 1;
//}
//printf("############coverage: %llu\n", coverage);
for(long long int rate = 1; rate <= 1; rate = rate * 2){
//printf("############rate: %llu\n", rate);
//long long int offset2 = 0;
//for(long long int offset = 0; offset <= 0; offset = offset * 2){///////8
for(long long int offset = 0; offset <= 0; offset = offset + 8){
//offset2++;
//if(offset2 == 2){
// offset = 1;
//}
//printf("############offset: %llu\n", offset);
for(long long int factor = 1; factor <= 1; factor = factor * 2){/////////////16384 (128k) max
//printf("####################factor: %llu\n", factor);
for(double data_stride = 1 * 1 * 1 * factor; data_stride <= 1 * 1 * 1 * factor; data_stride = data_stride * 2){///134217728 = 1gb, 268435456 = 2gb, 536870912 = 4gb, 1073741824 = 8gb, 2147483648 = 16gb, 4294967296 = 32gb, 8589934592 = 64gb. (index)
//printf("\n");
for(long long int clock_count = 31; clock_count <= 31; clock_count = clock_count + 1){
///long long int time2 = time;
//if(time2 > clock_count){
// time2 = clock_count;
//}
///////////////////////////////////////////////////////////////////CPU data begin
double temp = data_stride * 512;
long long int data_size = (long long int) temp;
//data_size = data_size * 8192 * 512 / factor;
data_size = data_size * 8192 * 128 / factor;
long long int *CPU_data_in1;
checkCudaErrors(cudaMallocManaged(&CPU_data_in1, sizeof(long long int) * data_size));/////////////using unified memory
///////////////////////////////////////////////////////////////////CPU data end
long long int *GPU_data_out1;
checkCudaErrors(cudaMallocManaged(&GPU_data_out1, sizeof(long long int) * data_size));/////////////using unified memory
///////////////////////////////////////////////////////////////////GPU data out end
if(1){
double scale = 1;
if(data_stride < 1){
scale = data_stride;/////////make sure threadIdx is smaller than data_size in the initialization
}
gpu_initialization<<<8192 * 128 * scale / factor, 512>>>(GPU_data_out1, data_stride, data_size);///1024 per block max
cudaDeviceSynchronize();
if(0){
gpu_initialization<<<8192 * 128 * scale / factor, 512>>>(CPU_data_in1, data_stride, data_size);///1024 per block max
cudaDeviceSynchronize();
}else{
init_cpu_data(CPU_data_in1, data_size, data_stride);
}
}else{
init_cpu_data(GPU_data_out1, data_size, data_stride);
init_cpu_data(CPU_data_in1, data_size, data_stride);
}
cudaProfilerStart();////////////////////////////////start
/////////////////////////////////time
struct timespec ts1;
clock_gettime(CLOCK_REALTIME, &ts1);
int block_num = 1;
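							// One block of 128 threads (four warps) resident on a single SM; per the
							// "1 warp 1 data same core" label below, each warp handles one element.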
page_visitor4<<<block_num, 128>>>(CPU_data_in1, GPU_data_out1, data_stride, clock_count);///////1 warp 1 data same core
cudaDeviceSynchronize();
/////////////////////////////////time
struct timespec ts2;
clock_gettime(CLOCK_REALTIME, &ts2);
//printf("###################data_stride%lld#########################clock_count:%lld\n", data_stride, clock_count);
//printf("*\n*\n*\nruntime: %lluns\n", time_diff(ts1, ts2));
printf("%llu ", time_diff(ts1, ts2));
fflush(stdout);
checkCudaErrors(cudaFree(CPU_data_in1));
checkCudaErrors(cudaFree(GPU_data_out1));
cudaProfilerStop();/////////////////////////////////stop
}
}
}
}
}
}
}
printf("\n");
for(long long int time = 0; time <= 0; time = time + 1){
//printf("\n####################time: %llu\n", time);
//long long int coverage2 = 0;
for(long long int coverage = 1; coverage <= 1; coverage = coverage * 2){///////////////8192 is 2m.
//coverage2++;
//if(coverage2 == 2){
// coverage = 1;
//}
//printf("############coverage: %llu\n", coverage);
for(long long int rate = 1; rate <= 1; rate = rate * 2){
//printf("############rate: %llu\n", rate);
//long long int offset2 = 0;
//for(long long int offset = 0; offset <= 0; offset = offset * 2){///////8
for(long long int offset = 0; offset <= 0; offset = offset + 8){
//offset2++;
//if(offset2 == 2){
// offset = 1;
//}
//printf("############offset: %llu\n", offset);
for(long long int factor = 1; factor <= 1; factor = factor * 2){/////////////16384 (128k) max
//printf("####################factor: %llu\n", factor);
for(double data_stride = 1 * 1 * 1 * factor; data_stride <= 1 * 1 * 1 * factor; data_stride = data_stride * 2){///134217728 = 1gb, 268435456 = 2gb, 536870912 = 4gb, 1073741824 = 8gb, 2147483648 = 16gb, 4294967296 = 32gb, 8589934592 = 64gb. (index)
//printf("\n");
for(long long int clock_count = 31; clock_count <= 31; clock_count = clock_count + 1){
///long long int time2 = time;
//if(time2 > clock_count){
// time2 = clock_count;
//}
///////////////////////////////////////////////////////////////////CPU data begin
double temp = data_stride * 512;
long long int data_size = (long long int) temp;
//data_size = data_size * 8192 * 512 / factor;
data_size = data_size * 8192 * 128 / factor;
long long int *CPU_data_in1;
checkCudaErrors(cudaMallocManaged(&CPU_data_in1, sizeof(long long int) * data_size));/////////////using unified memory
///////////////////////////////////////////////////////////////////CPU data end
long long int *GPU_data_out1;
checkCudaErrors(cudaMallocManaged(&GPU_data_out1, sizeof(long long int) * data_size));/////////////using unified memory
///////////////////////////////////////////////////////////////////GPU data out end
if(1){
double scale = 1;
if(data_stride < 1){
scale = data_stride;/////////make sure threadIdx is smaller than data_size in the initialization
}
gpu_initialization<<<8192 * 128 * scale / factor, 512>>>(GPU_data_out1, data_stride, data_size);///1024 per block max
cudaDeviceSynchronize();
if(0){
gpu_initialization<<<8192 * 128 * scale / factor, 512>>>(CPU_data_in1, data_stride, data_size);///1024 per block max
cudaDeviceSynchronize();
}else{
init_cpu_data(CPU_data_in1, data_size, data_stride);
}
}else{
init_cpu_data(GPU_data_out1, data_size, data_stride);
init_cpu_data(CPU_data_in1, data_size, data_stride);
}
cudaProfilerStart();////////////////////////////////start
/////////////////////////////////time
struct timespec ts1;
clock_gettime(CLOCK_REALTIME, &ts1);
int block_num = 4;
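							// Four blocks of one warp each; separate blocks are typically scheduled
							// on different SMs, contrasting with the same-core variant above.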
page_visitor4<<<block_num, 32>>>(CPU_data_in1, GPU_data_out1, data_stride, clock_count);///////1 warp 1 data dif cores
cudaDeviceSynchronize();
/////////////////////////////////time
struct timespec ts2;
clock_gettime(CLOCK_REALTIME, &ts2);
//printf("###################data_stride%lld#########################clock_count:%lld\n", data_stride, clock_count);
//printf("*\n*\n*\nruntime: %lluns\n", time_diff(ts1, ts2));
printf("%llu ", time_diff(ts1, ts2));
fflush(stdout);
checkCudaErrors(cudaFree(CPU_data_in1));
checkCudaErrors(cudaFree(GPU_data_out1));
cudaProfilerStop();/////////////////////////////////stop
}
}
}
}
}
}
}
printf("\n");
for(long long int time = 0; time <= 0; time = time + 1){
//printf("\n####################time: %llu\n", time);
//long long int coverage2 = 0;
for(long long int coverage = 1; coverage <= 1; coverage = coverage * 2){///////////////8192 is 2m.
//coverage2++;
//if(coverage2 == 2){
// coverage = 1;
//}
//printf("############coverage: %llu\n", coverage);
for(long long int rate = 1; rate <= 1; rate = rate * 2){
//printf("############rate: %llu\n", rate);
//long long int offset2 = 0;
//for(long long int offset = 0; offset <= 0; offset = offset * 2){///////8
for(long long int offset = 0; offset <= 0; offset = offset + 8){
//offset2++;
//if(offset2 == 2){
// offset = 1;
//}
//printf("############offset: %llu\n", offset);
for(long long int factor = 1; factor <= 1; factor = factor * 2){/////////////16384 (128k) max
//printf("####################factor: %llu\n", factor);
for(double data_stride = 1 * 1 * 1 * factor; data_stride <= 1 * 1 * 1 * factor; data_stride = data_stride * 2){///134217728 = 1gb, 268435456 = 2gb, 536870912 = 4gb, 1073741824 = 8gb, 2147483648 = 16gb, 4294967296 = 32gb, 8589934592 = 64gb. (index)
//printf("\n");
for(long long int clock_count = 3; clock_count <= 3; clock_count = clock_count + 1){
///long long int time2 = time;
//if(time2 > clock_count){
// time2 = clock_count;
//}
///////////////////////////////////////////////////////////////////CPU data begin
double temp = data_stride * 512;
long long int data_size = (long long int) temp;
//data_size = data_size * 8192 * 512 / factor;
data_size = data_size * 8192 * 128 / factor;
long long int *CPU_data_in1;
checkCudaErrors(cudaMallocManaged(&CPU_data_in1, sizeof(long long int) * data_size));/////////////using unified memory
///////////////////////////////////////////////////////////////////CPU data end
long long int *GPU_data_out1;
checkCudaErrors(cudaMallocManaged(&GPU_data_out1, sizeof(long long int) * data_size));/////////////using unified memory
///////////////////////////////////////////////////////////////////GPU data out end
if(1){
double scale = 1;
if(data_stride < 1){
scale = data_stride;/////////make sure threadIdx is smaller than data_size in the initialization
}
gpu_initialization<<<8192 * 128 * scale / factor, 512>>>(GPU_data_out1, data_stride, data_size);///1024 per block max
cudaDeviceSynchronize();
if(0){
gpu_initialization<<<8192 * 128 * scale / factor, 512>>>(CPU_data_in1, data_stride, data_size);///1024 per block max
cudaDeviceSynchronize();
}else{
init_cpu_data(CPU_data_in1, data_size, data_stride);
}
}else{
init_cpu_data(GPU_data_out1, data_size, data_stride);
init_cpu_data(CPU_data_in1, data_size, data_stride);
}
cudaProfilerStart();////////////////////////////////start
/////////////////////////////////time
struct timespec ts1;
clock_gettime(CLOCK_REALTIME, &ts1);
long long int block_num = 1;
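							// A single warp cooperatively covers all the data per the
							// "1 warp all data" label below; compare against the 1-thread-all-data case.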
page_visitor6<<<block_num, 32>>>(CPU_data_in1, GPU_data_out1, data_stride, clock_count);///////1 warp all data
cudaDeviceSynchronize();
/////////////////////////////////time
struct timespec ts2;
clock_gettime(CLOCK_REALTIME, &ts2);
//printf("###################data_stride%lld#########################clock_count:%lld\n", data_stride, clock_count);
//printf("*\n*\n*\nruntime: %lluns\n", time_diff(ts1, ts2));
printf("%llu ", time_diff(ts1, ts2));
fflush(stdout);
checkCudaErrors(cudaFree(CPU_data_in1));
checkCudaErrors(cudaFree(GPU_data_out1));
cudaProfilerStop();/////////////////////////////////stop
}
}
}
}
}
}
}
printf("\n");
exit(EXIT_SUCCESS);
} |
d4dfe995eee45fcd6876ac79ed8a538394137a82.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2008 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
// includes, system
#include <stdio.h>
#include <assert.h>
#include "device_launch_parameters.h"
// Simple utility function to check for CUDA runtime errors
void checkCUDAError(const char* msg);
// Part 1 of 1: implement the kernel
__global__ void reverseArrayBlock( int *dst, int *src )
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
int i_r = (blockDim.x * gridDim.x) - 1 - i;
dst[i] = src[i_r];
}
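// Index mapping example: with blockDim.x * gridDim.x == 8, thread 0 reads
// src[7] and thread 7 reads src[0], so dst receives src reversed. Every
// thread writes a distinct element, so no synchronization is needed.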
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
// pointer for host memory and size
int *h_a;
int dimA = 8;
// pointer for device memory
int *d_b, *d_a;
// define grid and block size
int numBlocks = 1;
int numThreadsPerBlock = dimA;
// allocate host and device memory
size_t memSize = numBlocks * numThreadsPerBlock * sizeof(int);
h_a = (int *) malloc(memSize);
hipMalloc( (void **) &d_a, memSize );
hipMalloc( (void **) &d_b, memSize );
// Initialize input array on host
for (int i = 0; i < dimA; ++i)
{
h_a[i] = i;
}
// Copy host array to device array
hipMemcpy( d_a, h_a, memSize, hipMemcpyHostToDevice );
// launch kernel
dim3 dimGrid(numBlocks);
dim3 dimBlock(numThreadsPerBlock);
    hipLaunchKernelGGL(reverseArrayBlock, dim3(dimGrid), dim3(dimBlock), 0, 0, d_b, d_a);
// block until the device has completed
hipDeviceSynchronize();
// check if kernel execution generated an error
// Check for any CUDA errors
checkCUDAError("kernel invocation");
// device to host copy
hipMemcpy( h_a, d_b, memSize, hipMemcpyDeviceToHost );
// Check for any CUDA errors
checkCUDAError("memcpy");
// verify the data returned to the host is correct
for (int i = 0; i < dimA; i++)
{
assert(h_a[i] == dimA - 1 - i );
}
// free device memory
hipFree(d_a);
hipFree(d_b);
// free host memory
free(h_a);
// If the program makes it this far, then the results are correct and
// there are no run-time errors. Good work!
printf("Correct!\n");
return 0;
}
void checkCUDAError(const char *msg)
{
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
| d4dfe995eee45fcd6876ac79ed8a538394137a82.cu | /*
* Copyright 1993-2008 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
// includes, system
#include <stdio.h>
#include <assert.h>
#include "device_launch_parameters.h"
// Simple utility function to check for CUDA runtime errors
void checkCUDAError(const char* msg);
// Part 1 of 1: implement the kernel
__global__ void reverseArrayBlock( int *dst, int *src )
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
int i_r = (blockDim.x * gridDim.x) - 1 - i;
dst[i] = src[i_r];
}
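// Index mapping example: with blockDim.x * gridDim.x == 8, thread 0 reads
// src[7] and thread 7 reads src[0], so dst receives src reversed. Every
// thread writes a distinct element, so no synchronization is needed.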
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
// pointer for host memory and size
int *h_a;
int dimA = 8;
// pointer for device memory
int *d_b, *d_a;
// define grid and block size
int numBlocks = 1;
int numThreadsPerBlock = dimA;
// allocate host and device memory
size_t memSize = numBlocks * numThreadsPerBlock * sizeof(int);
h_a = (int *) malloc(memSize);
cudaMalloc( (void **) &d_a, memSize );
cudaMalloc( (void **) &d_b, memSize );
// Initialize input array on host
for (int i = 0; i < dimA; ++i)
{
h_a[i] = i;
}
// Copy host array to device array
cudaMemcpy( d_a, h_a, memSize, cudaMemcpyHostToDevice );
// launch kernel
dim3 dimGrid(numBlocks);
dim3 dimBlock(numThreadsPerBlock);
reverseArrayBlock<<< dimGrid, dimBlock >>>( d_b, d_a );
// block until the device has completed
cudaThreadSynchronize();
// check if kernel execution generated an error
// Check for any CUDA errors
checkCUDAError("kernel invocation");
// device to host copy
cudaMemcpy( h_a, d_b, memSize, cudaMemcpyDeviceToHost );
// Check for any CUDA errors
checkCUDAError("memcpy");
// verify the data returned to the host is correct
for (int i = 0; i < dimA; i++)
{
assert(h_a[i] == dimA - 1 - i );
}
// free device memory
cudaFree(d_a);
cudaFree(d_b);
// free host memory
free(h_a);
// If the program makes it this far, then the results are correct and
// there are no run-time errors. Good work!
printf("Correct!\n");
return 0;
}
void checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
|
cc4f81429ee5452e25fe251762c4d0a537a5f0f1.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file csv-reader.cu code to read csv data
*
* CSV Reader
*/
#include <hip/hip_runtime.h>
#include <algorithm>
#include <iostream>
#include <numeric>
#include <string>
#include <vector>
#include <memory>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <thrust/scan.h>
#include <thrust/reduce.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/host_vector.h>
#include "type_conversion.cuh"
#include "datetime_parser.cuh"
#include "cudf.h"
#include "utilities/error_utils.hpp"
#include "utilities/trie.cuh"
#include "utilities/type_dispatcher.hpp"
#include "utilities/cudf_utils.h"
#include <nvstrings/NVStrings.h>
#include "rmm/rmm.h"
#include "rmm/thrust_rmm_allocator.h"
#include "io/comp/io_uncomp.h"
#include "io/utilities/parsing_utils.cuh"
#include "io/utilities/wrapper_utils.hpp"
using std::vector;
using std::string;
/**---------------------------------------------------------------------------*
* @brief Struct used for internal parsing state
*---------------------------------------------------------------------------**/
typedef struct raw_csv_ {
device_buffer<char> data; // on-device: the raw unprocessed CSV data - loaded as a large char * array
device_buffer<uint64_t> recStart; // on-device: Starting position of the records.
ParseOptions opts; // options to control parsing behavior
long num_bytes; // host: the number of bytes in the data
long num_bits; // host: the number of 64-bit bitmaps (different than valid)
gdf_size_type num_records; // host: number of records loaded into device memory, and then number of records to read
// int num_cols; // host: number of columns
	int				num_active_cols;// host: number of columns that will be returned to the user.
int num_actual_cols;// host: number of columns in the file --- based on the number of columns in header
vector<gdf_dtype> dtypes; // host: array of dtypes (since gdf_columns are not created until end)
vector<string> col_names; // host: array of column names
	thrust::host_vector<bool>	h_parseCol;	// host: per-column flags; h_parseCol[x] == false means column x is filtered out during parsing
	rmm::device_vector<bool>	d_parseCol;	// device: copy of h_parseCol used by the parsing kernels
long byte_range_offset; // offset into the data to start parsing
long byte_range_size; // length of the data of interest to parse
gdf_size_type header_row; ///< host: Row index of the header
gdf_size_type nrows; ///< host: Number of rows to read. -1 for all rows
gdf_size_type skiprows; ///< host: Number of rows to skip from the start
gdf_size_type skipfooter; ///< host: Number of rows to skip from the end
std::vector<char> header; ///< host: Header row data, for parsing column names
string prefix; ///< host: Prepended to column ID if there is no header or input column names
rmm::device_vector<SerialTrieNode> d_trueTrie; // device: serialized trie of values to recognize as true
rmm::device_vector<SerialTrieNode> d_falseTrie;// device: serialized trie of values to recognize as false
rmm::device_vector<SerialTrieNode> d_naTrie; // device: serialized trie of NA values
} raw_csv_t;
typedef struct column_data_ {
unsigned long long countFloat;
unsigned long long countDateAndTime;
unsigned long long countString;
unsigned long long countInt8;
unsigned long long countInt16;
unsigned long long countInt32;
unsigned long long countInt64;
gdf_size_type countNULL;
} column_data_t;
using string_pair = std::pair<const char*,size_t>;
//
//---------------create and process ---------------------------------------------
//
gdf_error parseArguments(csv_read_arg *args, raw_csv_t *csv);
// gdf_error getColNamesAndTypes(const char **col_names, const char **dtypes, raw_csv_t *d);
gdf_error inferCompressionType(const char* compression_arg, const char* filepath, string& compression_type);
gdf_error getUncompressedHostData(const char* h_data, size_t num_bytes,
const string& compression,
vector<char>& h_uncomp_data);
gdf_error uploadDataToDevice(const char* h_uncomp_data, size_t h_uncomp_size, raw_csv_t * raw_csv);
gdf_dtype convertStringToDtype(std::string &dtype);
#define checkError(error, txt) if ( error != GDF_SUCCESS) { std::cerr << "ERROR: " << error << " in " << txt << std::endl; return error; }
//
//---------------CUDA Kernel ---------------------------------------------
//
gdf_error launch_dataConvertColumns(raw_csv_t *raw_csv, void **d_gdf,
gdf_valid_type **valid, gdf_dtype *d_dtypes,
gdf_size_type *num_valid);
gdf_error launch_dataTypeDetection(raw_csv_t *raw_csv,
column_data_t *d_columnData);
__global__ void convertCsvToGdf(char *csv, const ParseOptions opts,
gdf_size_type num_records, int num_columns,
bool *parseCol, uint64_t *recStart,
gdf_dtype *dtype, void **gdf_data,
gdf_valid_type **valid,
gdf_size_type *num_valid);
__global__ void dataTypeDetection(char *raw_csv, const ParseOptions opts,
gdf_size_type num_records, int num_columns,
bool *parseCol, uint64_t *recStart,
column_data_t *d_columnData);
//
//---------------CUDA Valid (8 blocks of 8-bits) Bitmap Kernels ---------------------------------------------
//
__device__ long whichBitmap(long record) { return (record/8); }
__device__ int whichBit(long record) { return (record % 8); }
__inline__ __device__ void validAtomicOR(gdf_valid_type* address, gdf_valid_type val)
{
int32_t *base_address = (int32_t*)((gdf_valid_type*)address - ((size_t)address & 3));
int32_t int_val = (int32_t)val << (((size_t) address & 3) * 8);
atomicOr(base_address, int_val);
}
__device__ void setBit(gdf_valid_type* address, int bit) {
gdf_valid_type bitMask[8] = {1, 2, 4, 8, 16, 32, 64, 128};
validAtomicOR(address, bitMask[bit]);
}
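// Example: setBit(address, 6) ORs bitMask[6] == 0x40 into the byte at
// address. If address % 4 == 1, validAtomicOR shifts the mask left by 8 bits
// and issues a 32-bit atomicOr on the word-aligned base address, since
// atomicOr has no single-byte overload.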
/**---------------------------------------------------------------------------*
 * @brief Estimates the maximum expected length of a row, based on the number
* of columns
*
* If the number of columns is not available, it will return a value large
* enough for most use cases
*
* @param[in] num_columns Number of columns in the CSV file (optional)
*
* @return Estimated maximum size of a row, in bytes
*---------------------------------------------------------------------------**/
constexpr size_t calculateMaxRowSize(int num_columns=0) noexcept {
constexpr size_t max_row_bytes = 16*1024; // 16KB
constexpr size_t column_bytes = 64;
constexpr size_t base_padding = 1024; // 1KB
if (num_columns == 0){
// Use flat size if the number of columns is not known
return max_row_bytes;
}
else {
// Expand the size based on the number of columns, if available
return base_padding + num_columns * column_bytes;
}
}
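// Example: calculateMaxRowSize(16) yields 1024 + 16 * 64 == 2048 bytes,
// while calculateMaxRowSize() falls back to the flat 16KB estimate.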
/**
* @brief Removes the first and Last quote in the string
*/
string removeQuotes(string str, char quotechar) {
// Exclude first and last quotation char
const size_t first_quote = str.find(quotechar);
if (first_quote != string::npos) {
str.erase(first_quote, 1);
}
const size_t last_quote = str.rfind(quotechar);
if (last_quote != string::npos) {
str.erase(last_quote, 1);
}
return str;
}
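// Example: removeQuotes("\"Name\"", '"') returns "Name"; a string that does
// not contain the quotechar is returned unchanged.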
/**
* @brief Parse the first row to set the column names in the raw_csv parameter
*
* The first row can be either the header row, or the first data row
*
* @param[in,out] raw_csv Structure containing the csv parsing parameters
* and intermediate results
*
* @return gdf_error with error code on failure, otherwise GDF_SUCCESS
*/
gdf_error setColumnNamesFromCsv(raw_csv_t* raw_csv) {
vector<char> first_row = raw_csv->header;
// No header, read the first data row
if (first_row.empty()) {
uint64_t first_row_len{};
// If file only contains one row, raw_csv->recStart[1] is not valid
if (raw_csv->num_records > 1) {
CUDA_TRY(hipMemcpy(&first_row_len, raw_csv->recStart.data() + 1, sizeof(uint64_t), hipMemcpyDefault));
}
else {
// File has one row - use the file size for the row size
first_row_len = raw_csv->num_bytes / sizeof(char);
}
first_row.resize(first_row_len);
CUDA_TRY(hipMemcpy(first_row.data(), raw_csv->data.data(), first_row_len * sizeof(char), hipMemcpyDefault));
}
int num_cols = 0;
bool quotation = false;
for (size_t pos = 0, prev = 0; pos < first_row.size(); ++pos) {
// Flip the quotation flag if current character is a quotechar
if(first_row[pos] == raw_csv->opts.quotechar) {
quotation = !quotation;
}
// Check if end of a column/row
else if (pos == first_row.size() - 1 ||
(!quotation && first_row[pos] == raw_csv->opts.terminator) ||
(!quotation && first_row[pos] == raw_csv->opts.delimiter)) {
// This is the header, add the column name
if (raw_csv->header_row >= 0) {
// Include the current character, in case the line is not terminated
int col_name_len = pos - prev + 1;
// Exclude the delimiter/terminator is present
if (first_row[pos] == raw_csv->opts.delimiter || first_row[pos] == raw_csv->opts.terminator) {
--col_name_len;
}
// Also exclude '\r' character at the end of the column name if it's part of the terminator
if (col_name_len > 0 &&
raw_csv->opts.terminator == '\n' &&
first_row[pos] == '\n' &&
first_row[pos - 1] == '\r') {
--col_name_len;
}
const string new_col_name(first_row.data() + prev, col_name_len);
raw_csv->col_names.push_back(removeQuotes(new_col_name, raw_csv->opts.quotechar));
// Stop parsing when we hit the line terminator; relevant when there is a blank line following the header.
// In this case, first_row includes multiple line terminators at the end, as the new recStart belongs
// to a line that comes after the blank line(s)
if (!quotation && first_row[pos] == raw_csv->opts.terminator){
break;
}
}
else {
// This is the first data row, add the automatically generated name
raw_csv->col_names.push_back(raw_csv->prefix + std::to_string(num_cols));
}
num_cols++;
// Skip adjacent delimiters if delim_whitespace is set
while (raw_csv->opts.multi_delimiter &&
pos < first_row.size() &&
first_row[pos] == raw_csv->opts.delimiter &&
first_row[pos + 1] == raw_csv->opts.delimiter) {
++pos;
}
prev = pos + 1;
}
}
return GDF_SUCCESS;
}
/**---------------------------------------------------------------------------*
* @brief Updates the raw_csv_t object with the total number of rows and
* quotation characters in the file
*
 * Does not count the quotations if quotechar is set to '\0'.
*
* @param[in] h_data Pointer to the csv data in host memory
* @param[in] h_size Size of the input data, in bytes
* @param[in,out] raw_csv Structure containing the csv parsing parameters
* and intermediate results
*
* @return gdf_error
*---------------------------------------------------------------------------**/
gdf_error countRecordsAndQuotes(const char *h_data, size_t h_size, raw_csv_t *raw_csv) {
vector<char> chars_to_count{raw_csv->opts.terminator};
if (raw_csv->opts.quotechar != '\0') {
chars_to_count.push_back(raw_csv->opts.quotechar);
}
raw_csv->num_records = countAllFromSet(h_data, h_size, chars_to_count);
// If not starting at an offset, add an extra row to account for the first row in the file
if (raw_csv->byte_range_offset == 0) {
++raw_csv->num_records;
}
return GDF_SUCCESS;
}
/**---------------------------------------------------------------------------*
* @brief Updates the raw_csv_t object with the offset of each row in the file
* Also add positions of each quotation character in the file.
*
 * Does not process the quotations if quotechar is set to '\0'.
*
* @param[in] h_data Pointer to the csv data in host memory
* @param[in] h_size Size of the input data, in bytes
* @param[in,out] raw_csv Structure containing the csv parsing parameters
* and intermediate results
*
* @return gdf_error
*---------------------------------------------------------------------------**/
gdf_error setRecordStarts(const char *h_data, size_t h_size, raw_csv_t *raw_csv) {
// Allocate space to hold the record starting points
const bool last_line_terminated = (h_data[h_size - 1] == raw_csv->opts.terminator);
// If the last line is not terminated, allocate space for the EOF entry (added later)
const gdf_size_type record_start_count = raw_csv->num_records + (last_line_terminated ? 0 : 1);
raw_csv->recStart = device_buffer<uint64_t>(record_start_count);
auto* find_result_ptr = raw_csv->recStart.data();
if (raw_csv->byte_range_offset == 0) {
find_result_ptr++;
CUDA_TRY(hipMemsetAsync(raw_csv->recStart.data(), 0ull, sizeof(uint64_t)));
}
vector<char> chars_to_find{raw_csv->opts.terminator};
if (raw_csv->opts.quotechar != '\0') {
chars_to_find.push_back(raw_csv->opts.quotechar);
}
// Passing offset = 1 to return positions AFTER the found character
findAllFromSet(h_data, h_size, chars_to_find, 1, find_result_ptr);
	// Previous call stores the record positions as encountered by all threads
// Sort the record positions as subsequent processing may require filtering
// certain rows or other processing on specific records
thrust::sort(rmm::exec_policy()->on(0), raw_csv->recStart.data(), raw_csv->recStart.data() + raw_csv->num_records);
// Currently, ignoring lineterminations within quotes is handled by recording
// the records of both, and then filtering out the records that is a quotechar
// or a linetermination within a quotechar pair. The future major refactoring
// of csv_reader and its kernels will probably use a different tactic.
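	// Example: for a row such as a,"b\nc",d the quotechars and the embedded
	// terminator were all recorded as candidate record starts; the pass below
	// re-marks those candidates with num_bytes so the sort pushes them past
	// the reduced num_records.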
if (raw_csv->opts.quotechar != '\0') {
vector<uint64_t> h_rec_starts(raw_csv->num_records);
const size_t rec_start_size = sizeof(uint64_t) * (h_rec_starts.size());
CUDA_TRY( hipMemcpy(h_rec_starts.data(), raw_csv->recStart.data(), rec_start_size, hipMemcpyDeviceToHost) );
auto recCount = raw_csv->num_records;
bool quotation = false;
for (gdf_size_type i = 1; i < raw_csv->num_records; ++i) {
if (h_data[h_rec_starts[i] - 1] == raw_csv->opts.quotechar) {
quotation = !quotation;
h_rec_starts[i] = raw_csv->num_bytes;
recCount--;
}
else if (quotation) {
h_rec_starts[i] = raw_csv->num_bytes;
recCount--;
}
}
CUDA_TRY( hipMemcpy(raw_csv->recStart.data(), h_rec_starts.data(), rec_start_size, hipMemcpyHostToDevice) );
thrust::sort(rmm::exec_policy()->on(0), raw_csv->recStart.data(), raw_csv->recStart.data() + raw_csv->num_records);
raw_csv->num_records = recCount;
}
if (!last_line_terminated){
// Add the EOF as the last record when the terminator is missing in the last line
const uint64_t eof_offset = h_size;
CUDA_TRY(hipMemcpy(raw_csv->recStart.data() + raw_csv->num_records, &eof_offset, sizeof(uint64_t), hipMemcpyDefault));
// Update the record count
++raw_csv->num_records;
}
return GDF_SUCCESS;
}
/**---------------------------------------------------------------------------*
* @brief Reads CSV-structured data and returns an array of gdf_columns.
*
* @param[in,out] args Structure containing input and output args
*
* @return gdf_error GDF_SUCCESS if successful, otherwise an error code.
*---------------------------------------------------------------------------**/
gdf_error read_csv(csv_read_arg *args)
{
gdf_error error = gdf_error::GDF_SUCCESS;
//-----------------------------------------------------------------------------
// create the CSV data structure - this will be filled in as the CSV data is processed.
// Done first to validate data types
raw_csv_t raw_csv{};
// error = parseArguments(args, raw_csv);
raw_csv.num_actual_cols = args->num_cols;
raw_csv.num_active_cols = args->num_cols;
raw_csv.num_records = 0;
raw_csv.header_row = args->header;
raw_csv.skiprows = args->skiprows;
raw_csv.skipfooter = args->skipfooter;
raw_csv.nrows = args->nrows;
raw_csv.prefix = args->prefix == nullptr ? "" : string(args->prefix);
if (args->delim_whitespace) {
raw_csv.opts.delimiter = ' ';
raw_csv.opts.multi_delimiter = true;
} else {
raw_csv.opts.delimiter = args->delimiter;
raw_csv.opts.multi_delimiter = false;
}
if (args->windowslinetermination) {
raw_csv.opts.terminator = '\n';
} else {
raw_csv.opts.terminator = args->lineterminator;
}
if (args->quotechar != '\0' && args->quoting != QUOTE_NONE) {
raw_csv.opts.quotechar = args->quotechar;
raw_csv.opts.keepquotes = false;
raw_csv.opts.doublequote = args->doublequote;
} else {
raw_csv.opts.quotechar = '\0';
raw_csv.opts.keepquotes = true;
raw_csv.opts.doublequote = false;
}
raw_csv.opts.skipblanklines = args->skip_blank_lines;
raw_csv.opts.comment = args->comment;
raw_csv.opts.dayfirst = args->dayfirst;
raw_csv.opts.decimal = args->decimal;
raw_csv.opts.thousands = args->thousands;
if (raw_csv.opts.decimal == raw_csv.opts.delimiter) {
checkError(GDF_INVALID_API_CALL, "Decimal point cannot be the same as the delimiter");
}
if (raw_csv.opts.thousands == raw_csv.opts.delimiter) {
checkError(GDF_INVALID_API_CALL, "Thousands separator cannot be the same as the delimiter");
}
string compression_type;
error = inferCompressionType(args->compression, args->filepath_or_buffer, compression_type);
checkError(error, "call to inferCompressionType");
raw_csv.byte_range_offset = args->byte_range_offset;
raw_csv.byte_range_size = args->byte_range_size;
if (raw_csv.byte_range_offset > 0 || raw_csv.byte_range_size > 0) {
if (raw_csv.nrows >= 0 || raw_csv.skiprows > 0 || raw_csv.skipfooter > 0) {
checkError(GDF_INVALID_API_CALL,
"Cannot manually limit rows to be read when using the byte range parameter");
}
if (compression_type != "none") {
checkError(GDF_INVALID_API_CALL,
"Cannot read compressed input when using the byte range parameter");
}
}
// Handle user-defined booleans values, whereby field data is substituted
// with true/false values; CUDF booleans are int types of 0 or 1
vector<string> true_values{"True", "TRUE"};
if (args->true_values != nullptr && args->num_true_values > 0) {
for (int i = 0; i < args->num_true_values; ++i) {
true_values.emplace_back(args->true_values[i]);
}
}
raw_csv.d_trueTrie = createSerializedTrie(true_values);
raw_csv.opts.trueValuesTrie = raw_csv.d_trueTrie.data().get();
vector<string> false_values{"False", "FALSE"};
if (args->false_values != nullptr && args->num_false_values > 0) {
for (int i = 0; i < args->num_false_values; ++i) {
false_values.emplace_back(args->false_values[i]);
}
}
raw_csv.d_falseTrie = createSerializedTrie(false_values);
raw_csv.opts.falseValuesTrie = raw_csv.d_falseTrie.data().get();
if (args->na_filter &&
(args->keep_default_na || (args->na_values != nullptr && args->num_na_values > 0))) {
vector<string> na_values{
"#N/A", "#N/A N/A", "#NA", "-1.#IND",
"-1.#QNAN", "-NaN", "-nan", "1.#IND",
"1.#QNAN", "N/A", "NA", "NULL",
"NaN", "n/a", "nan", "null"};
if(!args->keep_default_na){
na_values.clear();
}
if (args->na_values != nullptr && args->num_na_values > 0) {
for (int i = 0; i < args->num_na_values; ++i) {
na_values.emplace_back(args->na_values[i]);
}
}
raw_csv.d_naTrie = createSerializedTrie(na_values);
raw_csv.opts.naValuesTrie = raw_csv.d_naTrie.data().get();
}
args->data = nullptr;
//-----------------------------------------------------------------------------
// memory map in the data
void * map_data = NULL;
size_t map_size = 0;
size_t map_offset = 0;
int fd = 0;
if (args->input_data_form == gdf_csv_input_form::FILE_PATH)
{
fd = open(args->filepath_or_buffer, O_RDONLY );
		if (fd < 0) { checkError(GDF_FILE_ERROR, "Error opening file"); }
struct stat st{};
if (fstat(fd, &st)) { close(fd); checkError(GDF_FILE_ERROR, "cannot stat file"); }
const auto file_size = st.st_size;
const auto page_size = sysconf(_SC_PAGESIZE);
if (args->byte_range_offset >= (size_t)file_size) {
close(fd);
checkError(GDF_INVALID_API_CALL, "The byte_range offset is larger than the file size");
}
// Have to align map offset to page size
map_offset = (args->byte_range_offset/page_size)*page_size;
// Set to rest-of-the-file size, will reduce based on the byte range size
raw_csv.num_bytes = map_size = file_size - map_offset;
// Include the page padding in the mapped size
const size_t page_padding = args->byte_range_offset - map_offset;
const size_t padded_byte_range_size = raw_csv.byte_range_size + page_padding;
if (raw_csv.byte_range_size != 0 && padded_byte_range_size < map_size) {
// Need to make sure that w/ padding we don't overshoot the end of file
map_size = min(padded_byte_range_size + calculateMaxRowSize(args->num_cols), map_size);
}
// Ignore page padding for parsing purposes
raw_csv.num_bytes = map_size - page_padding;
map_data = mmap(0, map_size, PROT_READ, MAP_PRIVATE, fd, map_offset);
if (map_data == MAP_FAILED || map_size==0) { close(fd); checkError(GDF_C_ERROR, "Error mapping file"); }
}
else if (args->input_data_form == gdf_csv_input_form::HOST_BUFFER)
{
map_data = (void *)args->filepath_or_buffer;
raw_csv.num_bytes = map_size = args->buffer_size;
}
else { checkError(GDF_C_ERROR, "invalid input type"); }
const char* h_uncomp_data;
size_t h_uncomp_size = 0;
// Used when the input data is compressed, to ensure the allocated uncompressed data is freed
vector<char> h_uncomp_data_owner;
if (compression_type == "none") {
// Do not use the owner vector here to avoid copying the whole file to the heap
h_uncomp_data = (const char*)map_data + (args->byte_range_offset - map_offset);
h_uncomp_size = raw_csv.num_bytes;
}
else {
error = getUncompressedHostData( (const char *)map_data, map_size, compression_type, h_uncomp_data_owner);
checkError(error, "call to getUncompressedHostData");
h_uncomp_data = h_uncomp_data_owner.data();
h_uncomp_size = h_uncomp_data_owner.size();
}
assert(h_uncomp_data != nullptr);
assert(h_uncomp_size != 0);
error = countRecordsAndQuotes(h_uncomp_data, h_uncomp_size, &raw_csv);
checkError(error, "call to count the number of rows");
error = setRecordStarts(h_uncomp_data, h_uncomp_size, &raw_csv);
checkError(error, "call to store the row offsets");
error = uploadDataToDevice(h_uncomp_data, h_uncomp_size, &raw_csv);
checkError(error, "call to upload the CSV data to the device");
//-----------------------------------------------------------------------------
//--- done with host data
if (args->input_data_form == gdf_csv_input_form::FILE_PATH)
{
close(fd);
munmap(map_data, map_size);
}
//-----------------------------------------------------------------------------
//-- Populate the header
// Check if the user gave us a list of column names
if(args->names == nullptr) {
error = setColumnNamesFromCsv(&raw_csv);
if (error != GDF_SUCCESS) {
return error;
}
const int h_num_cols = raw_csv.col_names.size();
		// Initialize a boolean array that states whether a column needs to be read or filtered out.
raw_csv.h_parseCol = thrust::host_vector<bool>(h_num_cols, true);
// Rename empty column names to "Unnamed: col_index"
for (size_t col_idx = 0; col_idx < raw_csv.col_names.size(); ++col_idx) {
if (raw_csv.col_names[col_idx].empty()) {
raw_csv.col_names[col_idx] = string("Unnamed: ") + std::to_string(col_idx);
}
}
int h_dup_cols_removed = 0;
// Looking for duplicates
for (auto it = raw_csv.col_names.begin(); it != raw_csv.col_names.end(); it++){
bool found_dupe = false;
for (auto it2 = (it+1); it2 != raw_csv.col_names.end(); it2++){
if (*it==*it2){
found_dupe=true;
break;
}
}
if(found_dupe){
int count=1;
for (auto it2 = (it+1); it2 != raw_csv.col_names.end(); it2++){
if (*it==*it2){
if(args->mangle_dupe_cols){
// Replace all the duplicates of column X with X.1,X.2,... First appearance stays as X.
std::string newColName = *it2;
newColName += "." + std::to_string(count);
count++;
*it2 = newColName;
} else{
// All duplicate fields will be ignored.
int pos=std::distance(raw_csv.col_names.begin(), it2);
raw_csv.h_parseCol[pos]=false;
h_dup_cols_removed++;
}
}
}
}
}
raw_csv.num_actual_cols = h_num_cols; // Actual number of columns in the CSV file
		raw_csv.num_active_cols = h_num_cols-h_dup_cols_removed;	// Number of columns to process after removing duplicated fields
}
else {
raw_csv.h_parseCol = thrust::host_vector<bool>(args->num_cols, true);
for (int i = 0; i<raw_csv.num_actual_cols; i++){
std::string col_name = args->names[i];
raw_csv.col_names.push_back(col_name);
}
}
	// User can restrict which columns are parsed, either by index (use_cols_int) or by name (use_cols_char)
if (args->use_cols_int!=NULL || args->use_cols_char!=NULL){
if(args->use_cols_int!=NULL){
for (int i = 0; i<raw_csv.num_actual_cols; i++)
raw_csv.h_parseCol[i]=false;
for(int i=0; i < args->use_cols_int_len; i++){
int pos = args->use_cols_int[i];
raw_csv.h_parseCol[pos]=true;
}
raw_csv.num_active_cols = args->use_cols_int_len;
}else{
for (int i = 0; i<raw_csv.num_actual_cols; i++)
raw_csv.h_parseCol[i]=false;
int countFound=0;
for(int i=0; i < args->use_cols_char_len; i++){
std::string colName(args->use_cols_char[i]);
for (auto it = raw_csv.col_names.begin(); it != raw_csv.col_names.end(); it++){
if(colName==*it){
countFound++;
int pos=std::distance(raw_csv.col_names.begin(), it);
raw_csv.h_parseCol[pos]=true;
break;
}
}
}
raw_csv.num_active_cols = countFound;
}
}
raw_csv.d_parseCol = raw_csv.h_parseCol;
//-----------------------------------------------------------------------------
//--- Auto detect types of the vectors
if(args->dtype==NULL){
if (raw_csv.num_records == 0) {
checkError(GDF_INVALID_API_CALL, "read_csv: no data available for data type inference");
}
vector<column_data_t> h_ColumnData(raw_csv.num_active_cols);
device_buffer<column_data_t> d_ColumnData(raw_csv.num_active_cols);
CUDA_TRY( hipMemset(d_ColumnData.data(), 0, (sizeof(column_data_t) * (raw_csv.num_active_cols)) ) ) ;
launch_dataTypeDetection(&raw_csv, d_ColumnData.data());
CUDA_TRY( hipMemcpy(h_ColumnData.data(), d_ColumnData.data(), sizeof(column_data_t) * (raw_csv.num_active_cols), hipMemcpyDeviceToHost));
// host: array of dtypes (since gdf_columns are not created until end)
vector<gdf_dtype> d_detectedTypes;
raw_csv.dtypes.clear();
for(int col = 0; col < raw_csv.num_active_cols; col++){
unsigned long long countInt = h_ColumnData[col].countInt8+h_ColumnData[col].countInt16+
h_ColumnData[col].countInt32+h_ColumnData[col].countInt64;
if (h_ColumnData[col].countNULL == raw_csv.num_records){
d_detectedTypes.push_back(GDF_INT8); // Entire column is NULL. Allocating the smallest amount of memory
} else if(h_ColumnData[col].countString>0L){
d_detectedTypes.push_back(GDF_STRING); // For auto-detection, we are currently not supporting strings.
} else if(h_ColumnData[col].countDateAndTime>0L){
d_detectedTypes.push_back(GDF_DATE64);
} else if(h_ColumnData[col].countFloat > 0L ||
(h_ColumnData[col].countFloat==0L && countInt >0L && h_ColumnData[col].countNULL >0L) ) {
					// The second condition has been added to conform to PANDAS, which states that a column of
					// integers containing even a single NULL record needs to be treated as floats.
d_detectedTypes.push_back(GDF_FLOAT64);
}
else {
d_detectedTypes.push_back(GDF_INT64);
}
}
raw_csv.dtypes=d_detectedTypes;
}
else{
for ( int x = 0; x < raw_csv.num_actual_cols; x++) {
std::string temp_type = args->dtype[x];
gdf_dtype col_dtype = GDF_invalid;
if(temp_type.find(':') != std::string::npos){
for (auto it = raw_csv.col_names.begin(); it != raw_csv.col_names.end(); it++){
std::size_t idx = temp_type.find(':');
if(temp_type.substr( 0, idx) == *it){
std::string temp_dtype = temp_type.substr( idx +1);
col_dtype = convertStringToDtype(temp_dtype);
break;
}
}
}
else{
col_dtype = convertStringToDtype( temp_type );
}
if (col_dtype == GDF_invalid)
return GDF_UNSUPPORTED_DTYPE;
raw_csv.dtypes.push_back(col_dtype);
}
}
// Alloc output; columns' data memory is still expected for empty dataframe
std::vector<gdf_column_wrapper> columns;
for (int col = 0, active_col = 0; col < raw_csv.num_actual_cols; ++col) {
if (raw_csv.h_parseCol[col]) {
			// When dtypes are inferred, raw_csv.dtypes only holds entries for active columns
auto dtype = raw_csv.dtypes[args->dtype == nullptr ? active_col : col];
columns.emplace_back(raw_csv.num_records, dtype,
gdf_dtype_extra_info{TIME_UNIT_NONE},
raw_csv.col_names[col]);
CUDF_EXPECTS(columns.back().allocate() == GDF_SUCCESS, "Cannot allocate columns");
active_col++;
}
}
// Convert CSV input to cuDF output
if (raw_csv.num_records != 0) {
thrust::host_vector<gdf_dtype> h_dtypes(raw_csv.num_active_cols);
thrust::host_vector<void*> h_data(raw_csv.num_active_cols);
thrust::host_vector<gdf_valid_type*> h_valid(raw_csv.num_active_cols);
for (int i = 0; i < raw_csv.num_active_cols; ++i) {
h_dtypes[i] = columns[i]->dtype;
h_data[i] = columns[i]->data;
h_valid[i] = columns[i]->valid;
}
rmm::device_vector<gdf_dtype> d_dtypes = h_dtypes;
rmm::device_vector<void*> d_data = h_data;
rmm::device_vector<gdf_valid_type*> d_valid = h_valid;
rmm::device_vector<gdf_size_type> d_valid_counts(raw_csv.num_active_cols, 0);
CUDF_EXPECTS(
launch_dataConvertColumns(&raw_csv, d_data.data().get(),
d_valid.data().get(), d_dtypes.data().get(),
d_valid_counts.data().get()) == GDF_SUCCESS,
"Cannot convert CSV data to cuDF columns");
CUDA_TRY(hipStreamSynchronize(0));
thrust::host_vector<gdf_size_type> h_valid_counts = d_valid_counts;
for (int i = 0; i < raw_csv.num_active_cols; ++i) {
columns[i]->null_count = columns[i]->size - h_valid_counts[i];
}
}
for (int i = 0; i < raw_csv.num_active_cols; ++i) {
if (columns[i]->dtype == GDF_STRING) {
std::unique_ptr<NVStrings, decltype(&NVStrings::destroy)> str_data(
NVStrings::create_from_index(static_cast<string_pair *>(columns[i]->data), columns[i]->size),
&NVStrings::destroy);
RMM_TRY(RMM_FREE(columns[i]->data, 0));
// PANDAS' default behavior of enabling doublequote for two consecutive
// quotechars in quoted fields results in reduction to a single quotechar
if ((raw_csv.opts.quotechar != '\0') &&
(raw_csv.opts.doublequote == true)) {
const std::string quotechar(1, raw_csv.opts.quotechar);
const std::string doublequotechar(2, raw_csv.opts.quotechar);
columns[i]->data = str_data->replace(doublequotechar.c_str(), quotechar.c_str());
}
else {
columns[i]->data = str_data.release();
}
}
}
// Transfer ownership to raw pointer output arguments
args->data = (gdf_column **)malloc(sizeof(gdf_column *) * raw_csv.num_active_cols);
for (int i = 0; i < raw_csv.num_active_cols; ++i) {
args->data[i] = columns[i].release();
}
args->num_cols_out = raw_csv.num_active_cols;
args->num_rows_out = raw_csv.num_records;
return error;
}
/*
 * Converts a data type name, given as a string, into the corresponding gdf_dtype enum value
 */
gdf_dtype convertStringToDtype(std::string &dtype) {
if (dtype.compare( "str") == 0) return GDF_STRING;
if (dtype.compare( "date") == 0) return GDF_DATE64;
if (dtype.compare( "date32") == 0) return GDF_DATE32;
if (dtype.compare( "date64") == 0) return GDF_DATE64;
if (dtype.compare( "timestamp") == 0) return GDF_TIMESTAMP;
if (dtype.compare( "category") == 0) return GDF_CATEGORY;
if (dtype.compare( "float") == 0) return GDF_FLOAT32;
if (dtype.compare( "float32") == 0) return GDF_FLOAT32;
if (dtype.compare( "float64") == 0) return GDF_FLOAT64;
if (dtype.compare( "double") == 0) return GDF_FLOAT64;
if (dtype.compare( "short") == 0) return GDF_INT16;
if (dtype.compare( "int") == 0) return GDF_INT32;
if (dtype.compare( "int32") == 0) return GDF_INT32;
if (dtype.compare( "int64") == 0) return GDF_INT64;
if (dtype.compare( "long") == 0) return GDF_INT64;
return GDF_invalid;
}
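// Example: convertStringToDtype maps "int32" to GDF_INT32 and "str" to
// GDF_STRING; unrecognized names yield GDF_invalid, which the caller in
// read_csv turns into GDF_UNSUPPORTED_DTYPE.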
/**---------------------------------------------------------------------------*
* @brief Infer the compression type from the compression parameter and
* the input file name
*
* Returns "none" if the input is not compressed.
*
* @param[in] compression_arg Input string that is potentially describing
* the compression type. Can also be nullptr, "none", or "infer"
* @param[in] filepath path + name of the input file
* @param[out] compression_type String describing the inferred compression type
*
* @return gdf_error with error code on failure, otherwise GDF_SUCCESS
*---------------------------------------------------------------------------**/
gdf_error inferCompressionType(const char* compression_arg, const char* filepath, string& compression_type)
{
if (compression_arg && 0 == strcasecmp(compression_arg, "none")) {
compression_arg = nullptr;
}
if (compression_arg && 0 == strcasecmp(compression_arg, "infer"))
{
const char *file_ext = strrchr(filepath, '.');
compression_arg = nullptr;
if (file_ext)
{
if (!strcasecmp(file_ext, ".gz"))
compression_arg = "gzip";
else if (!strcasecmp(file_ext, ".zip"))
compression_arg = "zip";
else if (!strcasecmp(file_ext, ".bz2"))
compression_arg = "bz2";
else if (!strcasecmp(file_ext, ".xz"))
compression_arg = "xz";
else {
// TODO: return error here
}
}
}
compression_type = compression_arg == nullptr? "none":string(compression_arg);
return GDF_SUCCESS;
}
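// Example: with compression_arg "infer" and filepath "data.csv.gz", the
// file extension selects "gzip"; an unrecognized extension or a null
// argument resolves to "none".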
/**---------------------------------------------------------------------------*
* @brief Uncompresses the input data and stores the allocated result into
* a vector.
*
* @param[in] h_data Pointer to the csv data in host memory
* @param[in] num_bytes Size of the input data, in bytes
* @param[in] compression String describing the compression type
* @param[out] h_uncomp_data Vector containing the output uncompressed data
*
* @return gdf_error with error code on failure, otherwise GDF_SUCCESS
*---------------------------------------------------------------------------**/
gdf_error getUncompressedHostData(const char* h_data, size_t num_bytes, const string& compression, vector<char>& h_uncomp_data)
{
int comp_type = IO_UNCOMP_STREAM_TYPE_INFER;
if (compression == "gzip")
comp_type = IO_UNCOMP_STREAM_TYPE_GZIP;
else if (compression == "zip")
comp_type = IO_UNCOMP_STREAM_TYPE_ZIP;
else if (compression == "bz2")
comp_type = IO_UNCOMP_STREAM_TYPE_BZIP2;
else if (compression == "xz")
comp_type = IO_UNCOMP_STREAM_TYPE_XZ;
return io_uncompress_single_h2d(h_data, num_bytes, comp_type, h_uncomp_data);
}
/**---------------------------------------------------------------------------*
* @brief Uploads the relevant segment of the input csv data onto the GPU.
*
* Only rows that need to be read are copied to the GPU, based on parameters
* like nrows, skipheader, skipfooter.
* Also updates the array of record starts to match the device data offset.
*
* @param[in] h_uncomp_data Pointer to the uncompressed csv data in host memory
* @param[in] h_uncomp_size Size of the input data, in bytes
* @param[in,out] raw_csv Structure containing the csv parsing parameters
* and intermediate results
*
* @return gdf_error with error code on failure, otherwise GDF_SUCCESS
*---------------------------------------------------------------------------**/
gdf_error uploadDataToDevice(const char *h_uncomp_data, size_t h_uncomp_size,
raw_csv_t *raw_csv) {
// Exclude the rows that are to be skipped from the start
GDF_REQUIRE(raw_csv->num_records > raw_csv->skiprows, GDF_INVALID_API_CALL);
const auto first_row = raw_csv->skiprows;
raw_csv->num_records = raw_csv->num_records - first_row;
std::vector<uint64_t> h_rec_starts(raw_csv->num_records);
CUDA_TRY(hipMemcpy(h_rec_starts.data(), raw_csv->recStart.data() + first_row,
sizeof(uint64_t) * h_rec_starts.size(),
hipMemcpyDefault));
// Trim lines that are outside range, but keep one greater for the end offset
if (raw_csv->byte_range_size != 0) {
auto it = h_rec_starts.end() - 1;
while (it >= h_rec_starts.begin() &&
*it > uint64_t(raw_csv->byte_range_size)) {
--it;
}
if ((it + 2) < h_rec_starts.end()) {
h_rec_starts.erase(it + 2, h_rec_starts.end());
}
}
	// Discard blank lines, comment-only lines, or both, depending on the options.
// If only handling one of them, ensure it doesn't match against \0 as we do
// not want certain scenarios to be filtered out (end-of-file)
if (raw_csv->opts.skipblanklines || raw_csv->opts.comment != '\0') {
const auto match_newline = raw_csv->opts.skipblanklines ? raw_csv->opts.terminator
: raw_csv->opts.comment;
const auto match_comment = raw_csv->opts.comment != '\0' ? raw_csv->opts.comment
: match_newline;
const auto match_return = (raw_csv->opts.skipblanklines &&
raw_csv->opts.terminator == '\n') ? '\r'
: match_comment;
h_rec_starts.erase(
std::remove_if(h_rec_starts.begin(), h_rec_starts.end(),
[&](uint64_t i) {
return (h_uncomp_data[i] == match_newline ||
h_uncomp_data[i] == match_return ||
h_uncomp_data[i] == match_comment);
}),
h_rec_starts.end());
}
raw_csv->num_records = h_rec_starts.size();
// Exclude the rows before the header row (inclusive)
// But copy the header data for parsing the column names later (if necessary)
if (raw_csv->header_row >= 0) {
raw_csv->header.assign(
h_uncomp_data + h_rec_starts[raw_csv->header_row],
h_uncomp_data + h_rec_starts[raw_csv->header_row + 1]);
h_rec_starts.erase(h_rec_starts.begin(),
h_rec_starts.begin() + raw_csv->header_row + 1);
raw_csv->num_records = h_rec_starts.size();
}
// Exclude the rows that exceed past the requested number
if (raw_csv->nrows >= 0 && raw_csv->nrows < raw_csv->num_records) {
h_rec_starts.resize(raw_csv->nrows + 1); // include end offset
raw_csv->num_records = h_rec_starts.size();
}
// Exclude the rows that are to be skipped from the end
if (raw_csv->skipfooter > 0) {
h_rec_starts.resize(h_rec_starts.size() - raw_csv->skipfooter);
raw_csv->num_records = h_rec_starts.size();
}
// Check that there is actual data to parse
GDF_REQUIRE(raw_csv->num_records > 0, GDF_INVALID_API_CALL);
const auto start_offset = h_rec_starts.front();
const auto end_offset = h_rec_starts.back();
raw_csv->num_bytes = end_offset - start_offset;
assert(raw_csv->num_bytes <= h_uncomp_size);
raw_csv->num_bits = (raw_csv->num_bytes + 63) / 64;
// Resize and upload the rows of interest
raw_csv->recStart.resize(raw_csv->num_records);
CUDA_TRY(hipMemcpy(raw_csv->recStart.data(), h_rec_starts.data(),
sizeof(uint64_t) * raw_csv->num_records,
hipMemcpyDefault));
// Upload the raw data that is within the rows of interest
raw_csv->data = device_buffer<char>(raw_csv->num_bytes);
CUDA_TRY(hipMemcpy(raw_csv->data.data(), h_uncomp_data + start_offset,
raw_csv->num_bytes, hipMemcpyHostToDevice));
// Adjust row start positions to account for the data subcopy
thrust::transform(rmm::exec_policy()->on(0), raw_csv->recStart.data(),
raw_csv->recStart.data() + raw_csv->num_records,
thrust::make_constant_iterator(start_offset),
raw_csv->recStart.data(), thrust::minus<uint64_t>());
// The array of row offsets includes EOF
// reduce the number of records by one to exclude it from the row count
raw_csv->num_records--;
return GDF_SUCCESS;
}
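// Illustrative walk-through (hypothetical inputs): with skiprows=1, header_row=0
// and nrows=2, the first row offset is dropped, the next row is copied into
// raw_csv->header, the two following data rows plus one end offset are kept,
// and every surviving offset is rebased against start_offset before the upload.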
//----------------------------------------------------------------------------------------------------------------
// CUDA Kernels
//----------------------------------------------------------------------------------------------------------------
/**---------------------------------------------------------------------------*
* @brief Helper function to setup and launch CSV parsing CUDA kernel.
*
* @param[in,out] raw_csv The metadata for the CSV data
* @param[out] gdf The output column data
* @param[out] valid The bitmaps indicating whether column fields are valid
* @param[out] str_cols The start/end offsets for string data types
* @param[out] num_valid The numbers of valid fields in columns
*
* @return gdf_error GDF_SUCCESS upon completion
*---------------------------------------------------------------------------**/
gdf_error launch_dataConvertColumns(raw_csv_t *raw_csv, void **gdf,
gdf_valid_type **valid, gdf_dtype *d_dtypes,
gdf_size_type *num_valid) {
int blockSize; // suggested thread count to use
int minGridSize; // minimum block count required
CUDA_TRY(hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize,
convertCsvToGdf));
// Calculate actual block count to use based on records count
int gridSize = (raw_csv->num_records + blockSize - 1) / blockSize;
  hipLaunchKernelGGL((convertCsvToGdf), dim3(gridSize), dim3(blockSize), 0, 0,
raw_csv->data.data(), raw_csv->opts, raw_csv->num_records,
raw_csv->num_actual_cols, raw_csv->d_parseCol.data().get(), raw_csv->recStart.data(),
d_dtypes, gdf, valid, num_valid);
CUDA_TRY(hipGetLastError());
return GDF_SUCCESS;
}
/**---------------------------------------------------------------------------*
* @brief Functor for converting CSV data to cuDF data type value.
*---------------------------------------------------------------------------**/
struct ConvertFunctor {
/**---------------------------------------------------------------------------*
 * @brief Template specialization of operator() for types whose values can be
 * converted to 0 or 1 to represent false/true. The conversion is done by
 * checking against the default and user-specified true/false values lists.
*
* It is handled here rather than within convertStrToValue() as that function
* is used by other types (ex. timestamp) that aren't 'booleable'.
*---------------------------------------------------------------------------**/
template <typename T,
typename std::enable_if_t<std::is_integral<T>::value> * = nullptr>
__host__ __device__ __forceinline__ void operator()(
const char *csvData, void *gdfColumnData, long rowIndex, long start,
long end, const ParseOptions &opts) {
T &value{static_cast<T *>(gdfColumnData)[rowIndex]};
// Check for user-specified true/false values first, where the output is
// replaced with 1/0 respectively
const size_t field_len = end - start + 1;
if (serializedTrieContains(opts.trueValuesTrie, csvData + start, field_len)) {
value = 1;
} else if (serializedTrieContains(opts.falseValuesTrie, csvData + start, field_len)) {
value = 0;
} else {
value = convertStrToValue<T>(csvData, start, end, opts);
}
}
/**---------------------------------------------------------------------------*
 * @brief Default template operator() dispatch specialization for all data
 * types (including wrapper types) that are not covered above.
*---------------------------------------------------------------------------**/
template <typename T,
typename std::enable_if_t<!std::is_integral<T>::value> * = nullptr>
__host__ __device__ __forceinline__ void operator()(
const char *csvData, void *gdfColumnData, long rowIndex, long start,
long end, const ParseOptions &opts) {
T &value{static_cast<T *>(gdfColumnData)[rowIndex]};
value = convertStrToValue<T>(csvData, start, end, opts);
}
};
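// Illustrative dispatch (hypothetical field values): a field "True" parsed into
// a GDF_INT32 column hits the integral specialization above and stores 1, while
// "3.14" parsed into a GDF_FLOAT64 column takes the generic path through
// convertStrToValue<double>().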
/**---------------------------------------------------------------------------*
* @brief CUDA kernel iterates over the data until the end of the current field
*
* Also iterates over (one or more) delimiter characters after the field.
*
* @param[in] raw_csv The entire CSV data to read
* @param[in] opts A set of parsing options
* @param[in] pos Offset to start the seeking from
* @param[in] stop Offset of the end of the row
*
* @return long position of the last character in the field, including the
 * delimiter(s) following the field data
*---------------------------------------------------------------------------**/
__device__
long seekFieldEnd(const char *raw_csv, const ParseOptions opts, long pos, long stop) {
bool quotation = false;
while(true){
// Use simple logic to ignore control chars between any quote seq
// Handles nominal cases including doublequotes within quotes, but
// may not output exact failures as PANDAS for malformed fields
if(raw_csv[pos] == opts.quotechar){
quotation = !quotation;
}
else if(quotation==false){
if(raw_csv[pos] == opts.delimiter){
while (opts.multi_delimiter &&
pos < stop &&
raw_csv[pos + 1] == opts.delimiter) {
++pos;
}
break;
}
else if(raw_csv[pos] == opts.terminator){
break;
}
else if(raw_csv[pos] == '\r' && ((pos+1) < stop && raw_csv[pos+1] == '\n')){
stop--;
break;
}
}
if(pos>=stop)
break;
pos++;
}
return pos;
}
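// Illustrative trace (hypothetical row): for the row ab,"c,d",e with delimiter
// ',' and quotechar '"', a call starting at pos=0 stops at the first ',' (pos 2);
// the next call starting at pos=3 skips the ',' inside the quotes and stops at
// the ',' at pos 8.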
/**---------------------------------------------------------------------------*
* @brief CUDA kernel that parses and converts CSV data into cuDF column data.
*
* Data is processed one record at a time
*
* @param[in] raw_csv The entire CSV data to read
* @param[in] opts A set of parsing options
* @param[in] num_records The number of lines/rows of CSV data
* @param[in] num_columns The number of columns of CSV data
* @param[in] parseCol Whether to parse or skip a column
 * @param[in] recStart The start offset of each row within the CSV data of interest
* @param[in] dtype The data type of the column
* @param[out] gdf_data The output column data
* @param[out] valid The bitmaps indicating whether column fields are valid
* @param[out] num_valid The numbers of valid fields in columns
*
* @return gdf_error GDF_SUCCESS upon completion
*---------------------------------------------------------------------------**/
__global__ void convertCsvToGdf(char *raw_csv, const ParseOptions opts,
gdf_size_type num_records, int num_columns,
bool *parseCol, uint64_t *recStart,
gdf_dtype *dtype, void **gdf_data,
gdf_valid_type **valid,
gdf_size_type *num_valid)
{
// thread IDs range per block, so also need the block id
	long rec_id = threadIdx.x + (blockDim.x * blockIdx.x); // this is the entry into the field array - tid is an element within the num_entries array
// we can have more threads than data, make sure we are not past the end of the data
if ( rec_id >= num_records)
return;
long start = recStart[rec_id];
long stop = recStart[rec_id + 1];
long pos = start;
int col = 0;
int actual_col = 0;
while(col<num_columns){
if(start>stop)
break;
pos = seekFieldEnd(raw_csv, opts, pos, stop);
if(parseCol[col]==true){
// check if the entire field is a NaN string - consistent with pandas
const bool is_na = serializedTrieContains(opts.naValuesTrie, raw_csv + start, pos - start);
// Modify start & end to ignore whitespace and quotechars
long tempPos=pos-1;
if(!is_na && dtype[actual_col] != gdf_dtype::GDF_CATEGORY && dtype[actual_col] != gdf_dtype::GDF_STRING){
adjustForWhitespaceAndQuotes(raw_csv, &start, &tempPos, opts.quotechar);
}
if(!is_na && start<=(tempPos)) { // Empty fields are not legal values
// Type dispatcher does not handle GDF_STRINGS
if (dtype[actual_col] == gdf_dtype::GDF_STRING) {
long end = pos;
if(opts.keepquotes==false){
if((raw_csv[start] == opts.quotechar) && (raw_csv[end-1] == opts.quotechar)){
start++;
end--;
}
}
auto str_list = static_cast<string_pair*>(gdf_data[actual_col]);
str_list[rec_id].first = raw_csv + start;
str_list[rec_id].second = end - start;
} else {
cudf::type_dispatcher(
dtype[actual_col], ConvertFunctor{}, raw_csv,
gdf_data[actual_col], rec_id, start, tempPos, opts);
}
// set the valid bitmap - all bits were set to 0 to start
long bitmapIdx = whichBitmap(rec_id); // which bitmap
long bitIdx = whichBit(rec_id); // which bit - over an 8-bit index
setBit(valid[actual_col]+bitmapIdx, bitIdx); // This is done with atomics
atomicAdd(&num_valid[actual_col], 1);
}
else if(dtype[actual_col]==gdf_dtype::GDF_STRING){
auto str_list = static_cast<string_pair*>(gdf_data[actual_col]);
str_list[rec_id].first = nullptr;
str_list[rec_id].second = 0;
}
actual_col++;
}
pos++;
start=pos;
col++;
}
}
/**---------------------------------------------------------------------------*
* @brief Helper function to setup and launch CSV data type detect CUDA kernel.
*
* @param[in] raw_csv The metadata for the CSV data
* @param[out] d_columnData The count for each column data type
*
* @return gdf_error GDF_SUCCESS upon completion
*---------------------------------------------------------------------------**/
gdf_error launch_dataTypeDetection(raw_csv_t *raw_csv,
column_data_t *d_columnData) {
int blockSize; // suggested thread count to use
int minGridSize; // minimum block count required
CUDA_TRY(hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize,
dataTypeDetection));
// Calculate actual block count to use based on records count
int gridSize = (raw_csv->num_records + blockSize - 1) / blockSize;
  hipLaunchKernelGGL((dataTypeDetection), dim3(gridSize), dim3(blockSize), 0, 0,
raw_csv->data.data(), raw_csv->opts, raw_csv->num_records,
raw_csv->num_actual_cols, raw_csv->d_parseCol.data().get(), raw_csv->recStart.data(),
d_columnData);
CUDA_TRY(hipGetLastError());
return GDF_SUCCESS;
}
/**
 * @brief Returns true if the input character is a valid digit.
* Supports both decimal and hexadecimal digits (uppercase and lowercase).
*/
__device__ __forceinline__
bool isDigit(char c, bool is_hex){
if (c >= '0' && c <= '9') return true;
if (is_hex) {
if (c >= 'A' && c <= 'F') return true;
if (c >= 'a' && c <= 'f') return true;
}
return false;
}
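// Examples: isDigit('7', false) and isDigit('b', true) return true, while
// isDigit('b', false) returns false.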
/**
* @brief Returns true if the counters indicate a potentially valid float.
* False positives are possible because positions are not taken into account.
* For example, field "e.123-" would match the pattern.
*/
__device__ __forceinline__
bool isLikeFloat(long len, long digit_cnt, long decimal_cnt, long dash_cnt, long exponent_cnt) {
// Can't have more than one exponent and one decimal point
if (decimal_cnt > 1) return false;
if (exponent_cnt > 1) return false;
// Without the exponent or a decimal point, this is an integer, not a float
if (decimal_cnt == 0 && exponent_cnt == 0) return false;
// Can only have one '-' per component
if (dash_cnt > 1 + exponent_cnt) return false;
// If anything other than these characters is present, it's not a float
if (digit_cnt + decimal_cnt + dash_cnt + exponent_cnt != len) return false;
// Needs at least 1 digit, 2 if exponent is present
if (digit_cnt < 1 + exponent_cnt) return false;
return true;
}
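// Worked example: "1.5e-3" has len=6, digit_cnt=3, decimal_cnt=1, dash_cnt=1
// and exponent_cnt=1, passing every check above, so it is float-like; "-12"
// has no decimal point or exponent and is therefore treated as an integer.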
/**---------------------------------------------------------------------------*
* @brief CUDA kernel that parses and converts CSV data into cuDF column data.
*
* Data is processed in one row/record at a time, so the number of total
* threads (tid) is equal to the number of rows.
*
* @param[in] raw_csv The entire CSV data to read
* @param[in] opts A set of parsing options
* @param[in] num_records The number of lines/rows of CSV data
* @param[in] num_columns The number of columns of CSV data
* @param[in] parseCol Whether to parse or skip a column
 * @param[in] recStart The start offset of each row within the CSV data of interest
* @param[out] d_columnData The count for each column data type
*
* @returns GDF_SUCCESS upon successful computation
*---------------------------------------------------------------------------**/
__global__
void dataTypeDetection(char *raw_csv,
const ParseOptions opts,
gdf_size_type num_records,
int num_columns,
bool *parseCol,
uint64_t *recStart,
column_data_t *d_columnData)
{
// thread IDs range per block, so also need the block id
	long rec_id = threadIdx.x + (blockDim.x * blockIdx.x); // this is the entry into the field array - tid is an element within the num_entries array
// we can have more threads than data, make sure we are not past the end of the data
if ( rec_id >= num_records)
return;
long start = recStart[rec_id];
long stop = recStart[rec_id + 1];
long pos = start;
int col = 0;
int actual_col = 0;
// Going through all the columns of a given record
while(col<num_columns){
if(start>stop)
break;
pos = seekFieldEnd(raw_csv, opts, pos, stop);
// Checking if this is a column that the user wants --- user can filter columns
if(parseCol[col]==true){
long tempPos=pos-1;
// Checking if the record is NULL
if(start>(tempPos)){
atomicAdd(& d_columnData[actual_col].countNULL, 1L);
pos++;
start=pos;
col++;
actual_col++;
continue;
}
long countNumber=0;
long countDecimal=0;
long countSlash=0;
long countDash=0;
long countColon=0;
long countString=0;
long countExponent=0;
// Modify start & end to ignore whitespace and quotechars
// This could possibly result in additional empty fields
adjustForWhitespaceAndQuotes(raw_csv, &start, &tempPos);
const long strLen = tempPos - start + 1;
const bool maybe_hex = ((strLen > 2 && raw_csv[start] == '0' && raw_csv[start + 1] == 'x') ||
(strLen > 3 && raw_csv[start] == '-' && raw_csv[start + 1] == '0' && raw_csv[start + 2] == 'x'));
for(long startPos=start; startPos<=tempPos; startPos++){
if(isDigit(raw_csv[startPos], maybe_hex)){
countNumber++;
continue;
}
// Looking for unique characters that will help identify column types.
switch (raw_csv[startPos]){
case '.':
countDecimal++;break;
case '-':
countDash++; break;
case '/':
countSlash++;break;
case ':':
countColon++;break;
case 'e':
case 'E':
if (!maybe_hex && startPos > start && startPos < tempPos)
countExponent++;break;
default:
countString++;
break;
}
}
// Integers have to have the length of the string
long int_req_number_cnt = strLen;
// Off by one if they start with a minus sign
if(raw_csv[start]=='-' && strLen > 1){
--int_req_number_cnt;
}
// Off by one if they are a hexadecimal number
if(maybe_hex) {
--int_req_number_cnt;
}
			if(strLen==0){ // Whitespace was trimmed in the pre-processing above, so the field can be empty here.
atomicAdd(& d_columnData[actual_col].countNULL, 1L);
}
else if(countNumber==int_req_number_cnt){
				// Checking to see if the integer value requires 8, 16, 32, or 64 bits.
// This will allow us to allocate the exact amount of memory.
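				// For example, the value 70000 is >= 2^15 but < 2^31, so countInt32 is
				// incremented; note that the thresholds compare the converted signed value as-is.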
const auto value = convertStrToValue<int64_t>(raw_csv, start, tempPos, opts);
const size_t field_len = tempPos - start + 1;
if (serializedTrieContains(opts.trueValuesTrie, raw_csv + start, field_len) ||
serializedTrieContains(opts.falseValuesTrie, raw_csv + start, field_len)){
atomicAdd(& d_columnData[actual_col].countInt8, 1L);
}
else if(value >= (1L<<31)){
atomicAdd(& d_columnData[actual_col].countInt64, 1L);
}
else if(value >= (1L<<15)){
atomicAdd(& d_columnData[actual_col].countInt32, 1L);
}
else if(value >= (1L<<7)){
atomicAdd(& d_columnData[actual_col].countInt16, 1L);
}
else{
atomicAdd(& d_columnData[actual_col].countInt8, 1L);
}
}
else if(isLikeFloat(strLen, countNumber, countDecimal, countDash, countExponent)){
atomicAdd(& d_columnData[actual_col].countFloat, 1L);
}
			// The date-time field cannot have more than 3 string characters. As such, if an entry has more than 3 string characters, it is not
			// a date-time field. Also, if a string has multiple decimal points, then it is not a valid number.
else if(countString > 3 || countDecimal > 1){
atomicAdd(& d_columnData[actual_col].countString, 1L);
}
else {
				// A date field can have either one or two '-' or '/'. A legal combination will only have one of them.
				// To simplify the process of auto column detection, we are not covering all the date-time format permutations.
if((countDash>0 && countDash<=2 && countSlash==0)|| (countDash==0 && countSlash>0 && countSlash<=2) ){
if((countColon<=2)){
atomicAdd(& d_columnData[actual_col].countDateAndTime, 1L);
}
else{
atomicAdd(& d_columnData[actual_col].countString, 1L);
}
}
// Default field is string type.
else{
atomicAdd(& d_columnData[actual_col].countString, 1L);
}
}
actual_col++;
}
pos++;
start=pos;
col++;
}
}
cc4f81429ee5452e25fe251762c4d0a537a5f0f1.cu
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * @file csv-reader.cu Code to read CSV data
*
* CSV Reader
*/
#include <cuda_runtime.h>
#include <algorithm>
#include <iostream>
#include <numeric>
#include <string>
#include <vector>
#include <memory>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <thrust/scan.h>
#include <thrust/reduce.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/host_vector.h>
#include "type_conversion.cuh"
#include "datetime_parser.cuh"
#include "cudf.h"
#include "utilities/error_utils.hpp"
#include "utilities/trie.cuh"
#include "utilities/type_dispatcher.hpp"
#include "utilities/cudf_utils.h"
#include <nvstrings/NVStrings.h>
#include "rmm/rmm.h"
#include "rmm/thrust_rmm_allocator.h"
#include "io/comp/io_uncomp.h"
#include "io/utilities/parsing_utils.cuh"
#include "io/utilities/wrapper_utils.hpp"
using std::vector;
using std::string;
/**---------------------------------------------------------------------------*
* @brief Struct used for internal parsing state
*---------------------------------------------------------------------------**/
typedef struct raw_csv_ {
device_buffer<char> data; // on-device: the raw unprocessed CSV data - loaded as a large char * array
device_buffer<uint64_t> recStart; // on-device: Starting position of the records.
ParseOptions opts; // options to control parsing behavior
long num_bytes; // host: the number of bytes in the data
long num_bits; // host: the number of 64-bit bitmaps (different than valid)
gdf_size_type num_records; // host: number of records loaded into device memory, and then number of records to read
// int num_cols; // host: number of columns
int num_active_cols;// host: number of columns that will be return to user.
int num_actual_cols;// host: number of columns in the file --- based on the number of columns in header
vector<gdf_dtype> dtypes; // host: array of dtypes (since gdf_columns are not created until end)
vector<string> col_names; // host: array of column names
thrust::host_vector<bool> h_parseCol; // host : array of booleans stating if column should be parsed in reading process: parseCol[x]=false means that the column x needs to be filtered out.
rmm::device_vector<bool> d_parseCol; // device : array of booleans stating if column should be parsed in reading process: parseCol[x]=false means that the column x needs to be filtered out.
long byte_range_offset; // offset into the data to start parsing
long byte_range_size; // length of the data of interest to parse
gdf_size_type header_row; ///< host: Row index of the header
gdf_size_type nrows; ///< host: Number of rows to read. -1 for all rows
gdf_size_type skiprows; ///< host: Number of rows to skip from the start
gdf_size_type skipfooter; ///< host: Number of rows to skip from the end
std::vector<char> header; ///< host: Header row data, for parsing column names
string prefix; ///< host: Prepended to column ID if there is no header or input column names
rmm::device_vector<SerialTrieNode> d_trueTrie; // device: serialized trie of values to recognize as true
rmm::device_vector<SerialTrieNode> d_falseTrie;// device: serialized trie of values to recognize as false
rmm::device_vector<SerialTrieNode> d_naTrie; // device: serialized trie of NA values
} raw_csv_t;
typedef struct column_data_ {
unsigned long long countFloat;
unsigned long long countDateAndTime;
unsigned long long countString;
unsigned long long countInt8;
unsigned long long countInt16;
unsigned long long countInt32;
unsigned long long countInt64;
gdf_size_type countNULL;
} column_data_t;
using string_pair = std::pair<const char*,size_t>;
//
//---------------create and process ---------------------------------------------
//
gdf_error parseArguments(csv_read_arg *args, raw_csv_t *csv);
// gdf_error getColNamesAndTypes(const char **col_names, const char **dtypes, raw_csv_t *d);
gdf_error inferCompressionType(const char* compression_arg, const char* filepath, string& compression_type);
gdf_error getUncompressedHostData(const char* h_data, size_t num_bytes,
const string& compression,
vector<char>& h_uncomp_data);
gdf_error uploadDataToDevice(const char* h_uncomp_data, size_t h_uncomp_size, raw_csv_t * raw_csv);
gdf_dtype convertStringToDtype(std::string &dtype);
#define checkError(error, txt) if ( error != GDF_SUCCESS) { std::cerr << "ERROR: " << error << " in " << txt << std::endl; return error; }
//
//---------------CUDA Kernel ---------------------------------------------
//
gdf_error launch_dataConvertColumns(raw_csv_t *raw_csv, void **d_gdf,
gdf_valid_type **valid, gdf_dtype *d_dtypes,
gdf_size_type *num_valid);
gdf_error launch_dataTypeDetection(raw_csv_t *raw_csv,
column_data_t *d_columnData);
__global__ void convertCsvToGdf(char *csv, const ParseOptions opts,
gdf_size_type num_records, int num_columns,
bool *parseCol, uint64_t *recStart,
gdf_dtype *dtype, void **gdf_data,
gdf_valid_type **valid,
gdf_size_type *num_valid);
__global__ void dataTypeDetection(char *raw_csv, const ParseOptions opts,
gdf_size_type num_records, int num_columns,
bool *parseCol, uint64_t *recStart,
column_data_t *d_columnData);
//
//---------------CUDA Valid (8 blocks of 8-bits) Bitmap Kernels ---------------------------------------------
//
__device__ long whichBitmap(long record) { return (record/8); }
__device__ int whichBit(long record) { return (record % 8); }
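// atomicOr() operates on 32-bit words, so the byte-wide valid mask below is
// applied by aligning the address down to a 4-byte boundary and shifting the
// mask into the byte's position within that word.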
__inline__ __device__ void validAtomicOR(gdf_valid_type* address, gdf_valid_type val)
{
int32_t *base_address = (int32_t*)((gdf_valid_type*)address - ((size_t)address & 3));
int32_t int_val = (int32_t)val << (((size_t) address & 3) * 8);
atomicOr(base_address, int_val);
}
__device__ void setBit(gdf_valid_type* address, int bit) {
gdf_valid_type bitMask[8] = {1, 2, 4, 8, 16, 32, 64, 128};
validAtomicOR(address, bitMask[bit]);
}
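// Example: record 19 lives in bitmap byte 2 (19/8) at bit 3 (19%8), so the
// kernels call setBit(valid + 2, 3), which ORs in bitMask[3] == 0x08.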
/**---------------------------------------------------------------------------*
 * @brief Estimates the maximum expected length of a row, based on the number
* of columns
*
* If the number of columns is not available, it will return a value large
* enough for most use cases
*
* @param[in] num_columns Number of columns in the CSV file (optional)
*
* @return Estimated maximum size of a row, in bytes
*---------------------------------------------------------------------------**/
constexpr size_t calculateMaxRowSize(int num_columns=0) noexcept {
constexpr size_t max_row_bytes = 16*1024; // 16KB
constexpr size_t column_bytes = 64;
constexpr size_t base_padding = 1024; // 1KB
if (num_columns == 0){
// Use flat size if the number of columns is not known
return max_row_bytes;
}
else {
// Expand the size based on the number of columns, if available
return base_padding + num_columns * column_bytes;
}
}
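// Example: calculateMaxRowSize(10) returns 1024 + 10 * 64 = 1664 bytes, while
// calculateMaxRowSize() with no argument falls back to the flat 16KB estimate.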
/**
* @brief Removes the first and Last quote in the string
*/
string removeQuotes(string str, char quotechar) {
// Exclude first and last quotation char
const size_t first_quote = str.find(quotechar);
if (first_quote != string::npos) {
str.erase(first_quote, 1);
}
const size_t last_quote = str.rfind(quotechar);
if (last_quote != string::npos) {
str.erase(last_quote, 1);
}
return str;
}
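// Example: removeQuotes("\"price\"", '"') returns "price"; a string without
// the quotechar is returned unchanged.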
/**
* @brief Parse the first row to set the column names in the raw_csv parameter
*
* The first row can be either the header row, or the first data row
*
* @param[in,out] raw_csv Structure containing the csv parsing parameters
* and intermediate results
*
* @return gdf_error with error code on failure, otherwise GDF_SUCCESS
*/
gdf_error setColumnNamesFromCsv(raw_csv_t* raw_csv) {
vector<char> first_row = raw_csv->header;
// No header, read the first data row
if (first_row.empty()) {
uint64_t first_row_len{};
// If file only contains one row, raw_csv->recStart[1] is not valid
if (raw_csv->num_records > 1) {
CUDA_TRY(cudaMemcpy(&first_row_len, raw_csv->recStart.data() + 1, sizeof(uint64_t), cudaMemcpyDefault));
}
else {
// File has one row - use the file size for the row size
first_row_len = raw_csv->num_bytes / sizeof(char);
}
first_row.resize(first_row_len);
CUDA_TRY(cudaMemcpy(first_row.data(), raw_csv->data.data(), first_row_len * sizeof(char), cudaMemcpyDefault));
}
int num_cols = 0;
bool quotation = false;
for (size_t pos = 0, prev = 0; pos < first_row.size(); ++pos) {
// Flip the quotation flag if current character is a quotechar
if(first_row[pos] == raw_csv->opts.quotechar) {
quotation = !quotation;
}
// Check if end of a column/row
else if (pos == first_row.size() - 1 ||
(!quotation && first_row[pos] == raw_csv->opts.terminator) ||
(!quotation && first_row[pos] == raw_csv->opts.delimiter)) {
// This is the header, add the column name
if (raw_csv->header_row >= 0) {
// Include the current character, in case the line is not terminated
int col_name_len = pos - prev + 1;
        // Exclude the delimiter/terminator if present
if (first_row[pos] == raw_csv->opts.delimiter || first_row[pos] == raw_csv->opts.terminator) {
--col_name_len;
}
// Also exclude '\r' character at the end of the column name if it's part of the terminator
if (col_name_len > 0 &&
raw_csv->opts.terminator == '\n' &&
first_row[pos] == '\n' &&
first_row[pos - 1] == '\r') {
--col_name_len;
}
const string new_col_name(first_row.data() + prev, col_name_len);
raw_csv->col_names.push_back(removeQuotes(new_col_name, raw_csv->opts.quotechar));
// Stop parsing when we hit the line terminator; relevant when there is a blank line following the header.
// In this case, first_row includes multiple line terminators at the end, as the new recStart belongs
// to a line that comes after the blank line(s)
if (!quotation && first_row[pos] == raw_csv->opts.terminator){
break;
}
}
else {
// This is the first data row, add the automatically generated name
raw_csv->col_names.push_back(raw_csv->prefix + std::to_string(num_cols));
}
num_cols++;
// Skip adjacent delimiters if delim_whitespace is set
while (raw_csv->opts.multi_delimiter &&
pos < first_row.size() &&
first_row[pos] == raw_csv->opts.delimiter &&
first_row[pos + 1] == raw_csv->opts.delimiter) {
++pos;
}
prev = pos + 1;
}
}
return GDF_SUCCESS;
}
/**---------------------------------------------------------------------------*
* @brief Updates the raw_csv_t object with the total number of rows and
* quotation characters in the file
*
 * Does not count the quotations if quotechar is set to '\0'.
*
* @param[in] h_data Pointer to the csv data in host memory
* @param[in] h_size Size of the input data, in bytes
* @param[in,out] raw_csv Structure containing the csv parsing parameters
* and intermediate results
*
* @return gdf_error
*---------------------------------------------------------------------------**/
gdf_error countRecordsAndQuotes(const char *h_data, size_t h_size, raw_csv_t *raw_csv) {
vector<char> chars_to_count{raw_csv->opts.terminator};
if (raw_csv->opts.quotechar != '\0') {
chars_to_count.push_back(raw_csv->opts.quotechar);
}
raw_csv->num_records = countAllFromSet(h_data, h_size, chars_to_count);
// If not starting at an offset, add an extra row to account for the first row in the file
if (raw_csv->byte_range_offset == 0) {
++raw_csv->num_records;
}
return GDF_SUCCESS;
}
/**---------------------------------------------------------------------------*
* @brief Updates the raw_csv_t object with the offset of each row in the file
 * Also adds the position of each quotation character in the file.
 *
 * Does not process the quotations if quotechar is set to '\0'.
*
* @param[in] h_data Pointer to the csv data in host memory
* @param[in] h_size Size of the input data, in bytes
* @param[in,out] raw_csv Structure containing the csv parsing parameters
* and intermediate results
*
* @return gdf_error
*---------------------------------------------------------------------------**/
gdf_error setRecordStarts(const char *h_data, size_t h_size, raw_csv_t *raw_csv) {
// Allocate space to hold the record starting points
const bool last_line_terminated = (h_data[h_size - 1] == raw_csv->opts.terminator);
// If the last line is not terminated, allocate space for the EOF entry (added later)
const gdf_size_type record_start_count = raw_csv->num_records + (last_line_terminated ? 0 : 1);
raw_csv->recStart = device_buffer<uint64_t>(record_start_count);
auto* find_result_ptr = raw_csv->recStart.data();
if (raw_csv->byte_range_offset == 0) {
find_result_ptr++;
CUDA_TRY(cudaMemsetAsync(raw_csv->recStart.data(), 0ull, sizeof(uint64_t)));
}
vector<char> chars_to_find{raw_csv->opts.terminator};
if (raw_csv->opts.quotechar != '\0') {
chars_to_find.push_back(raw_csv->opts.quotechar);
}
// Passing offset = 1 to return positions AFTER the found character
findAllFromSet(h_data, h_size, chars_to_find, 1, find_result_ptr);
	// Previous call stores the record positions as encountered by all threads
// Sort the record positions as subsequent processing may require filtering
// certain rows or other processing on specific records
thrust::sort(rmm::exec_policy()->on(0), raw_csv->recStart.data(), raw_csv->recStart.data() + raw_csv->num_records);
	// Currently, ignoring line terminators within quotes is handled by recording
	// the positions of both, and then filtering out the positions that follow a
	// quotechar or a line terminator within a quotechar pair. The future major
	// refactoring of csv_reader and its kernels will probably use a different tactic.
if (raw_csv->opts.quotechar != '\0') {
vector<uint64_t> h_rec_starts(raw_csv->num_records);
const size_t rec_start_size = sizeof(uint64_t) * (h_rec_starts.size());
CUDA_TRY( cudaMemcpy(h_rec_starts.data(), raw_csv->recStart.data(), rec_start_size, cudaMemcpyDeviceToHost) );
auto recCount = raw_csv->num_records;
bool quotation = false;
for (gdf_size_type i = 1; i < raw_csv->num_records; ++i) {
if (h_data[h_rec_starts[i] - 1] == raw_csv->opts.quotechar) {
quotation = !quotation;
h_rec_starts[i] = raw_csv->num_bytes;
recCount--;
}
else if (quotation) {
h_rec_starts[i] = raw_csv->num_bytes;
recCount--;
}
}
CUDA_TRY( cudaMemcpy(raw_csv->recStart.data(), h_rec_starts.data(), rec_start_size, cudaMemcpyHostToDevice) );
thrust::sort(rmm::exec_policy()->on(0), raw_csv->recStart.data(), raw_csv->recStart.data() + raw_csv->num_records);
raw_csv->num_records = recCount;
}
if (!last_line_terminated){
// Add the EOF as the last record when the terminator is missing in the last line
const uint64_t eof_offset = h_size;
CUDA_TRY(cudaMemcpy(raw_csv->recStart.data() + raw_csv->num_records, &eof_offset, sizeof(uint64_t), cudaMemcpyDefault));
// Update the record count
++raw_csv->num_records;
}
return GDF_SUCCESS;
}
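// Illustrative trace (hypothetical buffer): for a,"x\ny"\nb\n the offsets
// recorded after the two '"' characters and after the quoted '\n' are rewritten
// to num_bytes and sorted to the back, leaving the two real row starts plus the
// trailing end offset.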
/**---------------------------------------------------------------------------*
* @brief Reads CSV-structured data and returns an array of gdf_columns.
*
* @param[in,out] args Structure containing input and output args
*
* @return gdf_error GDF_SUCCESS if successful, otherwise an error code.
*---------------------------------------------------------------------------**/
gdf_error read_csv(csv_read_arg *args)
{
gdf_error error = gdf_error::GDF_SUCCESS;
//-----------------------------------------------------------------------------
// create the CSV data structure - this will be filled in as the CSV data is processed.
// Done first to validate data types
raw_csv_t raw_csv{};
// error = parseArguments(args, raw_csv);
raw_csv.num_actual_cols = args->num_cols;
raw_csv.num_active_cols = args->num_cols;
raw_csv.num_records = 0;
raw_csv.header_row = args->header;
raw_csv.skiprows = args->skiprows;
raw_csv.skipfooter = args->skipfooter;
raw_csv.nrows = args->nrows;
raw_csv.prefix = args->prefix == nullptr ? "" : string(args->prefix);
if (args->delim_whitespace) {
raw_csv.opts.delimiter = ' ';
raw_csv.opts.multi_delimiter = true;
} else {
raw_csv.opts.delimiter = args->delimiter;
raw_csv.opts.multi_delimiter = false;
}
if (args->windowslinetermination) {
raw_csv.opts.terminator = '\n';
} else {
raw_csv.opts.terminator = args->lineterminator;
}
if (args->quotechar != '\0' && args->quoting != QUOTE_NONE) {
raw_csv.opts.quotechar = args->quotechar;
raw_csv.opts.keepquotes = false;
raw_csv.opts.doublequote = args->doublequote;
} else {
raw_csv.opts.quotechar = '\0';
raw_csv.opts.keepquotes = true;
raw_csv.opts.doublequote = false;
}
raw_csv.opts.skipblanklines = args->skip_blank_lines;
raw_csv.opts.comment = args->comment;
raw_csv.opts.dayfirst = args->dayfirst;
raw_csv.opts.decimal = args->decimal;
raw_csv.opts.thousands = args->thousands;
if (raw_csv.opts.decimal == raw_csv.opts.delimiter) {
checkError(GDF_INVALID_API_CALL, "Decimal point cannot be the same as the delimiter");
}
if (raw_csv.opts.thousands == raw_csv.opts.delimiter) {
checkError(GDF_INVALID_API_CALL, "Thousands separator cannot be the same as the delimiter");
}
string compression_type;
error = inferCompressionType(args->compression, args->filepath_or_buffer, compression_type);
checkError(error, "call to inferCompressionType");
raw_csv.byte_range_offset = args->byte_range_offset;
raw_csv.byte_range_size = args->byte_range_size;
if (raw_csv.byte_range_offset > 0 || raw_csv.byte_range_size > 0) {
if (raw_csv.nrows >= 0 || raw_csv.skiprows > 0 || raw_csv.skipfooter > 0) {
checkError(GDF_INVALID_API_CALL,
"Cannot manually limit rows to be read when using the byte range parameter");
}
if (compression_type != "none") {
checkError(GDF_INVALID_API_CALL,
"Cannot read compressed input when using the byte range parameter");
}
}
	// Handle user-defined boolean values, whereby field data is substituted
// with true/false values; CUDF booleans are int types of 0 or 1
vector<string> true_values{"True", "TRUE"};
if (args->true_values != nullptr && args->num_true_values > 0) {
for (int i = 0; i < args->num_true_values; ++i) {
true_values.emplace_back(args->true_values[i]);
}
}
raw_csv.d_trueTrie = createSerializedTrie(true_values);
raw_csv.opts.trueValuesTrie = raw_csv.d_trueTrie.data().get();
vector<string> false_values{"False", "FALSE"};
if (args->false_values != nullptr && args->num_false_values > 0) {
for (int i = 0; i < args->num_false_values; ++i) {
false_values.emplace_back(args->false_values[i]);
}
}
raw_csv.d_falseTrie = createSerializedTrie(false_values);
raw_csv.opts.falseValuesTrie = raw_csv.d_falseTrie.data().get();
if (args->na_filter &&
(args->keep_default_na || (args->na_values != nullptr && args->num_na_values > 0))) {
vector<string> na_values{
"#N/A", "#N/A N/A", "#NA", "-1.#IND",
"-1.#QNAN", "-NaN", "-nan", "1.#IND",
"1.#QNAN", "N/A", "NA", "NULL",
"NaN", "n/a", "nan", "null"};
if(!args->keep_default_na){
na_values.clear();
}
if (args->na_values != nullptr && args->num_na_values > 0) {
for (int i = 0; i < args->num_na_values; ++i) {
na_values.emplace_back(args->na_values[i]);
}
}
raw_csv.d_naTrie = createSerializedTrie(na_values);
raw_csv.opts.naValuesTrie = raw_csv.d_naTrie.data().get();
}
args->data = nullptr;
//-----------------------------------------------------------------------------
// memory map in the data
void * map_data = NULL;
size_t map_size = 0;
size_t map_offset = 0;
int fd = 0;
if (args->input_data_form == gdf_csv_input_form::FILE_PATH)
{
fd = open(args->filepath_or_buffer, O_RDONLY );
if (fd < 0) { close(fd); checkError(GDF_FILE_ERROR, "Error opening file"); }
struct stat st{};
if (fstat(fd, &st)) { close(fd); checkError(GDF_FILE_ERROR, "cannot stat file"); }
const auto file_size = st.st_size;
const auto page_size = sysconf(_SC_PAGESIZE);
if (args->byte_range_offset >= (size_t)file_size) {
close(fd);
checkError(GDF_INVALID_API_CALL, "The byte_range offset is larger than the file size");
}
// Have to align map offset to page size
map_offset = (args->byte_range_offset/page_size)*page_size;
// Set to rest-of-the-file size, will reduce based on the byte range size
raw_csv.num_bytes = map_size = file_size - map_offset;
// Include the page padding in the mapped size
const size_t page_padding = args->byte_range_offset - map_offset;
const size_t padded_byte_range_size = raw_csv.byte_range_size + page_padding;
if (raw_csv.byte_range_size != 0 && padded_byte_range_size < map_size) {
// Need to make sure that w/ padding we don't overshoot the end of file
map_size = min(padded_byte_range_size + calculateMaxRowSize(args->num_cols), map_size);
}
// Ignore page padding for parsing purposes
raw_csv.num_bytes = map_size - page_padding;
map_data = mmap(0, map_size, PROT_READ, MAP_PRIVATE, fd, map_offset);
if (map_data == MAP_FAILED || map_size==0) { close(fd); checkError(GDF_C_ERROR, "Error mapping file"); }
}
else if (args->input_data_form == gdf_csv_input_form::HOST_BUFFER)
{
map_data = (void *)args->filepath_or_buffer;
raw_csv.num_bytes = map_size = args->buffer_size;
}
else { checkError(GDF_C_ERROR, "invalid input type"); }
const char* h_uncomp_data;
size_t h_uncomp_size = 0;
// Used when the input data is compressed, to ensure the allocated uncompressed data is freed
vector<char> h_uncomp_data_owner;
if (compression_type == "none") {
// Do not use the owner vector here to avoid copying the whole file to the heap
h_uncomp_data = (const char*)map_data + (args->byte_range_offset - map_offset);
h_uncomp_size = raw_csv.num_bytes;
}
else {
error = getUncompressedHostData( (const char *)map_data, map_size, compression_type, h_uncomp_data_owner);
checkError(error, "call to getUncompressedHostData");
h_uncomp_data = h_uncomp_data_owner.data();
h_uncomp_size = h_uncomp_data_owner.size();
}
assert(h_uncomp_data != nullptr);
assert(h_uncomp_size != 0);
error = countRecordsAndQuotes(h_uncomp_data, h_uncomp_size, &raw_csv);
checkError(error, "call to count the number of rows");
error = setRecordStarts(h_uncomp_data, h_uncomp_size, &raw_csv);
checkError(error, "call to store the row offsets");
error = uploadDataToDevice(h_uncomp_data, h_uncomp_size, &raw_csv);
checkError(error, "call to upload the CSV data to the device");
//-----------------------------------------------------------------------------
//--- done with host data
if (args->input_data_form == gdf_csv_input_form::FILE_PATH)
{
close(fd);
munmap(map_data, map_size);
}
//-----------------------------------------------------------------------------
//-- Populate the header
// Check if the user gave us a list of column names
if(args->names == nullptr) {
error = setColumnNamesFromCsv(&raw_csv);
if (error != GDF_SUCCESS) {
return error;
}
const int h_num_cols = raw_csv.col_names.size();
		// Initialize a boolean array that states whether a column needs to be read or filtered out.
raw_csv.h_parseCol = thrust::host_vector<bool>(h_num_cols, true);
// Rename empty column names to "Unnamed: col_index"
for (size_t col_idx = 0; col_idx < raw_csv.col_names.size(); ++col_idx) {
if (raw_csv.col_names[col_idx].empty()) {
raw_csv.col_names[col_idx] = string("Unnamed: ") + std::to_string(col_idx);
}
}
int h_dup_cols_removed = 0;
// Looking for duplicates
for (auto it = raw_csv.col_names.begin(); it != raw_csv.col_names.end(); it++){
bool found_dupe = false;
for (auto it2 = (it+1); it2 != raw_csv.col_names.end(); it2++){
if (*it==*it2){
found_dupe=true;
break;
}
}
if(found_dupe){
int count=1;
for (auto it2 = (it+1); it2 != raw_csv.col_names.end(); it2++){
if (*it==*it2){
if(args->mangle_dupe_cols){
// Replace all the duplicates of column X with X.1,X.2,... First appearance stays as X.
std::string newColName = *it2;
newColName += "." + std::to_string(count);
count++;
*it2 = newColName;
} else{
// All duplicate fields will be ignored.
int pos=std::distance(raw_csv.col_names.begin(), it2);
raw_csv.h_parseCol[pos]=false;
h_dup_cols_removed++;
}
}
}
}
}
raw_csv.num_actual_cols = h_num_cols; // Actual number of columns in the CSV file
		raw_csv.num_active_cols = h_num_cols-h_dup_cols_removed;	// Number of fields that remain after removing duplicated columns
}
else {
raw_csv.h_parseCol = thrust::host_vector<bool>(args->num_cols, true);
for (int i = 0; i<raw_csv.num_actual_cols; i++){
std::string col_name = args->names[i];
raw_csv.col_names.push_back(col_name);
}
}
	// User can specify which columns to read, either by index or by name
if (args->use_cols_int!=NULL || args->use_cols_char!=NULL){
if(args->use_cols_int!=NULL){
for (int i = 0; i<raw_csv.num_actual_cols; i++)
raw_csv.h_parseCol[i]=false;
for(int i=0; i < args->use_cols_int_len; i++){
int pos = args->use_cols_int[i];
raw_csv.h_parseCol[pos]=true;
}
raw_csv.num_active_cols = args->use_cols_int_len;
}else{
for (int i = 0; i<raw_csv.num_actual_cols; i++)
raw_csv.h_parseCol[i]=false;
int countFound=0;
for(int i=0; i < args->use_cols_char_len; i++){
std::string colName(args->use_cols_char[i]);
for (auto it = raw_csv.col_names.begin(); it != raw_csv.col_names.end(); it++){
if(colName==*it){
countFound++;
int pos=std::distance(raw_csv.col_names.begin(), it);
raw_csv.h_parseCol[pos]=true;
break;
}
}
}
raw_csv.num_active_cols = countFound;
}
}
raw_csv.d_parseCol = raw_csv.h_parseCol;
//-----------------------------------------------------------------------------
//--- Auto detect types of the vectors
if(args->dtype==NULL){
if (raw_csv.num_records == 0) {
checkError(GDF_INVALID_API_CALL, "read_csv: no data available for data type inference");
}
vector<column_data_t> h_ColumnData(raw_csv.num_active_cols);
device_buffer<column_data_t> d_ColumnData(raw_csv.num_active_cols);
CUDA_TRY( cudaMemset(d_ColumnData.data(), 0, (sizeof(column_data_t) * (raw_csv.num_active_cols)) ) ) ;
launch_dataTypeDetection(&raw_csv, d_ColumnData.data());
CUDA_TRY( cudaMemcpy(h_ColumnData.data(), d_ColumnData.data(), sizeof(column_data_t) * (raw_csv.num_active_cols), cudaMemcpyDeviceToHost));
// host: array of dtypes (since gdf_columns are not created until end)
vector<gdf_dtype> d_detectedTypes;
raw_csv.dtypes.clear();
for(int col = 0; col < raw_csv.num_active_cols; col++){
unsigned long long countInt = h_ColumnData[col].countInt8+h_ColumnData[col].countInt16+
h_ColumnData[col].countInt32+h_ColumnData[col].countInt64;
if (h_ColumnData[col].countNULL == raw_csv.num_records){
d_detectedTypes.push_back(GDF_INT8); // Entire column is NULL. Allocating the smallest amount of memory
} else if(h_ColumnData[col].countString>0L){
d_detectedTypes.push_back(GDF_STRING); // For auto-detection, we are currently not supporting strings.
} else if(h_ColumnData[col].countDateAndTime>0L){
d_detectedTypes.push_back(GDF_DATE64);
} else if(h_ColumnData[col].countFloat > 0L ||
(h_ColumnData[col].countFloat==0L && countInt >0L && h_ColumnData[col].countNULL >0L) ) {
				// The second condition has been added to conform to PANDAS, which states that a column of
				// integers with even a single NULL record needs to be treated as floats.
d_detectedTypes.push_back(GDF_FLOAT64);
}
else {
d_detectedTypes.push_back(GDF_INT64);
}
}
raw_csv.dtypes=d_detectedTypes;
}
else{
for ( int x = 0; x < raw_csv.num_actual_cols; x++) {
std::string temp_type = args->dtype[x];
gdf_dtype col_dtype = GDF_invalid;
if(temp_type.find(':') != std::string::npos){
for (auto it = raw_csv.col_names.begin(); it != raw_csv.col_names.end(); it++){
std::size_t idx = temp_type.find(':');
if(temp_type.substr( 0, idx) == *it){
std::string temp_dtype = temp_type.substr( idx +1);
col_dtype = convertStringToDtype(temp_dtype);
break;
}
}
}
else{
col_dtype = convertStringToDtype( temp_type );
}
if (col_dtype == GDF_invalid)
return GDF_UNSUPPORTED_DTYPE;
raw_csv.dtypes.push_back(col_dtype);
}
}
// Alloc output; columns' data memory is still expected for empty dataframe
std::vector<gdf_column_wrapper> columns;
for (int col = 0, active_col = 0; col < raw_csv.num_actual_cols; ++col) {
if (raw_csv.h_parseCol[col]) {
// When dtypes are inferred, it contains only active column values
auto dtype = raw_csv.dtypes[args->dtype == nullptr ? active_col : col];
columns.emplace_back(raw_csv.num_records, dtype,
gdf_dtype_extra_info{TIME_UNIT_NONE},
raw_csv.col_names[col]);
CUDF_EXPECTS(columns.back().allocate() == GDF_SUCCESS, "Cannot allocate columns");
active_col++;
}
}
// Convert CSV input to cuDF output
if (raw_csv.num_records != 0) {
thrust::host_vector<gdf_dtype> h_dtypes(raw_csv.num_active_cols);
thrust::host_vector<void*> h_data(raw_csv.num_active_cols);
thrust::host_vector<gdf_valid_type*> h_valid(raw_csv.num_active_cols);
for (int i = 0; i < raw_csv.num_active_cols; ++i) {
h_dtypes[i] = columns[i]->dtype;
h_data[i] = columns[i]->data;
h_valid[i] = columns[i]->valid;
}
rmm::device_vector<gdf_dtype> d_dtypes = h_dtypes;
rmm::device_vector<void*> d_data = h_data;
rmm::device_vector<gdf_valid_type*> d_valid = h_valid;
rmm::device_vector<gdf_size_type> d_valid_counts(raw_csv.num_active_cols, 0);
CUDF_EXPECTS(
launch_dataConvertColumns(&raw_csv, d_data.data().get(),
d_valid.data().get(), d_dtypes.data().get(),
d_valid_counts.data().get()) == GDF_SUCCESS,
"Cannot convert CSV data to cuDF columns");
CUDA_TRY(cudaStreamSynchronize(0));
thrust::host_vector<gdf_size_type> h_valid_counts = d_valid_counts;
for (int i = 0; i < raw_csv.num_active_cols; ++i) {
columns[i]->null_count = columns[i]->size - h_valid_counts[i];
}
}
for (int i = 0; i < raw_csv.num_active_cols; ++i) {
if (columns[i]->dtype == GDF_STRING) {
std::unique_ptr<NVStrings, decltype(&NVStrings::destroy)> str_data(
NVStrings::create_from_index(static_cast<string_pair *>(columns[i]->data), columns[i]->size),
&NVStrings::destroy);
RMM_TRY(RMM_FREE(columns[i]->data, 0));
// PANDAS' default behavior of enabling doublequote for two consecutive
// quotechars in quoted fields results in reduction to a single quotechar
if ((raw_csv.opts.quotechar != '\0') &&
(raw_csv.opts.doublequote == true)) {
const std::string quotechar(1, raw_csv.opts.quotechar);
const std::string doublequotechar(2, raw_csv.opts.quotechar);
columns[i]->data = str_data->replace(doublequotechar.c_str(), quotechar.c_str());
}
else {
columns[i]->data = str_data.release();
}
}
}
// Transfer ownership to raw pointer output arguments
args->data = (gdf_column **)malloc(sizeof(gdf_column *) * raw_csv.num_active_cols);
for (int i = 0; i < raw_csv.num_active_cols; ++i) {
args->data[i] = columns[i].release();
}
args->num_cols_out = raw_csv.num_active_cols;
args->num_rows_out = raw_csv.num_records;
return error;
}
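/* Illustrative call sketch (not part of the original source); the fields below
 * mirror the csv_read_arg members consumed above:
 *
 *   csv_read_arg args{};
 *   args.input_data_form    = gdf_csv_input_form::FILE_PATH;
 *   args.filepath_or_buffer = "data.csv";
 *   args.delimiter          = ',';
 *   args.lineterminator     = '\n';
 *   args.quotechar          = '"';
 *   args.quoting            = QUOTE_MINIMAL;  // any value other than QUOTE_NONE
 *   args.doublequote        = true;
 *   args.header             = 0;    // first row holds the column names
 *   args.nrows              = -1;   // read all rows
 *   args.skiprows = args.skipfooter = 0;
 *   args.na_filter          = true;
 *   args.keep_default_na    = true;
 *
 *   if (read_csv(&args) == GDF_SUCCESS) {
 *     // args.data now holds args.num_cols_out gdf_column pointers with
 *     // args.num_rows_out rows each.
 *   }
 */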
/*
 * Converts the data type, passed in as a string, into the corresponding gdf_dtype enum
*/
gdf_dtype convertStringToDtype(std::string &dtype) {
if (dtype.compare( "str") == 0) return GDF_STRING;
if (dtype.compare( "date") == 0) return GDF_DATE64;
if (dtype.compare( "date32") == 0) return GDF_DATE32;
if (dtype.compare( "date64") == 0) return GDF_DATE64;
if (dtype.compare( "timestamp") == 0) return GDF_TIMESTAMP;
if (dtype.compare( "category") == 0) return GDF_CATEGORY;
if (dtype.compare( "float") == 0) return GDF_FLOAT32;
if (dtype.compare( "float32") == 0) return GDF_FLOAT32;
if (dtype.compare( "float64") == 0) return GDF_FLOAT64;
if (dtype.compare( "double") == 0) return GDF_FLOAT64;
if (dtype.compare( "short") == 0) return GDF_INT16;
if (dtype.compare( "int") == 0) return GDF_INT32;
if (dtype.compare( "int32") == 0) return GDF_INT32;
if (dtype.compare( "int64") == 0) return GDF_INT64;
if (dtype.compare( "long") == 0) return GDF_INT64;
return GDF_invalid;
}
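// Example: both "float64" and "double" map to GDF_FLOAT64, while an
// unrecognized name such as "uint8" yields GDF_invalid.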
/**---------------------------------------------------------------------------*
* @brief Infer the compression type from the compression parameter and
* the input file name
*
* Returns "none" if the input is not compressed.
*
* @param[in] compression_arg Input string that is potentially describing
* the compression type. Can also be nullptr, "none", or "infer"
* @param[in] filepath path + name of the input file
* @param[out] compression_type String describing the inferred compression type
*
* @return gdf_error with error code on failure, otherwise GDF_SUCCESS
*---------------------------------------------------------------------------**/
gdf_error inferCompressionType(const char* compression_arg, const char* filepath, string& compression_type)
{
if (compression_arg && 0 == strcasecmp(compression_arg, "none")) {
compression_arg = nullptr;
}
if (compression_arg && 0 == strcasecmp(compression_arg, "infer"))
{
const char *file_ext = strrchr(filepath, '.');
compression_arg = nullptr;
if (file_ext)
{
if (!strcasecmp(file_ext, ".gz"))
compression_arg = "gzip";
else if (!strcasecmp(file_ext, ".zip"))
compression_arg = "zip";
else if (!strcasecmp(file_ext, ".bz2"))
compression_arg = "bz2";
else if (!strcasecmp(file_ext, ".xz"))
compression_arg = "xz";
else {
// TODO: return error here
}
}
}
compression_type = compression_arg == nullptr? "none":string(compression_arg);
return GDF_SUCCESS;
}
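// Example: ("infer", "data.csv.gz") resolves to "gzip", while a nullptr or
// "none" compression argument resolves to "none" regardless of the extension.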
/**---------------------------------------------------------------------------*
* @brief Uncompresses the input data and stores the allocated result into
* a vector.
*
* @param[in] h_data Pointer to the csv data in host memory
* @param[in] num_bytes Size of the input data, in bytes
* @param[in] compression String describing the compression type
* @param[out] h_uncomp_data Vector containing the output uncompressed data
*
* @return gdf_error with error code on failure, otherwise GDF_SUCCESS
*---------------------------------------------------------------------------**/
gdf_error getUncompressedHostData(const char* h_data, size_t num_bytes, const string& compression, vector<char>& h_uncomp_data)
{
int comp_type = IO_UNCOMP_STREAM_TYPE_INFER;
if (compression == "gzip")
comp_type = IO_UNCOMP_STREAM_TYPE_GZIP;
else if (compression == "zip")
comp_type = IO_UNCOMP_STREAM_TYPE_ZIP;
else if (compression == "bz2")
comp_type = IO_UNCOMP_STREAM_TYPE_BZIP2;
else if (compression == "xz")
comp_type = IO_UNCOMP_STREAM_TYPE_XZ;
return io_uncompress_single_h2d(h_data, num_bytes, comp_type, h_uncomp_data);
}
/**---------------------------------------------------------------------------*
* @brief Uploads the relevant segment of the input csv data onto the GPU.
*
* Only rows that need to be read are copied to the GPU, based on parameters
 * like nrows, skiprows, skipfooter.
* Also updates the array of record starts to match the device data offset.
*
* @param[in] h_uncomp_data Pointer to the uncompressed csv data in host memory
* @param[in] h_uncomp_size Size of the input data, in bytes
* @param[in,out] raw_csv Structure containing the csv parsing parameters
* and intermediate results
*
* @return gdf_error with error code on failure, otherwise GDF_SUCCESS
*---------------------------------------------------------------------------**/
gdf_error uploadDataToDevice(const char *h_uncomp_data, size_t h_uncomp_size,
raw_csv_t *raw_csv) {
// Exclude the rows that are to be skipped from the start
GDF_REQUIRE(raw_csv->num_records > raw_csv->skiprows, GDF_INVALID_API_CALL);
const auto first_row = raw_csv->skiprows;
raw_csv->num_records = raw_csv->num_records - first_row;
std::vector<uint64_t> h_rec_starts(raw_csv->num_records);
CUDA_TRY(cudaMemcpy(h_rec_starts.data(), raw_csv->recStart.data() + first_row,
sizeof(uint64_t) * h_rec_starts.size(),
cudaMemcpyDefault));
// Trim lines that are outside range, but keep one greater for the end offset
if (raw_csv->byte_range_size != 0) {
auto it = h_rec_starts.end() - 1;
while (it >= h_rec_starts.begin() &&
*it > uint64_t(raw_csv->byte_range_size)) {
--it;
}
if ((it + 2) < h_rec_starts.end()) {
h_rec_starts.erase(it + 2, h_rec_starts.end());
}
}
  // Discard blank lines, fully commented lines, or both.
  // If only one of the two is handled, the unused match character falls back
  // to the other one so that it never matches '\0' (end-of-file must be kept)
if (raw_csv->opts.skipblanklines || raw_csv->opts.comment != '\0') {
const auto match_newline = raw_csv->opts.skipblanklines ? raw_csv->opts.terminator
: raw_csv->opts.comment;
const auto match_comment = raw_csv->opts.comment != '\0' ? raw_csv->opts.comment
: match_newline;
const auto match_return = (raw_csv->opts.skipblanklines &&
raw_csv->opts.terminator == '\n') ? '\r'
: match_comment;
h_rec_starts.erase(
std::remove_if(h_rec_starts.begin(), h_rec_starts.end(),
[&](uint64_t i) {
return (h_uncomp_data[i] == match_newline ||
h_uncomp_data[i] == match_return ||
h_uncomp_data[i] == match_comment);
}),
h_rec_starts.end());
}
raw_csv->num_records = h_rec_starts.size();
// Exclude the rows before the header row (inclusive)
// But copy the header data for parsing the column names later (if necessary)
if (raw_csv->header_row >= 0) {
raw_csv->header.assign(
h_uncomp_data + h_rec_starts[raw_csv->header_row],
h_uncomp_data + h_rec_starts[raw_csv->header_row + 1]);
h_rec_starts.erase(h_rec_starts.begin(),
h_rec_starts.begin() + raw_csv->header_row + 1);
raw_csv->num_records = h_rec_starts.size();
}
// Exclude the rows that exceed past the requested number
if (raw_csv->nrows >= 0 && raw_csv->nrows < raw_csv->num_records) {
h_rec_starts.resize(raw_csv->nrows + 1); // include end offset
raw_csv->num_records = h_rec_starts.size();
}
// Exclude the rows that are to be skipped from the end
if (raw_csv->skipfooter > 0) {
h_rec_starts.resize(h_rec_starts.size() - raw_csv->skipfooter);
raw_csv->num_records = h_rec_starts.size();
}
// Check that there is actual data to parse
GDF_REQUIRE(raw_csv->num_records > 0, GDF_INVALID_API_CALL);
const auto start_offset = h_rec_starts.front();
const auto end_offset = h_rec_starts.back();
raw_csv->num_bytes = end_offset - start_offset;
assert(raw_csv->num_bytes <= h_uncomp_size);
raw_csv->num_bits = (raw_csv->num_bytes + 63) / 64;
// Resize and upload the rows of interest
raw_csv->recStart.resize(raw_csv->num_records);
CUDA_TRY(cudaMemcpy(raw_csv->recStart.data(), h_rec_starts.data(),
sizeof(uint64_t) * raw_csv->num_records,
cudaMemcpyDefault));
// Upload the raw data that is within the rows of interest
raw_csv->data = device_buffer<char>(raw_csv->num_bytes);
CUDA_TRY(cudaMemcpy(raw_csv->data.data(), h_uncomp_data + start_offset,
raw_csv->num_bytes, cudaMemcpyHostToDevice));
// Adjust row start positions to account for the data subcopy
thrust::transform(rmm::exec_policy()->on(0), raw_csv->recStart.data(),
raw_csv->recStart.data() + raw_csv->num_records,
thrust::make_constant_iterator(start_offset),
raw_csv->recStart.data(), thrust::minus<uint64_t>());
// The array of row offsets includes EOF
// reduce the number of records by one to exclude it from the row count
raw_csv->num_records--;
return GDF_SUCCESS;
}
//----------------------------------------------------------------------------------------------------------------
// CUDA Kernels
//----------------------------------------------------------------------------------------------------------------
/**---------------------------------------------------------------------------*
* @brief Helper function to setup and launch CSV parsing CUDA kernel.
*
* @param[in,out] raw_csv The metadata for the CSV data
* @param[out] gdf The output column data
* @param[out] valid The bitmaps indicating whether column fields are valid
* @param[out] str_cols The start/end offsets for string data types
* @param[out] num_valid The numbers of valid fields in columns
*
* @return gdf_error GDF_SUCCESS upon completion
*---------------------------------------------------------------------------**/
gdf_error launch_dataConvertColumns(raw_csv_t *raw_csv, void **gdf,
gdf_valid_type **valid, gdf_dtype *d_dtypes,
gdf_size_type *num_valid) {
int blockSize; // suggested thread count to use
int minGridSize; // minimum block count required
CUDA_TRY(cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize,
convertCsvToGdf));
// Calculate actual block count to use based on records count
int gridSize = (raw_csv->num_records + blockSize - 1) / blockSize;
convertCsvToGdf <<< gridSize, blockSize >>> (
raw_csv->data.data(), raw_csv->opts, raw_csv->num_records,
raw_csv->num_actual_cols, raw_csv->d_parseCol.data().get(), raw_csv->recStart.data(),
d_dtypes, gdf, valid, num_valid);
CUDA_TRY(cudaGetLastError());
return GDF_SUCCESS;
}
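/**
* Usage sketch (illustrative; buffer names are hypothetical): with a populated
* raw_csv_t and pre-allocated device arrays, the helper is driven as
*   gdf_error err = launch_dataConvertColumns(&raw_csv, d_gdf_cols, d_valids,
*                                             d_dtypes, d_num_valid);
* The occupancy-suggested blockSize is only a hint; the grid is then sized so
* that gridSize * blockSize >= num_records, i.e. one thread per record.
*/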
/**---------------------------------------------------------------------------*
* @brief Functor for converting CSV data to cuDF data type value.
*---------------------------------------------------------------------------**/
struct ConvertFunctor {
/**---------------------------------------------------------------------------*
* @brief Template specialization of operator() for types whose values can be
* converted to 0 or 1 to represent false/true. The conversion is done by
* checking against the default and user-specified true/false value lists.
*
* It is handled here rather than within convertStrToValue() as that function
* is used by other types (ex. timestamp) that aren't 'booleable'.
*---------------------------------------------------------------------------**/
template <typename T,
typename std::enable_if_t<std::is_integral<T>::value> * = nullptr>
__host__ __device__ __forceinline__ void operator()(
const char *csvData, void *gdfColumnData, long rowIndex, long start,
long end, const ParseOptions &opts) {
T &value{static_cast<T *>(gdfColumnData)[rowIndex]};
// Check for user-specified true/false values first, where the output is
// replaced with 1/0 respectively
const size_t field_len = end - start + 1;
if (serializedTrieContains(opts.trueValuesTrie, csvData + start, field_len)) {
value = 1;
} else if (serializedTrieContains(opts.falseValuesTrie, csvData + start, field_len)) {
value = 0;
} else {
value = convertStrToValue<T>(csvData, start, end, opts);
}
}
/**---------------------------------------------------------------------------*
* @brief Default operator() specialization for all data types (including
* wrapper types) not covered by the integral specialization above.
*---------------------------------------------------------------------------**/
template <typename T,
typename std::enable_if_t<!std::is_integral<T>::value> * = nullptr>
__host__ __device__ __forceinline__ void operator()(
const char *csvData, void *gdfColumnData, long rowIndex, long start,
long end, const ParseOptions &opts) {
T &value{static_cast<T *>(gdfColumnData)[rowIndex]};
value = convertStrToValue<T>(csvData, start, end, opts);
}
};
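// Dispatch sketch: cudf::type_dispatcher maps the runtime gdf_dtype to a
// compile-time T and invokes the matching operator() above, e.g. (illustrative)
//   cudf::type_dispatcher(GDF_INT32, ConvertFunctor{}, csvData, columnData,
//                         rowIndex, start, end, opts);
// For integral T, fields found in opts.trueValuesTrie / falseValuesTrie
// short-circuit to 1 / 0 before any numeric conversion is attempted.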
/**---------------------------------------------------------------------------*
* @brief CUDA kernel iterates over the data until the end of the current field
*
* Also iterates over (one or more) delimiter characters after the field.
*
* @param[in] raw_csv The entire CSV data to read
* @param[in] opts A set of parsing options
* @param[in] pos Offset to start the seeking from
* @param[in] stop Offset of the end of the row
*
* @return long position of the last character in the field, including the
* delimiter(s) following the field data
*---------------------------------------------------------------------------**/
__device__
long seekFieldEnd(const char *raw_csv, const ParseOptions opts, long pos, long stop) {
bool quotation = false;
while(true){
// Use simple logic to ignore control chars between any quote seq
// Handles nominal cases including doublequotes within quotes, but
// may not match PANDAS exactly for malformed fields
if(raw_csv[pos] == opts.quotechar){
quotation = !quotation;
}
else if(quotation==false){
if(raw_csv[pos] == opts.delimiter){
while (opts.multi_delimiter &&
pos < stop &&
raw_csv[pos + 1] == opts.delimiter) {
++pos;
}
break;
}
else if(raw_csv[pos] == opts.terminator){
break;
}
else if(raw_csv[pos] == '\r' && ((pos+1) < stop && raw_csv[pos+1] == '\n')){
stop--;
break;
}
}
if(pos>=stop)
break;
pos++;
}
return pos;
}
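// Worked example (illustrative): scanning the row  a,"b,c",d  with ','
// delimiter and '"' quotechar, successive calls stop on the ',' after `a`,
// then on the ',' after the closing quote of "b,c" -- the comma inside the
// quotes only toggles `quotation` -- and finally on the terminator after `d`.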
/**---------------------------------------------------------------------------*
* @brief CUDA kernel that parses and converts CSV data into cuDF column data.
*
* Data is processed one record per thread
*
* @param[in] raw_csv The entire CSV data to read
* @param[in] opts A set of parsing options
* @param[in] num_records The number of lines/rows of CSV data
* @param[in] num_columns The number of columns of CSV data
* @param[in] parseCol Whether to parse or skip a column
* @param[in] recStart The start offset of each row within the CSV data of interest
* @param[in] dtype The data type of the column
* @param[out] gdf_data The output column data
* @param[out] valid The bitmaps indicating whether column fields are valid
* @param[out] num_valid The number of valid fields in each column
*
* @return gdf_error GDF_SUCCESS upon completion
*---------------------------------------------------------------------------**/
__global__ void convertCsvToGdf(char *raw_csv, const ParseOptions opts,
gdf_size_type num_records, int num_columns,
bool *parseCol, uint64_t *recStart,
gdf_dtype *dtype, void **gdf_data,
gdf_valid_type **valid,
gdf_size_type *num_valid)
{
// thread IDs range per block, so also need the block id
long rec_id = threadIdx.x + (blockDim.x * blockIdx.x); // entry into the record array - each thread handles one record
// we can have more threads than data, make sure we are not past the end of the data
if ( rec_id >= num_records)
return;
long start = recStart[rec_id];
long stop = recStart[rec_id + 1];
long pos = start;
int col = 0;
int actual_col = 0;
while(col<num_columns){
if(start>stop)
break;
pos = seekFieldEnd(raw_csv, opts, pos, stop);
if(parseCol[col]==true){
// check if the entire field is a NaN string - consistent with pandas
const bool is_na = serializedTrieContains(opts.naValuesTrie, raw_csv + start, pos - start);
// Modify start & end to ignore whitespace and quotechars
long tempPos=pos-1;
if(!is_na && dtype[actual_col] != gdf_dtype::GDF_CATEGORY && dtype[actual_col] != gdf_dtype::GDF_STRING){
adjustForWhitespaceAndQuotes(raw_csv, &start, &tempPos, opts.quotechar);
}
if(!is_na && start<=(tempPos)) { // Empty fields are not legal values
// Type dispatcher does not handle GDF_STRINGS
if (dtype[actual_col] == gdf_dtype::GDF_STRING) {
long end = pos;
if(opts.keepquotes==false){
if((raw_csv[start] == opts.quotechar) && (raw_csv[end-1] == opts.quotechar)){
start++;
end--;
}
}
auto str_list = static_cast<string_pair*>(gdf_data[actual_col]);
str_list[rec_id].first = raw_csv + start;
str_list[rec_id].second = end - start;
} else {
cudf::type_dispatcher(
dtype[actual_col], ConvertFunctor{}, raw_csv,
gdf_data[actual_col], rec_id, start, tempPos, opts);
}
// set the valid bitmap - all bits were set to 0 to start
long bitmapIdx = whichBitmap(rec_id); // which bitmap
long bitIdx = whichBit(rec_id); // which bit - over an 8-bit index
setBit(valid[actual_col]+bitmapIdx, bitIdx); // This is done with atomics
atomicAdd(&num_valid[actual_col], 1);
}
else if(dtype[actual_col]==gdf_dtype::GDF_STRING){
auto str_list = static_cast<string_pair*>(gdf_data[actual_col]);
str_list[rec_id].first = nullptr;
str_list[rec_id].second = 0;
}
actual_col++;
}
pos++;
start=pos;
col++;
}
}
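// Validity layout note (assumption, inferred from the usage above): whichBitmap(r)
// is taken to be r / 8 and whichBit(r) to be r % 8, i.e. record r maps to bit
// r % 8 of byte r / 8 in the gdf_valid_type bitmap, set atomically via setBit.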
/**---------------------------------------------------------------------------*
* @brief Helper function to setup and launch CSV data type detect CUDA kernel.
*
* @param[in] raw_csv The metadata for the CSV data
* @param[out] d_columnData The count for each column data type
*
* @return gdf_error GDF_SUCCESS upon completion
*---------------------------------------------------------------------------**/
gdf_error launch_dataTypeDetection(raw_csv_t *raw_csv,
column_data_t *d_columnData) {
int blockSize; // suggested thread count to use
int minGridSize; // minimum block count required
CUDA_TRY(cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize,
dataTypeDetection));
// Calculate actual block count to use based on records count
int gridSize = (raw_csv->num_records + blockSize - 1) / blockSize;
dataTypeDetection <<< gridSize, blockSize >>> (
raw_csv->data.data(), raw_csv->opts, raw_csv->num_records,
raw_csv->num_actual_cols, raw_csv->d_parseCol.data().get(), raw_csv->recStart.data(),
d_columnData);
CUDA_TRY(cudaGetLastError());
return GDF_SUCCESS;
}
/**
* @brief Returns true if the input character is a valid digit.
* Supports both decimal and hexadecimal digits (uppercase and lowercase).
*/
__device__ __forceinline__
bool isDigit(char c, bool is_hex){
if (c >= '0' && c <= '9') return true;
if (is_hex) {
if (c >= 'A' && c <= 'F') return true;
if (c >= 'a' && c <= 'f') return true;
}
return false;
}
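// Examples: isDigit('7', false) == true; isDigit('b', false) == false;
// isDigit('b', true) == true (hex digit); isDigit('g', true) == false.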
/**
* @brief Returns true if the counters indicate a potentially valid float.
* False positives are possible because positions are not taken into account.
* For example, field "e.123-" would match the pattern.
*/
__device__ __forceinline__
bool isLikeFloat(long len, long digit_cnt, long decimal_cnt, long dash_cnt, long exponent_cnt) {
// Can't have more than one exponent and one decimal point
if (decimal_cnt > 1) return false;
if (exponent_cnt > 1) return false;
// Without the exponent or a decimal point, this is an integer, not a float
if (decimal_cnt == 0 && exponent_cnt == 0) return false;
// Can only have one '-' per component
if (dash_cnt > 1 + exponent_cnt) return false;
// If anything other than these characters is present, it's not a float
if (digit_cnt + decimal_cnt + dash_cnt + exponent_cnt != len) return false;
// Needs at least 1 digit, 2 if exponent is present
if (digit_cnt < 1 + exponent_cnt) return false;
return true;
}
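// Examples: "1.5" (2 digits, 1 decimal) and "-2e-3" (2 digits, 2 dashes,
// 1 exponent) pass; "123" fails (no decimal point or exponent => integer);
// "1.2.3" fails (two decimal points); "e.123-" passes on counts alone, the
// false positive called out above.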
/**---------------------------------------------------------------------------*
* @brief CUDA kernel that parses and converts CSV data into cuDF column data.
*
* Data is processed one row/record per thread, so the total number of
* threads covers the number of rows.
*
* @param[in] raw_csv The entire CSV data to read
* @param[in] opts A set of parsing options
* @param[in] num_records The number of lines/rows of CSV data
* @param[in] num_columns The number of columns of CSV data
* @param[in] parseCol Whether to parse or skip a column
* @param[in] recStart The start offset of each row within the CSV data of interest
* @param[out] d_columnData The count for each column data type
*
* @returns GDF_SUCCESS upon successful computation
*---------------------------------------------------------------------------**/
__global__
void dataTypeDetection(char *raw_csv,
const ParseOptions opts,
gdf_size_type num_records,
int num_columns,
bool *parseCol,
uint64_t *recStart,
column_data_t *d_columnData)
{
// thread IDs range per block, so also need the block id
long rec_id = threadIdx.x + (blockDim.x * blockIdx.x); // entry into the record array - each thread handles one record
// we can have more threads than data, make sure we are not past the end of the data
if ( rec_id >= num_records)
return;
long start = recStart[rec_id];
long stop = recStart[rec_id + 1];
long pos = start;
int col = 0;
int actual_col = 0;
// Going through all the columns of a given record
while(col<num_columns){
if(start>stop)
break;
pos = seekFieldEnd(raw_csv, opts, pos, stop);
// Checking if this is a column that the user wants --- user can filter columns
if(parseCol[col]==true){
long tempPos=pos-1;
// Checking if the record is NULL
if(start>(tempPos)){
atomicAdd(& d_columnData[actual_col].countNULL, 1L);
pos++;
start=pos;
col++;
actual_col++;
continue;
}
long countNumber=0;
long countDecimal=0;
long countSlash=0;
long countDash=0;
long countColon=0;
long countString=0;
long countExponent=0;
// Modify start & end to ignore whitespace and quotechars
// This could possibly result in additional empty fields
adjustForWhitespaceAndQuotes(raw_csv, &start, &tempPos);
const long strLen = tempPos - start + 1;
const bool maybe_hex = ((strLen > 2 && raw_csv[start] == '0' && raw_csv[start + 1] == 'x') ||
(strLen > 3 && raw_csv[start] == '-' && raw_csv[start + 1] == '0' && raw_csv[start + 2] == 'x'));
for(long startPos=start; startPos<=tempPos; startPos++){
if(isDigit(raw_csv[startPos], maybe_hex)){
countNumber++;
continue;
}
// Looking for unique characters that will help identify column types.
switch (raw_csv[startPos]){
case '.':
countDecimal++;break;
case '-':
countDash++; break;
case '/':
countSlash++;break;
case ':':
countColon++;break;
case 'e':
case 'E':
if (!maybe_hex && startPos > start && startPos < tempPos)
countExponent++;
break;
default:
countString++;
break;
}
}
// For an integer, digit characters must account for the entire string
long int_req_number_cnt = strLen;
// Allow one non-digit character for a leading minus sign
if(raw_csv[start]=='-' && strLen > 1){
--int_req_number_cnt;
}
// Allow one non-digit character for the 'x' in a hexadecimal prefix
if(maybe_hex) {
--int_req_number_cnt;
}
if(strLen==0){ // Spaces were stripped in pre-processing, so the field may now be empty.
atomicAdd(& d_columnData[actual_col].countNULL, 1L);
}
else if(countNumber==int_req_number_cnt){
// Check how many bits (8/16/32/64) the integer value requires.
// This will allow us to allocate the exact amount of memory.
const auto value = convertStrToValue<int64_t>(raw_csv, start, tempPos, opts);
const size_t field_len = tempPos - start + 1;
if (serializedTrieContains(opts.trueValuesTrie, raw_csv + start, field_len) ||
serializedTrieContains(opts.falseValuesTrie, raw_csv + start, field_len)){
atomicAdd(& d_columnData[actual_col].countInt8, 1L);
}
else if(value >= (1L<<31)){
atomicAdd(& d_columnData[actual_col].countInt64, 1L);
}
else if(value >= (1L<<15)){
atomicAdd(& d_columnData[actual_col].countInt32, 1L);
}
else if(value >= (1L<<7)){
atomicAdd(& d_columnData[actual_col].countInt16, 1L);
}
else{
atomicAdd(& d_columnData[actual_col].countInt8, 1L);
}
}
else if(isLikeFloat(strLen, countNumber, countDecimal, countDash, countExponent)){
atomicAdd(& d_columnData[actual_col].countFloat, 1L);
}
// A date-time field cannot contain more than 3 non-numeric (string) characters; an entry with more than 3 is not
// a date-time field. Also, a field with multiple decimal points is not a valid number.
else if(countString > 3 || countDecimal > 1){
atomicAdd(& d_columnData[actual_col].countString, 1L);
}
else {
// A date field can have one or two '-' or '/' separators; a legal combination uses only one of the two.
// To simplify automatic column detection, we do not cover all date-time format permutations.
if((countDash>0 && countDash<=2 && countSlash==0)|| (countDash==0 && countSlash>0 && countSlash<=2) ){
if((countColon<=2)){
atomicAdd(& d_columnData[actual_col].countDateAndTime, 1L);
}
else{
atomicAdd(& d_columnData[actual_col].countString, 1L);
}
}
// Default field is string type.
else{
atomicAdd(& d_columnData[actual_col].countString, 1L);
}
}
actual_col++;
}
pos++;
start=pos;
col++;
}
}
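// Width-detection example: a field "300" increments countInt16
// (128 <= 300 < 32768); "70000" increments countInt32. The thresholds compare
// the signed value directly, so large-magnitude negatives (e.g. -70000) fall
// through to countInt8 -- a quirk of the heuristic as written.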
|
eade286352af1d7a784c67cb36d507d71d151936.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <unittest/unittest.h>
#include <thrust/scatter.h>
#include <thrust/execution_policy.h>
template<typename Iterator1, typename Iterator2, typename Iterator3>
__global__
void scatter_kernel(Iterator1 first, Iterator1 last, Iterator2 map_first, Iterator3 result)
{
thrust::scatter(thrust::seq, first, last, map_first, result);
}
template<typename T>
void TestScatterDeviceSeq(const size_t n)
{
const size_t output_size = std::min((size_t) 10, 2 * n);
thrust::host_vector<T> h_input(n, (T) 1);
thrust::device_vector<T> d_input(n, (T) 1);
thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n);
for(size_t i = 0; i < n; i++)
{
h_map[i] = h_map[i] % output_size;
}
thrust::device_vector<unsigned int> d_map = h_map;
thrust::host_vector<T> h_output(output_size, (T) 0);
thrust::device_vector<T> d_output(output_size, (T) 0);
thrust::scatter(h_input.begin(), h_input.end(), h_map.begin(), h_output.begin());
hipLaunchKernelGGL(( scatter_kernel), dim3(1),dim3(1), 0, 0, d_input.begin(), d_input.end(), d_map.begin(), d_output.begin());
ASSERT_EQUAL(h_output, d_output);
}
DECLARE_VARIABLE_UNITTEST(TestScatterDeviceSeq);
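// Scatter semantics (illustrative): scatter(first, last, map, result) performs
// result[map[i]] = first[i] for each i; e.g. input {7,8,9} with map {2,0,1}
// yields result {8,9,7}. The kernel above exercises the thrust::seq overload
// from device code so it can be compared against the host-side result.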
template<typename Iterator1, typename Iterator2, typename Iterator3, typename Iterator4, typename Function>
__global__
void scatter_if_kernel(Iterator1 first, Iterator1 last, Iterator2 map_first, Iterator3 stencil_first, Iterator4 result, Function f)
{
thrust::scatter_if(thrust::seq, first, last, map_first, stencil_first, result, f);
}
template<typename T>
struct is_even_scatter_if
{
__host__ __device__ bool operator()(const T i) const { return (i % 2) == 0; }
};
template<typename T>
void TestScatterIfDeviceSeq(const size_t n)
{
const size_t output_size = std::min((size_t) 10, 2 * n);
thrust::host_vector<T> h_input(n, (T) 1);
thrust::device_vector<T> d_input(n, (T) 1);
thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n);
for(size_t i = 0; i < n; i++)
{
h_map[i] = h_map[i] % output_size;
}
thrust::device_vector<unsigned int> d_map = h_map;
thrust::host_vector<T> h_output(output_size, (T) 0);
thrust::device_vector<T> d_output(output_size, (T) 0);
thrust::scatter_if(h_input.begin(), h_input.end(), h_map.begin(), h_map.begin(), h_output.begin(), is_even_scatter_if<unsigned int>());
hipLaunchKernelGGL(( scatter_if_kernel), dim3(1),dim3(1), 0, 0, d_input.begin(), d_input.end(), d_map.begin(), d_map.begin(), d_output.begin(), is_even_scatter_if<unsigned int>());
ASSERT_EQUAL(h_output, d_output);
}
DECLARE_VARIABLE_UNITTEST(TestScatterIfDeviceSeq);
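// scatter_if sketch: result[map[i]] = first[i] only where the predicate holds
// on stencil[i]. The test above passes h_map as both map and stencil, so only
// entries whose map value is even are scattered; the rest keep the initial 0.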
| eade286352af1d7a784c67cb36d507d71d151936.cu | #include <unittest/unittest.h>
#include <thrust/scatter.h>
#include <thrust/execution_policy.h>
template<typename Iterator1, typename Iterator2, typename Iterator3>
__global__
void scatter_kernel(Iterator1 first, Iterator1 last, Iterator2 map_first, Iterator3 result)
{
thrust::scatter(thrust::seq, first, last, map_first, result);
}
template<typename T>
void TestScatterDeviceSeq(const size_t n)
{
const size_t output_size = std::min((size_t) 10, 2 * n);
thrust::host_vector<T> h_input(n, (T) 1);
thrust::device_vector<T> d_input(n, (T) 1);
thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n);
for(size_t i = 0; i < n; i++)
{
h_map[i] = h_map[i] % output_size;
}
thrust::device_vector<unsigned int> d_map = h_map;
thrust::host_vector<T> h_output(output_size, (T) 0);
thrust::device_vector<T> d_output(output_size, (T) 0);
thrust::scatter(h_input.begin(), h_input.end(), h_map.begin(), h_output.begin());
scatter_kernel<<<1,1>>>(d_input.begin(), d_input.end(), d_map.begin(), d_output.begin());
ASSERT_EQUAL(h_output, d_output);
}
DECLARE_VARIABLE_UNITTEST(TestScatterDeviceSeq);
template<typename Iterator1, typename Iterator2, typename Iterator3, typename Iterator4, typename Function>
__global__
void scatter_if_kernel(Iterator1 first, Iterator1 last, Iterator2 map_first, Iterator3 stencil_first, Iterator4 result, Function f)
{
thrust::scatter_if(thrust::seq, first, last, map_first, stencil_first, result, f);
}
template<typename T>
struct is_even_scatter_if
{
__host__ __device__ bool operator()(const T i) const { return (i % 2) == 0; }
};
template<typename T>
void TestScatterIfDeviceSeq(const size_t n)
{
const size_t output_size = std::min((size_t) 10, 2 * n);
thrust::host_vector<T> h_input(n, (T) 1);
thrust::device_vector<T> d_input(n, (T) 1);
thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n);
for(size_t i = 0; i < n; i++)
{
h_map[i] = h_map[i] % output_size;
}
thrust::device_vector<unsigned int> d_map = h_map;
thrust::host_vector<T> h_output(output_size, (T) 0);
thrust::device_vector<T> d_output(output_size, (T) 0);
thrust::scatter_if(h_input.begin(), h_input.end(), h_map.begin(), h_map.begin(), h_output.begin(), is_even_scatter_if<unsigned int>());
scatter_if_kernel<<<1,1>>>(d_input.begin(), d_input.end(), d_map.begin(), d_map.begin(), d_output.begin(), is_even_scatter_if<unsigned int>());
ASSERT_EQUAL(h_output, d_output);
}
DECLARE_VARIABLE_UNITTEST(TestScatterIfDeviceSeq);
|
e6fa3bf9f49584d7dcc0e937e6a8db21f47006b6.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <chrono>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "bbcu/bbcu.h"
#include "bbcu/bbcu_util.h"
//////////////////////////////
// forward
//////////////////////////////
__global__ void kernal_fp32_Binarize_Forward(
float const *x_buf,
float *y_buf,
int frame_size,
int frame_stride
)
{
int node = blockIdx.x;
int frame_base = threadIdx.x;
int frame_step = blockDim.x;
for ( int frame = frame_base; frame < frame_size; frame += frame_step ) {
float x = x_buf[frame_stride*node + frame];
x = (x > 0) ? 1 : 0;
y_buf[frame_stride*node + frame] = x;
}
}
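// Example mapping: x = {-0.3f, 0.0f, 2.1f} -> y = {0, 0, 1}; x == 0 maps to 0
// because the comparison is strictly greater-than.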
BBCU_DLL_EXPORT int bbcu_fp32_Binarize_Forward
(
float const * dev_x_buf,
float* dev_y_buf,
int node_size,
int frame_size,
int frame_stride,
hipStream_t streamId
)
{
BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());
int unit_x = 512;
dim3 grid(node_size);
dim3 block(unit_x);
hipLaunchKernelGGL(( kernal_fp32_Binarize_Forward), dim3(grid), dim3(block), 0, streamId,
dev_x_buf,
dev_y_buf,
frame_size,
frame_stride
);
BB_CUDA_CHECK_LAST_ERROR();
return 0;
}
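// Launch shape note: one block per node and 512 threads per block, each thread
// grid-striding over frames; frame_stride is presumably the padded row pitch in
// elements (>= frame_size), which is why it is passed separately from frame_size.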
//////////////////////////////
// backward
//////////////////////////////
__global__ void kernal_fp32_HardTanh_Backward
(
const float* x_buf,
const float* dy_buf,
float* dx_buf,
int frame_size,
int frame_stride
)
{
int node = blockIdx.x;
int frame_base = threadIdx.x;
int frame_step = blockDim.x;
for ( int frame = frame_base; frame < frame_size; frame += frame_step ) {
float x = x_buf[frame_stride*node + frame];
float dy = dy_buf[frame_stride*node + frame];
dy = (x >= -1.0f && x <= 1.0f) ? dy : 0.0f;
dx_buf[frame_stride*node + frame] = dy;
}
}
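// Straight-through gradient: dy passes where -1 <= x <= 1 and is zeroed
// outside, e.g. x = {-2, 0.5, 2} with dy = {1, 1, 1} gives dx = {0, 1, 0}.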
BBCU_DLL_EXPORT int bbcu_fp32_HardTanh_Backward
(
float const *dev_x_buf,
float const *dev_dy_buf,
float *dev_dx_buf,
int node_size,
int frame_size,
int frame_stride,
hipStream_t streamId
)
{
BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());
int unit_x = 512;
dim3 grid(node_size);
dim3 block(unit_x);
hipLaunchKernelGGL(( kernal_fp32_HardTanh_Backward), dim3(grid), dim3(block), 0, streamId,
dev_x_buf,
dev_dy_buf,
dev_dx_buf,
frame_size,
frame_stride
);
BB_CUDA_CHECK_LAST_ERROR();
return 0;
}
| e6fa3bf9f49584d7dcc0e937e6a8db21f47006b6.cu | #include <iostream>
#include <chrono>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "bbcu/bbcu.h"
#include "bbcu/bbcu_util.h"
//////////////////////////////
// forward
//////////////////////////////
__global__ void kernal_fp32_Binarize_Forward(
float const *x_buf,
float *y_buf,
int frame_size,
int frame_stride
)
{
int node = blockIdx.x;
int frame_base = threadIdx.x;
int frame_step = blockDim.x;
for ( int frame = frame_base; frame < frame_size; frame += frame_step ) {
float x = x_buf[frame_stride*node + frame];
x = (x > 0) ? 1 : 0;
y_buf[frame_stride*node + frame] = x;
}
}
BBCU_DLL_EXPORT int bbcu_fp32_Binarize_Forward
(
float const * dev_x_buf,
float* dev_y_buf,
int node_size,
int frame_size,
int frame_stride,
cudaStream_t streamId
)
{
BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());
int unit_x = 512;
dim3 grid(node_size);
dim3 block(unit_x);
kernal_fp32_Binarize_Forward<<<grid, block, 0, streamId>>>(
dev_x_buf,
dev_y_buf,
frame_size,
frame_stride
);
BB_CUDA_CHECK_LAST_ERROR();
return 0;
}
//////////////////////////////
// backward
//////////////////////////////
__global__ void kernal_fp32_HardTanh_Backward
(
const float* x_buf,
const float* dy_buf,
float* dx_buf,
int frame_size,
int frame_stride
)
{
int node = blockIdx.x;
int frame_base = threadIdx.x;
int frame_step = blockDim.x;
for ( int frame = frame_base; frame < frame_size; frame += frame_step ) {
float x = x_buf[frame_stride*node + frame];
float dy = dy_buf[frame_stride*node + frame];
dy = (x >= -1.0f && x <= 1.0f) ? dy : 0.0f;
dx_buf[frame_stride*node + frame] = dy;
}
}
BBCU_DLL_EXPORT int bbcu_fp32_HardTanh_Backward
(
float const *dev_x_buf,
float const *dev_dy_buf,
float *dev_dx_buf,
int node_size,
int frame_size,
int frame_stride,
cudaStream_t streamId
)
{
BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());
int unit_x = 512;
dim3 grid(node_size);
dim3 block(unit_x);
kernal_fp32_HardTanh_Backward<<<grid, block, 0, streamId>>>(
dev_x_buf,
dev_dy_buf,
dev_dx_buf,
frame_size,
frame_stride
);
BB_CUDA_CHECK_LAST_ERROR();
return 0;
}
|
b5d7974114f76f690cead7c9dc044d62276ea679.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#include <stdio.h>
#include <MatKernel.hpp>
#define BYDIMF 2
#define CDIM 5
#define BYDIMB 5
#if __CUDA_ARCH__ >= 300
/*
* Positive kernel for word2vec. This handles the positively-labeled word pairs with
* one context word and the current word.
*/
template<int SKIP, int YDIM, int NREPS>
__global__ void __word2vecPos(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float lrate, float vexp) {
const int nwindow = 2*SKIP+1;
int iwords[nwindow];
float aa[NREPS];
float daa[NREPS];
float bb[NREPS][nwindow];
float dbb[NREPS][nwindow];
__shared__ float CC[YDIM * nwindow];
int i, j, k, tid, indx, icol, dxy, lb, ub;
float prod, v, ascale, bscale;
tid = threadIdx.x + blockDim.x * threadIdx.y;
dxy = blockDim.x * blockDim.y;
bool good;
int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x);
int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x);
float inr = 1.0f / nrows;
#pragma unroll
for (i = 0; i < nwindow; i++) { // Prefill the word and aa window buffers
if (istart + i - SKIP - 1 >= 0) {
iwords[i] = nrows * W[istart + i - SKIP - 1]; // Get a new word address
} else {
iwords[i] = -1;
}
good = (iwords[i] >= 0);
#pragma unroll
for (j = 0; j < NREPS; j++) { // Get the B vector for this word
indx = tid + j * dxy;
if (good && indx < nrows) {
bb[j][i] = B[indx + iwords[i]];
} else {
bb[j][i] = 0;
}
dbb[j][i] = 0;
}
}
for (icol = istart; icol < iend; icol++) { // Iterate over columns
#pragma unroll
for (i = 0; i < nwindow-1; i++) { // slide iwords down
iwords[i] = iwords[i+1];
#pragma unroll
for (j = 0; j < NREPS; j++) {
bb[j][i] = bb[j][i+1]; // slide data down
dbb[j][i] = dbb[j][i+1]; // slide deriv down
}
}
good = (icol + SKIP < ncols);
if (good) {
iwords[nwindow - 1] = nrows * W[icol + SKIP]; // Get a new word address
} else {
iwords[nwindow - 1] = -1;
}
good = good && iwords[nwindow-1] >= 0;
#pragma unroll
for (j = 0; j < NREPS; j++) { // Get a new B column
indx = tid + j * dxy;
if (good && indx < nrows) {
bb[j][nwindow - 1] = B[indx + iwords[nwindow - 1]];
} else {
bb[j][nwindow - 1] = 0;
}
dbb[j][nwindow-1] = 0;
if (iwords[SKIP] >= 0 && indx < nrows) { // Get a new A column
aa[j] = A[indx + iwords[SKIP]];
} else {
aa[j] = 0;
}
}
lb = LB[icol];
ub = UB[icol];
__syncthreads();
if (iwords[SKIP] >= 0) {
#pragma unroll
for (i = 0; i < nwindow; i++) { // Iterate across the window for B cols
prod = 0;
if (i >= SKIP + lb && i <= SKIP + ub && i != SKIP) {
#pragma unroll
for (j = 0; j < NREPS; j++) { // Iterate over blocks of elements
prod += bb[j][i] * aa[j]; // Compute the product between current A, B cols
}
#pragma unroll
for (k = 1; k < 32; k = k + k) {
v = __shfl_down(prod, k); // Reduce within warp
prod += v;
}
if (threadIdx.x == 0) {
CC[i - SKIP - lb + threadIdx.y * nwindow] = prod; // Save to SHMEM
}
}
}
__syncthreads();
for (i = 1; i < blockDim.y; i++) { // Reduce across warps
for (k = tid; k <= ub - lb; k += dxy) {
CC[k] += CC[k + i * nwindow];
}
__syncthreads();
}
__syncthreads(); // Apply the sigmoid map
for (i = tid; i <= ub - lb; i += dxy) {
v = CC[i];
if (v > 16.0f) {
v = 1.0f;
} else if (v < -16.0f) {
v = 0.0f;
} else {
v = exp(v);
v = v / (1.0f + v);
}
CC[i] = 1.0f - v; // All pairs have label 1
}
__syncthreads();
#pragma unroll
for (j = 0; j < NREPS; j++) {
daa[j] = 0;
}
ascale = pow(max(0, iwords[SKIP])*inr + 1.0f, vexp);
#pragma unroll
for (i = 0; i < nwindow; i++) { // Iterate across the window for A cols
if (i >= SKIP + lb && i <= SKIP + ub && i != SKIP && iwords[i] >= 0) {
bscale = pow(max(0, iwords[i])*inr + 1.0f, vexp);
v = lrate * CC[i - SKIP - lb];
#pragma unroll
for (j = 0; j < NREPS; j++) {
daa[j] += ascale * v * bb[j][i]; // Update A's derivative
dbb[j][i] += bscale * v * aa[j]; // Update B's derivative
}
}
}
__syncthreads();
#pragma unroll
for (j = 0; j < NREPS; j++) {
if (tid + j * dxy < nrows) { // Save the A column
atomicAdd(&A[tid + j * dxy + iwords[SKIP]], daa[j]);
}
}
if (iwords[0] >= 0) {
#pragma unroll
for (j = 0; j < NREPS; j++) {
if (tid + j * dxy < nrows) { // Save the B column
atomicAdd(&B[tid + j * dxy + iwords[0]], dbb[j][0]);
}
}
}
__syncthreads();
}
}
#pragma unroll
for (i = 1; i < nwindow; i++) { // Clear out the derivative queue
if (iwords[i] >= 0) {
#pragma unroll
for (j = 0; j < NREPS; j++) { // Save the B column
if (tid + j * dxy < nrows) {
atomicAdd(&B[tid + j * dxy + iwords[i]], dbb[j][i]);
}
}
}
}
}
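// Window bookkeeping sketch (descriptive): with SKIP = 2 the queue holds
// 2*SKIP+1 = 5 word ids. Each column shift slides words, B columns and their
// derivatives down one slot, loads a fresh column into the last slot, pairs the
// center word (slot SKIP) against neighbours clamped to [lb, ub], and flushes
// the accumulated dbb of the word leaving the window back to B via atomicAdd.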
/*
* Evaluation kernel for word2vec. This handles the positively-labeled word pairs with
* one context word and the current word.
*/
template<int SKIP, int YDIM, int NREPS>
__global__ void __word2vecEvalPos(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float *Retval) {
const int nwindow = 2*SKIP+1;
int iwords[nwindow];
float aa[NREPS];
float bb[NREPS][nwindow];
__shared__ float CC[YDIM * nwindow];
int i, j, k, tid, indx, icol, dxy, lb, ub;
float prod, v;
tid = threadIdx.x + blockDim.x * threadIdx.y;
dxy = blockDim.x * blockDim.y;
bool good;
double sum = 0;
int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x);
int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x);
#pragma unroll
for (i = 0; i < nwindow; i++) { // Prefill the word and aa window buffers
if (istart + i - SKIP - 1 >= 0) {
iwords[i] = nrows * W[istart + i - SKIP - 1]; // Get a new word
} else {
iwords[i] = -1;
}
good = (iwords[i] >= 0);
#pragma unroll
for (j = 0; j < NREPS; j++) { // Get the B vector for this word
indx = tid + j * dxy;
if (good && indx < nrows) {
bb[j][i] = B[indx + iwords[i]];
} else {
bb[j][i] = 0;
}
}
}
for (icol = istart; icol < iend; icol++) { // Iterate over columns
#pragma unroll
for (i = 0; i < nwindow-1; i++) { // slide iwords down
iwords[i] = iwords[i+1];
#pragma unroll
for (j = 0; j < NREPS; j++) {
bb[j][i] = bb[j][i+1]; // slide data down
}
}
good = (icol + SKIP < ncols);
if (good) {
iwords[nwindow - 1] = nrows * W[icol + SKIP]; // Get a new word
} else {
iwords[nwindow - 1] = -1;
}
good = good && iwords[nwindow-1] >= 0;
#pragma unroll
for (j = 0; j < NREPS; j++) { // Get a new B column
indx = tid + j * dxy;
if (good && indx < nrows) {
bb[j][nwindow - 1] = B[indx + iwords[nwindow - 1]];
} else {
bb[j][nwindow - 1] = 0;
}
if (iwords[SKIP] >= 0 && indx < nrows) { // Get a new A column
aa[j] = A[indx + iwords[SKIP]];
} else {
aa[j] = 0;
}
}
lb = LB[icol];
ub = UB[icol];
__syncthreads();
#pragma unroll
for (i = 0; i < nwindow; i++) { // Iterate across the window for B cols
if (i >= SKIP + lb && i <= SKIP + ub) {
if (i == SKIP || iwords[SKIP] < 0 || iwords[i] < 0) { // Give this word a large score (gives zero contribution to loss)
prod = 20.0f;
} else {
prod = 0;
#pragma unroll
for (j = 0; j < NREPS; j++) { // Iterate over blocks of elements
prod += bb[j][i] * aa[j]; // Compute the product between current A, B cols
}
#pragma unroll
for (k = 1; k < 32; k = k + k) {
v = __shfl_down(prod, k); // Reduce within warp
prod += v;
}
}
if (threadIdx.x == 0) {
CC[i - SKIP - lb + threadIdx.y * nwindow] = prod; // Save to SHMEM
}
}
}
__syncthreads();
for (i = 1; i < blockDim.y; i++) { // Reduce across warps
for (k = tid; k <= ub - lb; k += dxy) {
CC[k] += CC[k + i * nwindow];
}
__syncthreads();
}
__syncthreads(); // Apply the sigmoid map
for (i = tid; i <= ub - lb; i += dxy) {
v = CC[i];
if (v > 16.0f) {
v = 1.0f;
} else if (v < -16.0f) {
v = 0.0f;
} else {
v = exp(v);
v = v / (1.0f + v);
}
CC[i] = log(max(v, 1.0e-20f)); // Compute the loss
}
__syncthreads();
for (i = 1; i <= ub - lb; i = i + i) {
if ((tid & (i-1)) == 0 && tid + i <= ub - lb) {
CC[tid] += CC[tid + i];
}
__syncthreads();
}
sum += CC[0];
__syncthreads();
}
if (tid == 0) {
atomicAdd(&Retval[0], (float)sum);
}
}
template<int NSKIP, int BYDIM>
__global__ void __word2vecPosy(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float lrate, float vexp) {
__shared__ float CC[NSKIP*2*BYDIM];
float aa;
int ib[NSKIP*2];
float prods[NSKIP*2];
float bscale[NSKIP*2];
int ia, iword, lb, ub;
int tid = threadIdx.x + blockDim.x * threadIdx.y;
int dxy = blockDim.x * blockDim.y;
int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x);
int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x);
int i, j, k, icol, jcol;
float bb, db, dv, v, ascale, tmp;
float inr = 1.0f / nrows;
for (icol = istart; icol < iend; icol++) { // Iterate over columns
ia = nrows * W[icol];
if (ia >= 0) { // Load lb and ub values
lb = LB[icol];
ub = UB[icol];
jcol = threadIdx.x - NSKIP;
iword = -1;
if (jcol >= lb && jcol <= ub) { // Load words in the window
iword = W[icol + jcol];
}
#pragma unroll
for (i = 0; i < NSKIP; i++) { // Share window word ids across threads, clear prods
ib[i] = nrows * __shfl(iword, i);
ib[i+NSKIP] = nrows * __shfl(iword, i+NSKIP+1);
prods[i] = 0;
prods[i+NSKIP] = 0;
}
for (i = tid; i < nrows; i += dxy) { // Compute products between center and context words
aa = A[i + ia];
#pragma unroll
for (j = 0; j < NSKIP*2; j++) {
if (ib[j] >= 0) {
bb = B[i + ib[j]];
prods[j] += aa * bb;
}
}
}
#pragma unroll
for (j = 0; j < NSKIP*2; j++) { // Reduce prods within each warp
#pragma unroll
for (k = 1; k < 32; k = k+k) {
tmp = __shfl_down(prods[j], k);
prods[j] += tmp;
}
}
__syncthreads();
if (threadIdx.x == 0) { // Save the products to SHMEM (one copy per warp)
#pragma unroll
for (j = 0; j < 2*NSKIP; j++) {
CC[j + NSKIP * 2 * threadIdx.y] = prods[j];
}
}
__syncthreads();
for (i = 1; i < blockDim.y; i++) { // Reduce the products across warps
__syncthreads();
for (j = tid; j < NSKIP * 2; j += dxy) {
CC[j] += CC[j + i * NSKIP * 2];
}
}
__syncthreads();
for (i = tid; i < NSKIP * 2; i+= dxy) { // Compute logistic function on all products
v = CC[i];
if (v > 16.0f) {
v = 1.0f;
} else if (v < -16.0f) {
v = 0.0f;
} else {
v = exp(v);
v = v / (1.0f + v);
}
CC[i] = lrate * (1 - v); // All these pairs have label 1
}
__syncthreads(); // Now do scaled gradients
ascale = pow(max(0, ia)*inr + 1.0f, vexp); // Simulated ADAGRAD on A
for (j = 0; j < NSKIP * 2; j++) { // Load per-word gradient scales and sigmoid terms
if (ib[j] >= 0) {
bscale[j] = pow(max(0, ib[j])*inr + 1.0f, vexp); // Simulated ADAGRAD on B
} else {
bscale[j] = 0;
}
prods[j] = CC[j];
}
__syncthreads();
dv = 0;
for (i = tid; i < nrows; i += dxy) { // Update vecs with derivatives
aa = A[i + ia];
#pragma unroll
for (j = 0; j < NSKIP * 2; j++) { // Load B data
if (ib[j] >= 0) {
bb = B[i + ib[j]];
dv += ascale * prods[j] * bb;
db = bscale[j] * prods[j] * aa;
atomicAdd(&B[i + ib[j]], db); // Update B
}
}
atomicAdd(&A[i + ia], dv); // Update A
}
__syncthreads();
}
}
}
template<int NSKIP, int BYDIM>
__global__ void __word2vecEvalPosy(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float *retval) {
__shared__ float CC[NSKIP*2*BYDIM];
float aa;
float prods[NSKIP*2];
int ia, iword, lb, ub;
int ib[NSKIP*2];
int tid = threadIdx.x + blockDim.x * threadIdx.y;
int dxy = blockDim.x * blockDim.y;
int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x);
int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x);
int i, j, k, icol, jcol;
float bb, v, tmp, sum;
sum = 0;
for (icol = istart; icol < iend; icol++) { // Iterate over columns
ia = nrows * W[icol];
if (ia >= 0) { // Load lb and ub values
lb = LB[icol];
ub = UB[icol];
jcol = threadIdx.x - NSKIP;
iword = -1;
if (jcol >= lb && jcol <= ub) { // Load words in the window
iword = W[icol + jcol];
}
#pragma unroll
for (i = 0; i < NSKIP; i++) { // Share window word ids across threads, clear prods
ib[i] = nrows * __shfl(iword, i);
ib[i+NSKIP] = nrows * __shfl(iword, i+NSKIP+1);
prods[i] = 0;
prods[i+NSKIP] = 0;
}
for (i = tid; i < nrows; i += dxy) { // Compute products between center and context words
aa = A[i + ia];
#pragma unroll
for (j = 0; j < NSKIP*2; j++) {
if (ib[j] >= 0) {
bb = B[i + ib[j]];
prods[j] += aa * bb;
}
}
}
#pragma unroll
for (j = 0; j < NSKIP*2; j++) { // Reduce prods within each warp
#pragma unroll
for (k = 1; k < 32; k = k+k) {
tmp = __shfl_down(prods[j], k);
prods[j] += tmp;
}
}
__syncthreads();
if (threadIdx.x == 0) { // Save the products to SHMEM (one copy per warp)
#pragma unroll
for (j = 0; j < 2*NSKIP; j++) {
CC[j + NSKIP * 2 * threadIdx.y] = prods[j];
}
}
__syncthreads();
for (i = 1; i < blockDim.y; i++) { // Reduce the products across warps
__syncthreads();
for (j = tid; j < NSKIP * 2; j += dxy) {
CC[j] += CC[j + i * NSKIP * 2];
}
}
__syncthreads();
for (i = tid; i < NSKIP * 2; i+= dxy) { // Compute logistic function on all products
v = CC[i];
if (v > 16.0f) {
v = 1.0f;
} else if (v < -16.0f) {
v = 0.0f;
} else {
v = exp(v);
v = v / (1.0f + v);
}
CC[i] = log(max(v, 1.0e-20f)); // All these pairs have label 1
}
__syncthreads(); // Now sum likelihood over window
for (i = 1; i < 2 * NSKIP; i = i + i) {
if ((tid & (i-1)) == 0 && tid + i < 2 * NSKIP) {
CC[tid] += CC[tid + i];
}
__syncthreads();
}
sum += CC[0];
__syncthreads();
}
}
if (tid == 0) {
atomicAdd(&retval[0], (float)sum);
}
}
/*
* Combined forward-backward word2vec kernel
*/
template<int NWA, int NWB, int MAXD, int BYDIM>
__global__ void __word2vecNeg(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float lrate, float vexp) {
const int NWAB = NWA*NWB;
__shared__ float CC[NWA*NWB*BYDIM];
float aa[NWA];
float bb[NWB];
float prods[NWA][NWB];
int ia[NWA];
int ib[NWB];
float bscale[NWB];
int tid = threadIdx.x + blockDim.x * threadIdx.y;
int dxy = blockDim.x * blockDim.y;
int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x);
int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x);
int i, j, k, icol;
float dv, v, ascale;
float inr = 1.0f / nrows;
for (icol = istart; icol < iend; icol++) { // Iterate over columns
#pragma unroll
for (i = 0; i < NWA; i++) {
ia[i] = nrows * WA[i + icol * NWA]; // Fill the A word matrix
#pragma unroll
for (j = 0; j < NWB; j++) { // clear the products matrix
prods[i][j] = 0;
}
}
#pragma unroll
for (i = 0; i < NWB; i++) {
ib[i] = nrows * WB[i + icol * NWB]; // Fill the B word matrix
}
for (i = tid; i < nrows; i += dxy) { // Now iterate over the rows of this block
#pragma unroll
for (j = 0; j < NWB ; j++) { // Read B
bb[j] = B[i + ib[j]];
}
#pragma unroll
for (j = 0; j < NWA; j++) { // Compute the products of these elements
v = A[i + ia[j]];
#pragma unroll
for (k = 0; k < NWB; k++) {
prods[j][k] += v * bb[k];
}
}
} // Finished the entire block
#pragma unroll
for (i = 0; i < NWA; i++) { // Reduce the products within each warp
#pragma unroll
for (j = 0; j < NWB; j++) {
#pragma unroll
for (k = 1; k < 32; k = k+k) {
float tmp = __shfl_down(prods[i][j], k);
prods[i][j] += tmp;
}
}
}
__syncthreads();
if (threadIdx.x == 0) { // Save the products to SHMEM (one copy per warp)
#pragma unroll
for (i = 0; i < NWA; i++) {
#pragma unroll
for (j = 0; j < NWB; j++) {
CC[i + NWA * (j + NWB * threadIdx.y)] = prods[i][j];
}
}
}
__syncthreads();
for (i = 1; i < blockDim.y; i++) {
__syncthreads();
for (j = tid; j < NWAB; j += dxy) { // Reduce the products across warps
CC[j] += CC[j + i * NWAB];
}
}
__syncthreads();
for (i = tid; i < NWA*NWB; i+= dxy) { // Compute logistic function on all products
v = CC[i];
if (v > 16.0f) {
v = 1.0f;
} else if (v < -16.0f) {
v = 0.0f;
} else {
v = exp(v);
v = v / (1.0f + v);
}
CC[i] = - lrate * v; // All these pairs have label 0
}
__syncthreads();
for (i = tid; i < nrows; i += dxy) {
#pragma unroll
for (j = 0; j < NWA; j++) { // Load A data
aa[j] = A[i + ia[j]];
}
#pragma unroll
for (k = 0; k < NWB; k++) { // Load B data
bb[k] = B[i + ib[k]];
bscale[k] = pow(max(0, ib[k])*inr + 1.0f, vexp);
prods[0][k] = 0;
}
#pragma unroll
for (j = 0; j < NWA; j++) { // Now do the products
ascale = pow(max(0, ia[j])*inr + 1.0f, vexp);
dv = 0;
#pragma unroll
for (k = 0; k < NWB; k++) {
v = CC[j + k * NWA];
dv += ascale * v * bb[k];
prods[0][k] += bscale[k] * v * aa[j];
}
atomicAdd(&A[i + ia[j]], dv); // Update A
}
#pragma unroll
for (k = 0; k < NWB; k++) {
atomicAdd(&B[i + ib[k]], prods[0][k]); // Update B
}
}
__syncthreads();
}
}
/*
* Combined forward-backward word2vec kernel
*/
template<int NWA, int NWB, int MAXD, int BYDIM>
__global__ void __word2vecEvalNeg(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float *Retval) {
const int NWAB = NWA*NWB;
__shared__ float CC[NWA*NWB*BYDIM];
float bb[NWB];
float prods[NWA][NWB];
int ia[NWA];
int ib[NWB];
int tid = threadIdx.x + blockDim.x * threadIdx.y;
int dxy = blockDim.x * blockDim.y;
int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x);
int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x);
int i, j, k, icol;
float v;
double sum = 0;
for (icol = istart; icol < iend; icol++) { // Iterate over columns
#pragma unroll
for (i = 0; i < NWA; i++) {
ia[i] = nrows * WA[i + icol * NWA]; // Fill the A word matrix
#pragma unroll
for (j = 0; j < NWB; j++) { // clear the products matrix
prods[i][j] = 0;
}
}
#pragma unroll
for (i = 0; i < NWB; i++) {
ib[i] = nrows * WB[i + icol * NWB]; // Fill the B word matrix
}
for (i = tid; i < nrows; i += dxy) { // Now iterate over the rows of this block
#pragma unroll
for (j = 0; j < NWB ; j++) { // Read B
bb[j] = B[i + ib[j]];
}
#pragma unroll
for (j = 0; j < NWA; j++) { // Compute the products of these elements
v = A[i + ia[j]];
#pragma unroll
for (k = 0; k < NWB; k++) {
prods[j][k] += v * bb[k];
}
}
} // Finished the entire block
#pragma unroll
for (i = 0; i < NWA; i++) { // Reduce the products within each warp
#pragma unroll
for (j = 0; j < NWB; j++) {
#pragma unroll
for (k = 1; k < 32; k = k+k) {
float tmp = __shfl_down(prods[i][j], k);
prods[i][j] += tmp;
}
}
}
__syncthreads();
if (threadIdx.x == 0) { // Save the products to SHMEM (one copy per warp)
#pragma unroll
for (i = 0; i < NWA; i++) {
#pragma unroll
for (j = 0; j < NWB; j++) {
CC[i + NWA * (j + NWB * threadIdx.y)] = prods[i][j];
}
}
}
__syncthreads();
for (i = 1; i < blockDim.y; i++) {
__syncthreads();
for (j = tid; j < NWAB; j += dxy) { // Reduce the products across warps
CC[j] += CC[j + i * NWAB];
}
}
__syncthreads();
for (i = tid; i < NWA*NWB; i+= dxy) { // Compute logistic function on all products
v = CC[i];
if (v > 16.0f) {
v = 1.0f;
} else if (v < -16.0f) {
v = 0.0f;
} else {
v = exp(v);
v = v / (1.0f + v);
}
CC[i] = log(max(1.0f - v, 1.0e-20f)); // All these pairs have label 0
}
for (i = 1; i < NWA*NWB; i = i + i) {
if ((tid & (i-1)) == 0 && tid + i < NWA*NWB) {
CC[tid] += CC[tid + i];
}
__syncthreads();
}
sum += CC[0];
__syncthreads();
}
if (tid == 0) {
atomicAdd(&Retval[0], (float)sum);
}
}
/*
* Convolutional kernel for word2vec. This handles the positively-label word pairs with
* one context word and the current word.
*/
template<int SKIP, int YDIM, int NREPS>
__global__ void __word2vecPos_exp(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float lrate) {
const int nwindow = 2*SKIP+1;
float aa[NREPS];
float da[NREPS];
__shared__ float CC[YDIM * nwindow];
int i, j, k, tid, icol, dxy, lb, ub, iword, cword;
float bb, db, prod, v;
tid = threadIdx.x + blockDim.x * threadIdx.y;
dxy = blockDim.x * blockDim.y;
int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x);
int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x);
for (icol = istart; icol < iend; icol++) { // Iterate over columns
iword = nrows * W[icol]; // Get the current word
__syncthreads();
lb = LB[icol];
ub = UB[icol];
if (iword >= 0) {
#pragma unroll
for (j = 0; j < NREPS; j++) { // Iterate over blocks of elements
if (tid + j * dxy < nrows) { // Get A
aa[j] = A[tid + j * dxy + iword];
} else {
aa[j] = 0;
}
}
for (i = lb; i <= ub; i++) { // Iterate across the window for A cols
__syncthreads();
cword = nrows * W[icol + i]; // Get the current word
prod = 0;
if (cword >= 0) {
#pragma unroll
for (j = 0; j < NREPS; j++) { // Iterate over blocks of elements
if (tid + j * dxy < nrows) { // Get B col
bb = B[tid + j * dxy + cword];
prod += aa[j] * bb; // Compute the product between current A, B cols
}
}
#pragma unroll
for (k = 1; k < 32; k = k + k) {
prod += __shfl_down(prod, k); // Reduce within warp
}
}
if (threadIdx.x == 0) {
CC[i - lb + threadIdx.y * nwindow] = prod; // Save to SHMEM
}
}
__syncthreads();
for (j = 1; j < blockDim.y; j++) { // Reduce across warps
for (i = tid; i < ub - lb; i += dxy) {
CC[i] += CC[i + j * nwindow];
}
__syncthreads();
}
__syncthreads(); // Apply the sigmoid map
for (i = tid; i < ub - lb; i += dxy) {
v = CC[i];
if (v > 16.0f) {
v = 1.0f;
} else {
v = exp(v);
v = v / (1.0f + v);
}
CC[i] = lrate * (1.0f - v); // All pairs have label 1
}
__syncthreads();
#pragma unroll
for (j = 0; j < NREPS; j++) {
da[j] = 0;
}
for (i = lb; i <= ub; i++) { // Iterate across the window for A cols
cword = nrows * W[icol + i]; // Get the context word
v = CC[i - lb];
if (cword >= 0) {
#pragma unroll
for (j = 0; j < NREPS; j++) { // Iterate over blocks of elements
if (tid + j * dxy < nrows) { // Get B col
bb = B[tid + j * dxy + cword];
da[j] += v * bb;
db = v * aa[j];
atomicAdd(&B[tid + j * dxy + cword], db);
}
}
}
}
#pragma unroll
for (j = 0; j < NREPS; j++) {
if (tid + j * dxy < nrows) {
atomicAdd(&A[tid + j * dxy + iword], da[j]);
}
}
}
}
}
/*
* Combined forward-backward word2vec kernel
*/
template<int NWA, int NWB, int MAXD, int BYDIM>
__global__ void __word2vecNeg_old(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float lrate) {
const int NWAB = NWA*NWB;
__shared__ float CC[NWA*NWB*BYDIM];
float dd[MAXD];
float prods[NWA][NWB];
float aa, v, sum;
int ia[NWA];
int ib[NWB];
int tid = threadIdx.x + blockDim.x * threadIdx.y;
int dxy = blockDim.x * blockDim.y;
int i, j, k, icol;
int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x);
int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x);
for (icol = istart; icol < iend; icol++) { // Iterate over columns
#pragma unroll
for (i = 0; i < NWA; i++) {
ia[i] = nrows * WA[i + icol * NWA]; // Fill the A word matrix
#pragma unroll
for (j = 0; j < NWB; j++) { // clear the products matrix
prods[i][j] = 0;
}
}
#pragma unroll
for (i = 0; i < NWB; i++) {
ib[i] = nrows * WB[i + icol * NWB]; // Fill the B word matrix
}
for (i = tid; i < nrows; i += dxy) { // Now iterate over the rows of this block
#pragma unroll
for (j = 0; j < NWB ; j++) { // Read B
if (ib[j] >= 0) {
dd[j] = B[i + ib[j]];
} else {
dd[j] = 0;
}
}
#pragma unroll
for (j = 0; j < NWA; j++) { // Compute the inner products of these elements
if (ia[j] >= 0) {
aa = A[i + ia[j]];
#pragma unroll
for (k = 0; k < NWB; k++) {
prods[j][k] += aa * dd[k];
}
}
}
} // Finished the entire block
#pragma unroll
for (i = 0; i < NWA; i++) { // Reduce the products within each warp
#pragma unroll
for (j = 0; j < NWB; j++) {
#pragma unroll
for (k = 1; k < 32; k = k+k) {
float tmp = __shfl_down(prods[i][j], k);
prods[i][j] += tmp;
}
}
}
__syncthreads();
if (threadIdx.x == 0) { // Save the products to SHMEM (one copy per warp)
#pragma unroll
for (i = 0; i < NWA; i++) {
#pragma unroll
for (j = 0; j < NWB; j++) {
CC[j + NWB * (i + NWA * threadIdx.y)] = prods[i][j];
}
}
}
__syncthreads();
for (i = 1; i < blockDim.y; i++) {
__syncthreads();
for (j = tid; j < NWAB; j += dxy) { // Reduce the products across warps
CC[j] += CC[j + i * NWAB];
}
}
__syncthreads();
for (i = tid; i < NWA*NWB; i+= dxy) { // Compute logistic function on all products
v = CC[i];
if (v > 16.0f) {
v = 1.0f;
} else {
v = exp(v);
v = v / (1.0f + v);
}
CC[i] = - lrate * v; // All these pairs have label 0
}
__syncthreads();
for (i = tid; i < nrows; i += dxy) {
#pragma unroll
for (j = 0; j < NWB; j++) { // Load B data
if (ib[j] >= 0) {
dd[j] = B[i + ib[j]];
} else {
dd[j] = 0;
}
}
#pragma unroll
for (j = 0; j < NWA; j++) { // Now do the product
if (ia[j] >= 0) {
sum = 0;
#pragma unroll
for (k = 0; k < NWB; k++) {
float xx = CC[j + k * NWA];
sum += xx * dd[k];
}
atomicAdd(&A[i + ia[j]], sum);
}
}
#pragma unroll
for (j = 0; j < NWA; j++) { // Load A data
if (ia[j] >= 0) {
dd[j] = A[i + ia[j]];
} else {
dd[j] = 0;
}
}
#pragma unroll
for (j = 0; j < NWB; j++) { // Now do the product
if (ib[j] >= 0) {
sum = 0;
#pragma unroll
for (k = 0; k < NWA; k++) {
float xx = CC[k + j * NWA];
sum += xx * dd[k];
}
atomicAdd(&B[i + ib[j]], sum);
}
}
}
__syncthreads();
}
}
/*
*
* Simple forward kernel for word2vec. Computes inner products of columns from A with columns from B.
* The column indices are specified by two "word" matrices. The inner products are computed as an outer product
* of the word matrices.
*
* NWA is the number of words per column in WA
* NWB is the number of words per column in WB
*
* Columns of the output matrix C are <window> = NWA*NWB long, and contain inner products with corresponding columns of B.
*
*/
template<int NWA, int NWB, int BDIM>
__global__ void __word2vecFwd(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float *C) {
const int NWAB = NWA*NWB;
__shared__ float CC[NWA*NWB*BDIM];
float aa;
float bb[NWB];
float prods[NWA][NWB];
int wa[NWA];
int wb[NWB];
int tid = threadIdx.x + blockDim.x * threadIdx.y;
int dxy = blockDim.x * blockDim.y;
int i, j, k, icol;
int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x);
int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x);
for (icol = istart; icol < iend; icol++) { // Iterate over columns
#pragma unroll
for (i = 0; i < NWA; i++) {
wa[i] = nrows * WA[i + icol * NWA]; // Fill the A word matrix
#pragma unroll
for (j = 0; j < NWB; j++) { // clear the products matrix
prods[i][j] = 0;
}
}
#pragma unroll
for (i = 0; i < NWB; i++) {
wb[i] = WB[i + icol * NWB]; // Fill the B word matrix
}
for (i = tid; i < nrows; i += dxy) { // Now iterate over the rows of this block
#pragma unroll
for (j = 0; j < NWB ; j++) { // Read B
bb[j] = B[i + wb[j] * nrows];
}
#pragma unroll
for (j = 0; j < NWA; j++) { // Computes the products of these elements
aa = A[i + wa[j] * nrows];
#pragma unroll
for (k = 0; k < NWB; k++) {
prods[j][k] += aa * bb[k];
}
}
} // Finished the entire block
#pragma unroll
for (i = 0; i < NWA; i++) { // Reduce the products within each warp
#pragma unroll
for (j = 0; j < NWB; j++) {
#pragma unroll
for (k = 1; k < 32; k = k+k) {
float tmp = __shfl_down(prods[i][j], k);
prods[i][j] += tmp;
}
}
}
__syncthreads();
if (threadIdx.x == 0) { // Save the products to SHMEM (one copy per warp)
#pragma unroll
for (i = 0; i < NWA; i++) {
#pragma unroll
for (j = 0; j < NWB; j++) {
CC[j + NWB * (i + NWA * threadIdx.y)] = prods[i][j];
}
}
}
__syncthreads();
for (i = 1; i < blockDim.y; i++) {
__syncthreads();
#pragma unroll
for (j = tid; j < NWAB; j += dxy) { // Reduce the products across warps
CC[j] += CC[j + i * NWAB];
}
}
__syncthreads();
for (i = tid; i < NWAB; i += dxy) { // Save to main memory
C[i + icol * NWAB] = CC[i];
//atomicAdd(&C[i + icol * NWAB], CC[i]);
}
__syncthreads();
}
}
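// Output layout: column icol of C holds the NWA*NWB inner products with
// C[j + NWB*i + icol*NWA*NWB] = <A[:, WA[i]], B[:, WB[j]]>, matching the
// CC[j + NWB*i] indexing used when the per-warp results are merged above.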
/*
*
* Simple backward kernel for word2vec.
* Computes the gradient for A given B or vice-versa, and does an SGD update.
*
* NWA is the number of words per column in WA
* NWB is the number of words per column in WB
*
*/
template<int NWA, int NWB, int MAXDIM>
__global__ void __word2vecBwd(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float *C, float lrate) {
const int NWAB = NWA * NWB;
float dd[MAXDIM];
int wa[NWA];
int wb[NWB];
__shared__ float cc[NWA*NWB];
int tid = threadIdx.x;
int fid = threadIdx.x + blockDim.x * threadIdx.y;
int dxy = blockDim.x * blockDim.y;
int icol, i, j, k;
float sum;
int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x);
int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x);
for (icol = istart; icol < iend; icol++) { // iterate in columns
#pragma unroll
for (j = 0; j < NWA; j++) {
wa[j] = WA[j + icol * NWA]; // Load the A word matrix
}
__syncthreads();
#pragma unroll
for (j = 0; j < NWB; j++) {
wb[j] = WB[j + icol * NWB]; // Load the B word matrix
}
for (i = fid; i < NWAB; i += dxy) {
cc[i] = C[i + icol * NWAB];
}
__syncthreads();
for (i = tid; i < nrows; i += dxy) {
#pragma unroll
for (j = 0; j < NWB; j++) { // Load the data
dd[j] = B[i + wb[j] * nrows];
}
#pragma unroll
for (j = 0; j < NWA; j++) { // Now do the product
sum = 0;
#pragma unroll
for (k = 0; k < NWB; k++) {
float xx = cc[j + k * NWA];
sum += xx * dd[k];
}
atomicAdd(&A[i + wa[j] * nrows], sum * lrate);
}
#pragma unroll
for (j = 0; j < NWA; j++) { // Load the data
dd[j] = A[i + wa[j] * nrows];
}
#pragma unroll
for (j = 0; j < NWB; j++) { // Now do the product
sum = 0;
#pragma unroll
for (k = 0; k < NWA; k++) {
float xx = cc[k + j * NWA];
sum += xx * dd[k];
}
atomicAdd(&B[i + wb[j] * nrows], sum * lrate);
}
}
}
}
#else
template<int SKIP, int BYDIM, int NREPS>
__global__ void __word2vecPos(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float lrate, float vexp) {}
template<int NWA, int NWB, int MAXD, int BYDIM>
__global__ void __word2vecNeg(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float lrate, float vexp) {}
template<int SKIP, int BYDIM, int NREPS>
__global__ void __word2vecEvalPos(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float *Retval) {}
template<int NWA, int NWB, int MAXD, int BYDIM>
__global__ void __word2vecEvalNeg(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float *Retval) {}
template<int NWA, int NWB, int BDIM>
__global__ void __word2vecFwd(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float *C) {}
template<int NWA, int NWB, int MAXDIM>
__global__ void __word2vecBwd(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float *C, float lrate) {}
#endif
int word2vecPos(int nrows, int ncols, int skip, int *W, int *LB, int *UB, float *A, float *B, float lrate, float vexp) {
dim3 threads(32, CDIM, 1);
int nblocks = min(64, ncols);
switch(skip) {
case 5 :hipLaunchKernelGGL(( __word2vecPos<5, CDIM, 10/CDIM>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, W, LB, UB, A, B, lrate, vexp); break;
case 3 :hipLaunchKernelGGL(( __word2vecPos<3, CDIM, 10/CDIM>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, W, LB, UB, A, B, lrate, vexp); break;
case 2 :hipLaunchKernelGGL(( __word2vecPos<2, CDIM, 10/CDIM>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, W, LB, UB, A, B, lrate, vexp); break;
default : printf("word2vecPos unsupport size %d\n", skip); return 1;
}
hipDeviceSynchronize();
int err = hipGetLastError();
return err;
}
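// Host-side sketch (hypothetical device pointers and placeholder hyperparameters):
//   int err = word2vecPos(nrows, ncols, 5, d_W, d_LB, d_UB, d_A, d_B,
//                         0.02f, 0.5f);
// The return value is the error code observed after the device synchronize.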
int word2vecNeg(int nrows, int ncols, int nwa, int nwb, int *WA, int *WB, float *A, float *B, float lrate, float vexp) {
dim3 threads(32, BYDIMF, 1);
int nblocks = min(2048, 2 + (ncols - 1));
int which = nwa*10000 + nwb;
switch (which) {
case 50001:hipLaunchKernelGGL(( __word2vecNeg<5,1,5,BYDIMF>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, WA, WB, A, B, lrate, vexp); break;
case 50005:hipLaunchKernelGGL(( __word2vecNeg<5,5,5,BYDIMF>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, WA, WB, A, B, lrate, vexp); break;
case 100005:hipLaunchKernelGGL(( __word2vecNeg<10,5,10,BYDIMF>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, WA, WB, A, B, lrate, vexp); break;
case 50010:hipLaunchKernelGGL(( __word2vecNeg<5,10,10,BYDIMF>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, WA, WB, A, B, lrate, vexp); break;
// case 150010: __word2vecNeg<15,10,15><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, lrate); break;
default : printf("word2vec unsupport size combination %d %d\n", nwa, nwb); return 1;
}
hipDeviceSynchronize();
int err = hipGetLastError();
return err;
}
int word2vecEvalPos(int nrows, int ncols, int skip, int *W, int *LB, int *UB, float *A, float *B, float *Retval) {
dim3 threads(32, CDIM, 1);
int nblocks = min(64, ncols);
switch(skip) {
case 5 :hipLaunchKernelGGL(( __word2vecEvalPos<5, CDIM, 10/CDIM>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, W, LB, UB, A, B, Retval); break;
case 3 :hipLaunchKernelGGL(( __word2vecEvalPos<3, CDIM, 10/CDIM>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, W, LB, UB, A, B, Retval); break;
case 2 :hipLaunchKernelGGL(( __word2vecEvalPos<2, CDIM, 10/CDIM>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, W, LB, UB, A, B, Retval); break;
default : printf("word2vecEvalPos unsupport size %d\n", skip); return 1;
}
hipDeviceSynchronize();
int err = hipGetLastError();
return err;
}
int word2vecEvalNeg(int nrows, int ncols, int nwa, int nwb, int *WA, int *WB, float *A, float *B, float *Retval) {
dim3 threads(32, BYDIMF, 1);
int nblocks = min(2048, 2 + (ncols - 1));
int which = nwa*10000 + nwb;
switch (which) {
case 50001:hipLaunchKernelGGL(( __word2vecEvalNeg<5,1,5,BYDIMF>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, WA, WB, A, B, Retval); break;
case 50005:hipLaunchKernelGGL(( __word2vecEvalNeg<5,5,5,BYDIMF>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, WA, WB, A, B, Retval); break;
case 100005:hipLaunchKernelGGL(( __word2vecEvalNeg<10,5,10,BYDIMF>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, WA, WB, A, B, Retval); break;
case 50010:hipLaunchKernelGGL(( __word2vecEvalNeg<5,10,10,BYDIMF>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, WA, WB, A, B, Retval); break;
// case 150010: __word2vecEvalNeg<15,10,15><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, Retval); break;
default : printf("word2vecEvalNeg unsupported size combination %d %d\n", nwa, nwb); return 1;
}
hipDeviceSynchronize();
int err = hipGetLastError();
return err;
}
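// Host launcher for the forward (inner-product) kernel; each column of C
// receives an nwa*nwb block of inner products.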
int word2vecFwd(int nrows, int ncols, int nwa, int nwb, int *WA, int *WB, float *A, float *B, float *C) {
dim3 threads(32, BYDIMF, 1);
int nblocks = min(4096, 2 + (ncols - 1));
int which = nwa*10000 + nwb;
switch (which) {
case 50001:hipLaunchKernelGGL(( __word2vecFwd<5,1,BYDIMF>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, WA, WB, A, B, C); break;
case 50005:hipLaunchKernelGGL(( __word2vecFwd<5,5,BYDIMF>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, WA, WB, A, B, C); break;
case 100005:hipLaunchKernelGGL(( __word2vecFwd<10,5,BYDIMF>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, WA, WB, A, B, C); break;
default : printf("word2vecFwd unsupported size combination %d %d\n", nwa, nwb); return 1;
}
hipDeviceSynchronize();
int err = hipGetLastError();
return err;
}
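// Host launcher for the backward kernel; applies SGD updates to A and B
// from the gradients in C, scaled by lrate.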
int word2vecBwd(int nrows, int ncols, int nwa, int nwb, int *WA, int *WB, float *A, float *B, float *C, float lrate) {
dim3 threads(32*BYDIMB, 1, 1);
int nblocks = min(2048, 2 + (ncols - 1));
int which = nwa*10000 + nwb;
switch (which) {
case 50001:hipLaunchKernelGGL(( __word2vecBwd<5,1,5>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, WA, WB, A, B, C, lrate); break;
case 50005:hipLaunchKernelGGL(( __word2vecBwd<5,5,5>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, WA, WB, A, B, C, lrate); break;
case 100005:hipLaunchKernelGGL(( __word2vecBwd<10,5,10>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, WA, WB, A, B, C, lrate); break;
default : printf("word2vecBwd unsupported size combination %d %d\n", nwa, nwb); return 1;
}
hipDeviceSynchronize();
int err = hipGetLastError();
return err;
}
| b5d7974114f76f690cead7c9dc044d62276ea679.cu | #include <cuda_runtime.h>
#include <curand_kernel.h>
#include <stdio.h>
#include <MatKernel.hpp>
#define BYDIMF 2
#define CDIM 5
#define BYDIMB 5
#if __CUDA_ARCH__ >= 300
/*
 * Positive kernel for word2vec. This handles the positively-labeled word pairs with
 * one context word and the current word.
*/
template<int SKIP, int YDIM, int NREPS>
__global__ void __word2vecPos(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float lrate, float vexp) {
const int nwindow = 2*SKIP+1;
int iwords[nwindow];
float aa[NREPS];
float daa[NREPS];
float bb[NREPS][nwindow];
float dbb[NREPS][nwindow];
__shared__ float CC[YDIM * nwindow];
int i, j, k, tid, indx, icol, dxy, lb, ub;
float prod, v, ascale, bscale;
tid = threadIdx.x + blockDim.x * threadIdx.y;
dxy = blockDim.x * blockDim.y;
bool good;
int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x);
int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x);
float inr = 1.0f / nrows;
#pragma unroll
for (i = 0; i < nwindow; i++) { // Prefill the word and aa window buffers
if (istart + i - SKIP - 1 >= 0) {
iwords[i] = nrows * W[istart + i - SKIP - 1]; // Get a new word address
} else {
iwords[i] = -1;
}
good = (iwords[i] >= 0);
#pragma unroll
for (j = 0; j < NREPS; j++) { // Get the B vector for this word
indx = tid + j * dxy;
if (good && indx < nrows) {
bb[j][i] = B[indx + iwords[i]];
} else {
bb[j][i] = 0;
}
dbb[j][i] = 0;
}
}
for (icol = istart; icol < iend; icol++) { // Iterate over columns
#pragma unroll
for (i = 0; i < nwindow-1; i++) { // slide iwords down
iwords[i] = iwords[i+1];
#pragma unroll
for (j = 0; j < NREPS; j++) {
bb[j][i] = bb[j][i+1]; // slide data down
dbb[j][i] = dbb[j][i+1]; // slide deriv down
}
}
good = (icol + SKIP < ncols);
if (good) {
iwords[nwindow - 1] = nrows * W[icol + SKIP]; // Get a new word address
} else {
iwords[nwindow - 1] = -1;
}
good = good && iwords[nwindow-1] >= 0;
#pragma unroll
for (j = 0; j < NREPS; j++) { // Get a new B column
indx = tid + j * dxy;
if (good && indx < nrows) {
bb[j][nwindow - 1] = B[indx + iwords[nwindow - 1]];
} else {
bb[j][nwindow - 1] = 0;
}
dbb[j][nwindow-1] = 0;
if (iwords[SKIP] >= 0 && indx < nrows) { // Get a new A column
aa[j] = A[indx + iwords[SKIP]];
} else {
aa[j] = 0;
}
}
lb = LB[icol];
ub = UB[icol];
__syncthreads();
if (iwords[SKIP] >= 0) {
#pragma unroll
for (i = 0; i < nwindow; i++) { // Iterate across the window for B cols
prod = 0;
if (i >= SKIP + lb && i <= SKIP + ub && i != SKIP) {
#pragma unroll
for (j = 0; j < NREPS; j++) { // Iterate over blocks of elements
prod += bb[j][i] * aa[j]; // Compute the product between current A, B cols
}
#pragma unroll
for (k = 1; k < 32; k = k + k) {
v = __shfl_down(prod, k); // Reduce within warp
prod += v;
}
if (threadIdx.x == 0) {
CC[i - SKIP - lb + threadIdx.y * nwindow] = prod; // Save to SHMEM
}
}
}
__syncthreads();
for (i = 1; i < blockDim.y; i++) { // Reduce across warps
for (k = tid; k <= ub - lb; k += dxy) {
CC[k] += CC[k + i * nwindow];
}
__syncthreads();
}
__syncthreads(); // Apply the sigmoid map
for (i = tid; i <= ub - lb; i += dxy) {
v = CC[i];
if (v > 16.0f) {
v = 1.0f;
} else if (v < -16.0f) {
v = 0.0f;
} else {
v = exp(v);
v = v / (1.0f + v);
}
CC[i] = 1.0f - v; // All pairs have label 1
}
__syncthreads();
#pragma unroll
for (j = 0; j < NREPS; j++) {
daa[j] = 0;
}
ascale = pow(max(0, iwords[SKIP])*inr + 1.0f, vexp);
#pragma unroll
for (i = 0; i < nwindow; i++) { // Iterate across the window for A cols
if (i >= SKIP + lb && i <= SKIP + ub && i != SKIP && iwords[i] >= 0) {
bscale = pow(max(0, iwords[i])*inr + 1.0f, vexp);
v = lrate * CC[i - SKIP - lb];
#pragma unroll
for (j = 0; j < NREPS; j++) {
daa[j] += ascale * v * bb[j][i]; // Update A's derivative
dbb[j][i] += bscale * v * aa[j]; // Update B's derivative
}
}
}
__syncthreads();
#pragma unroll
for (j = 0; j < NREPS; j++) {
if (tid + j * dxy < nrows) { // Save the A column
atomicAdd(&A[tid + j * dxy + iwords[SKIP]], daa[j]);
}
}
if (iwords[0] >= 0) {
#pragma unroll
for (j = 0; j < NREPS; j++) {
if (tid + j * dxy < nrows) { // Save the B column
atomicAdd(&B[tid + j * dxy + iwords[0]], dbb[j][0]);
}
}
}
__syncthreads();
}
}
#pragma unroll
for (i = 1; i < nwindow; i++) { // Clear out the derivative queue
if (iwords[i] >= 0) {
#pragma unroll
for (j = 0; j < NREPS; j++) { // Save the B column
if (tid + j * dxy < nrows) {
atomicAdd(&B[tid + j * dxy + iwords[i]], dbb[j][i]);
}
}
}
}
}
/*
 * Evaluation kernel for word2vec. This scores the positively-labeled word pairs
 * (one context word and the current word) and accumulates their log-likelihood.
*/
template<int SKIP, int YDIM, int NREPS>
__global__ void __word2vecEvalPos(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float *Retval) {
const int nwindow = 2*SKIP+1;
int iwords[nwindow];
float aa[NREPS];
float bb[NREPS][nwindow];
__shared__ float CC[YDIM * nwindow];
int i, j, k, tid, indx, icol, dxy, lb, ub;
float prod, v;
tid = threadIdx.x + blockDim.x * threadIdx.y;
dxy = blockDim.x * blockDim.y;
bool good;
double sum = 0;
int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x);
int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x);
#pragma unroll
for (i = 0; i < nwindow; i++) { // Prefill the word and aa window buffers
if (istart + i - SKIP - 1 >= 0) {
iwords[i] = nrows * W[istart + i - SKIP - 1]; // Get a new word
} else {
iwords[i] = -1;
}
good = (iwords[i] >= 0);
#pragma unroll
for (j = 0; j < NREPS; j++) { // Get the B vector for this word
indx = tid + j * dxy;
if (good && indx < nrows) {
bb[j][i] = B[indx + iwords[i]];
} else {
bb[j][i] = 0;
}
}
}
for (icol = istart; icol < iend; icol++) { // Iterate over columns
#pragma unroll
for (i = 0; i < nwindow-1; i++) { // slide iwords down
iwords[i] = iwords[i+1];
#pragma unroll
for (j = 0; j < NREPS; j++) {
bb[j][i] = bb[j][i+1]; // slide data down
}
}
good = (icol + SKIP < ncols);
if (good) {
iwords[nwindow - 1] = nrows * W[icol + SKIP]; // Get a new word
} else {
iwords[nwindow - 1] = -1;
}
good = good && iwords[nwindow-1] >= 0;
#pragma unroll
for (j = 0; j < NREPS; j++) { // Get a new B column
indx = tid + j * dxy;
if (good && indx < nrows) {
bb[j][nwindow - 1] = B[indx + iwords[nwindow - 1]];
} else {
bb[j][nwindow - 1] = 0;
}
if (iwords[SKIP] >= 0 && indx < nrows) { // Get a new A column
aa[j] = A[indx + iwords[SKIP]];
} else {
aa[j] = 0;
}
}
lb = LB[icol];
ub = UB[icol];
__syncthreads();
#pragma unroll
for (i = 0; i < nwindow; i++) { // Iterate across the window for B cols
if (i >= SKIP + lb && i <= SKIP + ub) {
if (i == SKIP || iwords[SKIP] < 0 || iwords[i] < 0) { // Give this word a large score (gives zero contribution to loss)
prod = 20.0f;
} else {
prod = 0;
#pragma unroll
for (j = 0; j < NREPS; j++) { // Iterate over blocks of elements
prod += bb[j][i] * aa[j]; // Compute the product between current A, B cols
}
#pragma unroll
for (k = 1; k < 32; k = k + k) {
v = __shfl_down(prod, k); // Reduce within warp
prod += v;
}
}
if (threadIdx.x == 0) {
CC[i - SKIP - lb + threadIdx.y * nwindow] = prod; // Save to SHMEM
}
}
}
__syncthreads();
for (i = 1; i < blockDim.y; i++) { // Reduce across warps
for (k = tid; k <= ub - lb; k += dxy) {
CC[k] += CC[k + i * nwindow];
}
__syncthreads();
}
__syncthreads(); // Apply the sigmoid map
for (i = tid; i <= ub - lb; i += dxy) {
v = CC[i];
if (v > 16.0f) {
v = 1.0f;
} else if (v < -16.0f) {
v = 0.0f;
} else {
v = exp(v);
v = v / (1.0f + v);
}
CC[i] = log(max(v, 1.0e-20f)); // Compute the loss
}
__syncthreads();
for (i = 1; i <= ub - lb; i = i + i) {
if ((tid & (i-1)) == 0 && tid + i <= ub - lb) {
CC[tid] += CC[tid + i];
}
__syncthreads();
}
sum += CC[0];
__syncthreads();
}
if (tid == 0) {
atomicAdd(&Retval[0], (float)sum);
}
}
template<int NSKIP, int BYDIM>
__global__ void __word2vecPosy(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float lrate, float vexp) {
__shared__ float CC[NSKIP*2*BYDIM];
float aa;
int ib[NSKIP*2];
float prods[NSKIP*2];
float bscale[NSKIP*2];
int ia, iword, lb, ub;
int tid = threadIdx.x + blockDim.x * threadIdx.y;
int dxy = blockDim.x * blockDim.y;
int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x);
int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x);
int i, j, k, icol, jcol;
float bb, db, dv, v, ascale, tmp;
float inr = 1.0f / nrows;
for (icol = istart; icol < iend; icol++) { // Iterate over columns
ia = nrows * W[icol];
if (ia >= 0) { // Load lb and ub values
lb = LB[icol];
ub = UB[icol];
jcol = threadIdx.x - NSKIP;
iword = -1;
if (jcol >= lb && jcol <= ub) { // Load words in the window
iword = W[icol + jcol];
}
#pragma unroll
for (i = 0; i < NSKIP; i++) { // Share window word ids across threads, clear prods
ib[i] = nrows * __shfl(iword, i);
ib[i+NSKIP] = nrows * __shfl(iword, i+NSKIP+1);
prods[i] = 0;
prods[i+NSKIP] = 0;
}
for (i = tid; i < nrows; i += dxy) { // Compute products between center and context words
aa = A[i + ia];
#pragma unroll
for (j = 0; j < NSKIP*2; j++) {
if (ib[j] >= 0) {
bb = B[i + ib[j]];
prods[j] += aa * bb;
}
}
}
#pragma unroll
for (j = 0; j < NSKIP*2; j++) { // Reduce prods within each warp
#pragma unroll
for (k = 1; k < 32; k = k+k) {
tmp = __shfl_down(prods[j], k);
prods[j] += tmp;
}
}
__syncthreads();
if (threadIdx.x == 0) { // Save the products to SHMEM (one copy per warp)
#pragma unroll
for (j = 0; j < 2*NSKIP; j++) {
CC[j + NSKIP * 2 * threadIdx.y] = prods[j];
}
}
__syncthreads();
for (i = 1; i < blockDim.y; i++) { // Reduce the products across warps
__syncthreads();
for (j = tid; j < NSKIP * 2; j += dxy) {
CC[j] += CC[j + i * NSKIP * 2];
}
}
__syncthreads();
for (i = tid; i < NSKIP * 2; i+= dxy) { // Compute logistic function on all products
v = CC[i];
if (v > 16.0f) {
v = 1.0f;
} else if (v < -16.0f) {
v = 0.0f;
} else {
v = exp(v);
v = v / (1.0f + v);
}
CC[i] = lrate * (1 - v); // All these pairs have label 1
}
__syncthreads(); // Now do scaled gradients
ascale = pow(max(0, ia)*inr + 1.0f, vexp); // Simulated ADAGRAD on A
for (j = 0; j < NSKIP * 2; j++) { // Load B data
if (ib[j] >= 0) {
bscale[j] = pow(max(0, ib[j])*inr + 1.0f, vexp); // Simulated ADAGRAD on B
} else {
bscale[j] = 0;
}
prods[j] = CC[j];
}
__syncthreads();
dv = 0;
for (i = tid; i < nrows; i += dxy) { // Update vecs with derivatives
aa = A[i + ia];
#pragma unroll
for (j = 0; j < NSKIP * 2; j++) { // Load B data
if (ib[j] >= 0) {
bb = B[i + ib[j]];
dv += ascale * prods[j] * bb;
db = bscale[j] * prods[j] * aa;
atomicAdd(&B[i + ib[j]], db); // Update B
}
}
atomicAdd(&A[i + ia], dv); // Update A
}
__syncthreads();
}
}
}
template<int NSKIP, int BYDIM>
__global__ void __word2vecEvalPosy(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float *retval) {
__shared__ float CC[NSKIP*2*BYDIM];
float aa;
float prods[NSKIP*2];
int ia, iword, lb, ub;
int ib[NSKIP*2];
int tid = threadIdx.x + blockDim.x * threadIdx.y;
int dxy = blockDim.x * blockDim.y;
int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x);
int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x);
int i, j, k, icol, jcol;
float bb, v, tmp, sum;
sum = 0;
for (icol = istart; icol < iend; icol++) { // Iterate over columns
ia = nrows * W[icol];
if (ia >= 0) { // Load lb and ub values
lb = LB[icol];
ub = UB[icol];
jcol = threadIdx.x - NSKIP;
iword = -1;
if (jcol >= lb && jcol <= ub) { // Load words in the window
iword = W[icol + jcol];
}
#pragma unroll
for (i = 0; i < NSKIP; i++) { // Share window word ids across threads, clear prods
ib[i] = nrows * __shfl(iword, i);
ib[i+NSKIP] = nrows * __shfl(iword, i+NSKIP+1);
prods[i] = 0;
prods[i+NSKIP] = 0;
}
for (i = tid; i < nrows; i += dxy) { // Compute products between center and context words
aa = A[i + ia];
#pragma unroll
for (j = 0; j < NSKIP*2; j++) {
if (ib[j] >= 0) {
bb = B[i + ib[j]];
prods[j] += aa * bb;
}
}
}
#pragma unroll
for (j = 0; j < NSKIP*2; j++) { // Reduce prods within each warp
#pragma unroll
for (k = 1; k < 32; k = k+k) {
tmp = __shfl_down(prods[j], k);
prods[j] += tmp;
}
}
__syncthreads();
if (threadIdx.x == 0) { // Save the products to SHMEM (one copy per warp)
#pragma unroll
for (j = 0; j < 2*NSKIP; j++) {
CC[j + NSKIP * 2 * threadIdx.y] = prods[j];
}
}
__syncthreads();
for (i = 1; i < blockDim.y; i++) { // Reduce the products across warps
__syncthreads();
for (j = tid; j < NSKIP * 2; j += dxy) {
CC[j] += CC[j + i * NSKIP * 2];
}
}
__syncthreads();
for (i = tid; i < NSKIP * 2; i+= dxy) { // Compute logistic function on all products
v = CC[i];
if (v > 16.0f) {
v = 1.0f;
} else if (v < -16.0f) {
v = 0.0f;
} else {
v = exp(v);
v = v / (1.0f + v);
}
CC[i] = log(max(v, 1.0e-20f)); // All these pairs have label 1
}
__syncthreads(); // Now sum likelihood over window
for (i = 1; i < 2 * NSKIP; i = i + i) {
if ((tid & (i-1)) == 0 && tid + i < 2 * NSKIP) {
CC[tid] += CC[tid + i];
}
__syncthreads();
}
sum += CC[0];
__syncthreads();
}
}
if (tid == 0) {
atomicAdd(&retval[0], (float)sum);
}
}
/*
 * Combined forward-backward word2vec kernel for the negative samples
*/
template<int NWA, int NWB, int MAXD, int BYDIM>
__global__ void __word2vecNeg(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float lrate, float vexp) {
const int NWAB = NWA*NWB;
__shared__ float CC[NWA*NWB*BYDIM];
float aa[NWA];
float bb[NWB];
float prods[NWA][NWB];
int ia[NWA];
int ib[NWB];
float bscale[NWB];
int tid = threadIdx.x + blockDim.x * threadIdx.y;
int dxy = blockDim.x * blockDim.y;
int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x);
int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x);
int i, j, k, icol;
float dv, v, ascale;
float inr = 1.0f / nrows;
for (icol = istart; icol < iend; icol++) { // Iterate over columns
#pragma unroll
for (i = 0; i < NWA; i++) {
ia[i] = nrows * WA[i + icol * NWA]; // Fill the A word matrix
#pragma unroll
for (j = 0; j < NWB; j++) { // clear the products matrix
prods[i][j] = 0;
}
}
#pragma unroll
for (i = 0; i < NWB; i++) {
ib[i] = nrows * WB[i + icol * NWB]; // Fill the B word matrix
}
for (i = tid; i < nrows; i += dxy) { // Now iterate over the rows of this block
#pragma unroll
for (j = 0; j < NWB ; j++) { // Read B
bb[j] = B[i + ib[j]];
}
#pragma unroll
for (j = 0; j < NWA; j++) { // Compute the products of these elements
v = A[i + ia[j]];
#pragma unroll
for (k = 0; k < NWB; k++) {
prods[j][k] += v * bb[k];
}
}
} // Finished the entire block
#pragma unroll
for (i = 0; i < NWA; i++) { // Reduce the products within each warp
#pragma unroll
for (j = 0; j < NWB; j++) {
#pragma unroll
for (k = 1; k < 32; k = k+k) {
float tmp = __shfl_down(prods[i][j], k);
prods[i][j] += tmp;
}
}
}
__syncthreads();
if (threadIdx.x == 0) { // Save the products to SHMEM (one copy per warp)
#pragma unroll
for (i = 0; i < NWA; i++) {
#pragma unroll
for (j = 0; j < NWB; j++) {
CC[i + NWA * (j + NWB * threadIdx.y)] = prods[i][j];
}
}
}
__syncthreads();
for (i = 1; i < blockDim.y; i++) {
__syncthreads();
for (j = tid; j < NWAB; j += dxy) { // Reduce the products across warps
CC[j] += CC[j + i * NWAB];
}
}
__syncthreads();
for (i = tid; i < NWA*NWB; i+= dxy) { // Compute logistic function on all products
v = CC[i];
if (v > 16.0f) {
v = 1.0f;
} else if (v < -16.0f) {
v = 0.0f;
} else {
v = exp(v);
v = v / (1.0f + v);
}
CC[i] = - lrate * v; // All these pairs have label 0
}
__syncthreads();
for (i = tid; i < nrows; i += dxy) {
#pragma unroll
for (j = 0; j < NWA; j++) { // Load A data
aa[j] = A[i + ia[j]];
}
#pragma unroll
for (k = 0; k < NWB; k++) { // Load B data
bb[k] = B[i + ib[k]];
bscale[k] = pow(max(0, ib[k])*inr + 1.0f, vexp);
prods[0][k] = 0;
}
#pragma unroll
for (j = 0; j < NWA; j++) { // Now do the products
ascale = pow(max(0, ia[j])*inr + 1.0f, vexp);
dv = 0;
#pragma unroll
for (k = 0; k < NWB; k++) {
v = CC[j + k * NWA];
dv += ascale * v * bb[k];
prods[0][k] += bscale[k] * v * aa[j];
}
atomicAdd(&A[i + ia[j]], dv); // Update A
}
#pragma unroll
for (k = 0; k < NWB; k++) {
atomicAdd(&B[i + ib[k]], prods[0][k]); // Update B
}
}
__syncthreads();
}
}
/*
 * Evaluation kernel for the negative samples; accumulates the log-likelihood of the label-0 pairs into Retval
*/
template<int NWA, int NWB, int MAXD, int BYDIM>
__global__ void __word2vecEvalNeg(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float *Retval) {
const int NWAB = NWA*NWB;
__shared__ float CC[NWA*NWB*BYDIM];
float bb[NWB];
float prods[NWA][NWB];
int ia[NWA];
int ib[NWB];
int tid = threadIdx.x + blockDim.x * threadIdx.y;
int dxy = blockDim.x * blockDim.y;
int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x);
int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x);
int i, j, k, icol;
float v;
double sum = 0;
for (icol = istart; icol < iend; icol++) { // Iterate over columns
#pragma unroll
for (i = 0; i < NWA; i++) {
ia[i] = nrows * WA[i + icol * NWA]; // Fill the A word matrix
#pragma unroll
for (j = 0; j < NWB; j++) { // clear the products matrix
prods[i][j] = 0;
}
}
#pragma unroll
for (i = 0; i < NWB; i++) {
ib[i] = nrows * WB[i + icol * NWB]; // Fill the B word matrix
}
for (i = tid; i < nrows; i += dxy) { // Now iterate over the rows of this block
#pragma unroll
for (j = 0; j < NWB ; j++) { // Read B
bb[j] = B[i + ib[j]];
}
#pragma unroll
for (j = 0; j < NWA; j++) { // Compute the products of these elements
v = A[i + ia[j]];
#pragma unroll
for (k = 0; k < NWB; k++) {
prods[j][k] += v * bb[k];
}
}
} // Finished the entire block
#pragma unroll
for (i = 0; i < NWA; i++) { // Reduce the products within each warp
#pragma unroll
for (j = 0; j < NWB; j++) {
#pragma unroll
for (k = 1; k < 32; k = k+k) {
float tmp = __shfl_down(prods[i][j], k);
prods[i][j] += tmp;
}
}
}
__syncthreads();
if (threadIdx.x == 0) { // Save the products to SHMEM (one copy per warp)
#pragma unroll
for (i = 0; i < NWA; i++) {
#pragma unroll
for (j = 0; j < NWB; j++) {
CC[i + NWA * (j + NWB * threadIdx.y)] = prods[i][j];
}
}
}
__syncthreads();
for (i = 1; i < blockDim.y; i++) {
__syncthreads();
for (j = tid; j < NWAB; j += dxy) { // Reduce the products across warps
CC[j] += CC[j + i * NWAB];
}
}
__syncthreads();
for (i = tid; i < NWA*NWB; i+= dxy) { // Compute logistic function on all products
v = CC[i];
if (v > 16.0f) {
v = 1.0f;
} else if (v < -16.0f) {
v = 0.0f;
} else {
v = exp(v);
v = v / (1.0f + v);
}
CC[i] = log(max(1.0f - v, 1.0e-20f)); // All these pairs have label 0
}
for (i = 1; i < NWA*NWB; i = i + i) {
if ((tid & (i-1)) == 0 && tid + i < NWA*NWB) {
CC[tid] += CC[tid + i];
}
__syncthreads();
}
sum += CC[0];
__syncthreads();
}
if (tid == 0) {
atomicAdd(&Retval[0], (float)sum);
}
}
/*
 * Convolutional kernel for word2vec. This handles the positively-labeled word pairs with
 * one context word and the current word.
*/
template<int SKIP, int YDIM, int NREPS>
__global__ void __word2vecPos_exp(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float lrate) {
const int nwindow = 2*SKIP+1;
float aa[NREPS];
float da[NREPS];
__shared__ float CC[YDIM * nwindow];
int i, j, k, tid, icol, dxy, lb, ub, iword, cword;
float bb, db, prod, v;
tid = threadIdx.x + blockDim.x * threadIdx.y;
dxy = blockDim.x * blockDim.y;
int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x);
int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x);
for (icol = istart; icol < iend; icol++) { // Iterate over columns
iword = nrows * W[icol]; // Get the current word
__syncthreads();
lb = LB[icol];
ub = UB[icol];
if (iword >= 0) {
#pragma unroll
for (j = 0; j < NREPS; j++) { // Iterate over blocks of elements
if (tid + j * dxy < nrows) { // Get A
aa[j] = A[tid + j * dxy + iword];
} else {
aa[j] = 0;
}
}
for (i = lb; i <= ub; i++) { // Iterate across the window for A cols
__syncthreads();
cword = nrows * W[icol + i]; // Get the current word
prod = 0;
if (cword >= 0) {
#pragma unroll
for (j = 0; j < NREPS; j++) { // Iterate over blocks of elements
if (tid + j * dxy < nrows) { // Get B col
bb = B[tid + j * dxy + cword];
prod += aa[j] * bb; // Compute the product between current A, B cols
}
}
#pragma unroll
for (k = 1; k < 32; k = k + k) {
prod += __shfl_down(prod, k); // Reduce within warp
}
}
if (threadIdx.x == 0) {
CC[i - lb + threadIdx.y * nwindow] = prod; // Save to SHMEM
}
}
__syncthreads();
for (j = 1; j < blockDim.y; j++) { // Reduce across warps
for (i = tid; i < ub - lb; i += dxy) {
CC[i] += CC[i + j * nwindow];
}
__syncthreads();
}
__syncthreads(); // Apply the sigmoid map
for (i = tid; i < ub - lb; i += dxy) {
v = CC[i];
if (v > 16.0f) {
v = 1.0f;
} else {
v = exp(v);
v = v / (1.0f + v);
}
CC[i] = lrate * (1.0f - v); // All pairs have label 1
}
__syncthreads();
#pragma unroll
for (j = 0; j < NREPS; j++) {
da[j] = 0;
}
for (i = lb; i <= ub; i++) { // Iterate across the window for A cols
cword = nrows * W[icol + i]; // Get the context word
v = CC[i - lb];
if (cword >= 0) {
#pragma unroll
for (j = 0; j < NREPS; j++) { // Iterate over blocks of elements
if (tid + j * dxy < nrows) { // Get B col
bb = B[tid + j * dxy + cword];
da[j] += v * bb;
db = v * aa[j];
atomicAdd(&B[tid + j * dxy + cword], db);
}
}
}
}
#pragma unroll
for (j = 0; j < NREPS; j++) {
if (tid + j * dxy < nrows) {
atomicAdd(&A[tid + j * dxy + iword], da[j]);
}
}
}
}
}
/*
 * Combined forward-backward word2vec kernel (older variant)
*/
template<int NWA, int NWB, int MAXD, int BYDIM>
__global__ void __word2vecNeg_old(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float lrate) {
const int NWAB = NWA*NWB;
__shared__ float CC[NWA*NWB*BYDIM];
float dd[MAXD];
float prods[NWA][NWB];
float aa, v, sum;
int ia[NWA];
int ib[NWB];
int tid = threadIdx.x + blockDim.x * threadIdx.y;
int dxy = blockDim.x * blockDim.y;
int i, j, k, icol;
int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x);
int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x);
for (icol = istart; icol < iend; icol++) { // Iterate over columns
#pragma unroll
for (i = 0; i < NWA; i++) {
ia[i] = nrows * WA[i + icol * NWA]; // Fill the A word matrix
#pragma unroll
for (j = 0; j < NWB; j++) { // clear the products matrix
prods[i][j] = 0;
}
}
#pragma unroll
for (i = 0; i < NWB; i++) {
ib[i] = nrows * WB[i + icol * NWB]; // Fill the B word matrix
}
for (i = tid; i < nrows; i += dxy) { // Now iterate over the rows of this block
#pragma unroll
for (j = 0; j < NWB ; j++) { // Read B
if (ib[j] >= 0) {
dd[j] = B[i + ib[j]];
} else {
dd[j] = 0;
}
}
#pragma unroll
for (j = 0; j < NWA; j++) { // Compute the inner products of these elements
if (ia[j] >= 0) {
aa = A[i + ia[j]];
#pragma unroll
for (k = 0; k < NWB; k++) {
prods[j][k] += aa * dd[k];
}
}
}
} // Finished the entire block
#pragma unroll
for (i = 0; i < NWA; i++) { // Reduce the products within each warp
#pragma unroll
for (j = 0; j < NWB; j++) {
#pragma unroll
for (k = 1; k < 32; k = k+k) {
float tmp = __shfl_down(prods[i][j], k);
prods[i][j] += tmp;
}
}
}
__syncthreads();
if (threadIdx.x == 0) { // Save the products to SHMEM (one copy per warp)
#pragma unroll
for (i = 0; i < NWA; i++) {
#pragma unroll
for (j = 0; j < NWB; j++) {
CC[j + NWB * (i + NWA * threadIdx.y)] = prods[i][j];
}
}
}
__syncthreads();
for (i = 1; i < blockDim.y; i++) {
__syncthreads();
for (j = tid; j < NWAB; j += dxy) { // Reduce the products across warps
CC[j] += CC[j + i * NWAB];
}
}
__syncthreads();
for (i = tid; i < NWA*NWB; i+= dxy) { // Compute logistic function on all products
v = CC[i];
if (v > 16.0f) {
v = 1.0f;
} else {
v = exp(v);
v = v / (1.0f + v);
}
CC[i] = - lrate * v; // All these pairs have label 0
}
__syncthreads();
for (i = tid; i < nrows; i += dxy) {
#pragma unroll
for (j = 0; j < NWB; j++) { // Load B data
if (ib[j] >= 0) {
dd[j] = B[i + ib[j]];
} else {
dd[j] = 0;
}
}
#pragma unroll
for (j = 0; j < NWA; j++) { // Now do the product
if (ia[j] >= 0) {
sum = 0;
#pragma unroll
for (k = 0; k < NWB; k++) {
float xx = CC[j + k * NWA];
sum += xx * dd[k];
}
atomicAdd(&A[i + ia[j]], sum);
}
}
#pragma unroll
for (j = 0; j < NWA; j++) { // Load A data
if (ia[j] >= 0) {
dd[j] = A[i + ia[j]];
} else {
dd[j] = 0;
}
}
#pragma unroll
for (j = 0; j < NWB; j++) { // Now do the product
if (ib[j] >= 0) {
sum = 0;
#pragma unroll
for (k = 0; k < NWA; k++) {
float xx = CC[k + j * NWA];
sum += xx * dd[k];
}
atomicAdd(&B[i + ib[j]], sum);
}
}
}
__syncthreads();
}
}
/*
*
* Simple forward kernel for word2vec. Computes inner products of columns from A with columns from B.
* The column indices are specified by two "word" matrices. The inner products are computed as an outer product
* of the word matrices.
*
* NWA is the number of words per column in WA
* NWB is the number of words per column in WB
*
* Columns of the output matrix C are <window> = NWA*NWB long, and contain inner products with corresponding columns of B.
*
*/
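/*
 * Illustrative example (for clarity): with NWA=5 and NWB=1, column icol of C
 * holds the five inner products
 *   C[j + icol*5] = dot(A[:, WA[j + icol*5]], B[:, WB[icol]]),  j = 0..4.
 */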
template<int NWA, int NWB, int BDIM>
__global__ void __word2vecFwd(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float *C) {
const int NWAB = NWA*NWB;
__shared__ float CC[NWA*NWB*BDIM];
float aa;
float bb[NWB];
float prods[NWA][NWB];
int wa[NWA];
int wb[NWB];
int tid = threadIdx.x + blockDim.x * threadIdx.y;
int dxy = blockDim.x * blockDim.y;
int i, j, k, icol;
int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x);
int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x);
for (icol = istart; icol < iend; icol++) { // Iterate over columns
#pragma unroll
for (i = 0; i < NWA; i++) {
wa[i] = WA[i + icol * NWA]; // Fill the A word matrix (wa is scaled by nrows at the use site)
#pragma unroll
for (j = 0; j < NWB; j++) { // clear the products matrix
prods[i][j] = 0;
}
}
#pragma unroll
for (i = 0; i < NWB; i++) {
wb[i] = WB[i + icol * NWB]; // Fill the B word matrix
}
for (i = tid; i < nrows; i += dxy) { // Now iterate over the rows of this block
#pragma unroll
for (j = 0; j < NWB ; j++) { // Read B
bb[j] = B[i + wb[j] * nrows];
}
#pragma unroll
for (j = 0; j < NWA; j++) { // Computes the products of these elements
aa = A[i + wa[j] * nrows];
#pragma unroll
for (k = 0; k < NWB; k++) {
prods[j][k] += aa * bb[k];
}
}
} // Finished the entire block
#pragma unroll
for (i = 0; i < NWA; i++) { // Reduce the products within each warp
#pragma unroll
for (j = 0; j < NWB; j++) {
#pragma unroll
for (k = 1; k < 32; k = k+k) {
float tmp = __shfl_down(prods[i][j], k);
prods[i][j] += tmp;
}
}
}
__syncthreads();
if (threadIdx.x == 0) { // Save the products to SHMEM (one copy per warp)
#pragma unroll
for (i = 0; i < NWA; i++) {
#pragma unroll
for (j = 0; j < NWB; j++) {
CC[j + NWB * (i + NWA * threadIdx.y)] = prods[i][j];
}
}
}
__syncthreads();
for (i = 1; i < blockDim.y; i++) {
__syncthreads();
#pragma unroll
for (j = tid; j < NWAB; j += dxy) { // Reduce the products across warps
CC[j] += CC[j + i * NWAB];
}
}
__syncthreads();
for (i = tid; i < NWAB; i += dxy) { // Save to main memory
C[i + icol * NWAB] = CC[i];
//atomicAdd(&C[i + icol * NWAB], CC[i]);
}
__syncthreads();
}
}
/*
*
* Simple backward kernel for word2vec.
* Computes the gradient for A given B or vice-versa, and does an SGD update.
*
* NWA is the number of words per column in WA
* NWB is the number of words per column in WB
*
*/
template<int NWA, int NWB, int MAXDIM>
__global__ void __word2vecBwd(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float *C, float lrate) {
const int NWAB = NWA * NWB;
float dd[MAXDIM];
int wa[NWA];
int wb[NWB];
__shared__ float cc[NWA*NWB];
int tid = threadIdx.x;
int fid = threadIdx.x + blockDim.x * threadIdx.y;
int dxy = blockDim.x * blockDim.y;
int icol, i, j, k;
float sum;
int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x);
int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x);
for (icol = istart; icol < iend; icol++) { // iterate in columns
#pragma unroll
for (j = 0; j < NWA; j++) {
wa[j] = WA[j + icol * NWA]; // Load the A word matrix
}
__syncthreads();
#pragma unroll
for (j = 0; j < NWB; j++) {
wb[j] = WB[j + icol * NWB]; // Load the B word matrix
}
for (i = fid; i < NWAB; i += dxy) {
cc[i] = C[i + icol * NWAB];
}
__syncthreads();
for (i = tid; i < nrows; i += dxy) {
#pragma unroll
for (j = 0; j < NWB; j++) { // Load the data
dd[j] = B[i + wb[j] * nrows];
}
#pragma unroll
for (j = 0; j < NWA; j++) { // Now do the product
sum = 0;
#pragma unroll
for (k = 0; k < NWB; k++) {
float xx = cc[j + k * NWA];
sum += xx * dd[k];
}
atomicAdd(&A[i + wa[j] * nrows], sum * lrate);
}
#pragma unroll
for (j = 0; j < NWA; j++) { // Load the data
dd[j] = A[i + wa[j] * nrows];
}
#pragma unroll
for (j = 0; j < NWB; j++) { // Now do the product
sum = 0;
#pragma unroll
for (k = 0; k < NWA; k++) {
float xx = cc[k + j * NWA];
sum += xx * dd[k];
}
atomicAdd(&B[i + wb[j] * nrows], sum * lrate);
}
}
}
}
#else
template<int SKIP, int BYDIM, int NREPS>
__global__ void __word2vecPos(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float lrate, float vexp) {}
template<int NWA, int NWB, int MAXD, int BYDIM>
__global__ void __word2vecNeg(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float lrate, float vexp) {}
template<int SKIP, int BYDIM, int NREPS>
__global__ void __word2vecEvalPos(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float *Retval) {}
template<int NWA, int NWB, int MAXD, int BYDIM>
__global__ void __word2vecEvalNeg(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float *Retval) {}
template<int NWA, int NWB, int BDIM>
__global__ void __word2vecFwd(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float *C) {}
template<int NWA, int NWB, int MAXDIM>
__global__ void __word2vecBwd(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float *C, float lrate) {}
#endif
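// Host launcher for the positive-pair training kernel. Dispatches on the
// skip (half-window) size to a matching template instantiation; only
// skip = 2, 3, 5 are instantiated. Returns the runtime error code (0 on success).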
int word2vecPos(int nrows, int ncols, int skip, int *W, int *LB, int *UB, float *A, float *B, float lrate, float vexp) {
dim3 threads(32, CDIM, 1);
int nblocks = min(64, ncols);
switch(skip) {
case 5 : __word2vecPos<5, CDIM, 10/CDIM><<<nblocks,threads>>>(nrows, ncols, W, LB, UB, A, B, lrate, vexp); break;
case 3 : __word2vecPos<3, CDIM, 10/CDIM><<<nblocks,threads>>>(nrows, ncols, W, LB, UB, A, B, lrate, vexp); break;
case 2 : __word2vecPos<2, CDIM, 10/CDIM><<<nblocks,threads>>>(nrows, ncols, W, LB, UB, A, B, lrate, vexp); break;
default : printf("word2vecPos unsupported size %d\n", skip); return 1;
}
cudaDeviceSynchronize();
int err = cudaGetLastError();
return err;
}
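// Host launcher for the negative-sampling training kernel. The (nwa, nwb)
// pair is packed into a single switch key as nwa*10000 + nwb; only the
// combinations instantiated below are supported.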
int word2vecNeg(int nrows, int ncols, int nwa, int nwb, int *WA, int *WB, float *A, float *B, float lrate, float vexp) {
dim3 threads(32, BYDIMF, 1);
int nblocks = min(2048, 2 + (ncols - 1));
int which = nwa*10000 + nwb;
switch (which) {
case 50001: __word2vecNeg<5,1,5,BYDIMF><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, lrate, vexp); break;
case 50005: __word2vecNeg<5,5,5,BYDIMF><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, lrate, vexp); break;
case 100005: __word2vecNeg<10,5,10,BYDIMF><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, lrate, vexp); break;
case 50010: __word2vecNeg<5,10,10,BYDIMF><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, lrate, vexp); break;
// case 150010: __word2vecNeg<15,10,15><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, lrate); break;
default : printf("word2vecNeg unsupported size combination %d %d\n", nwa, nwb); return 1;
}
cudaDeviceSynchronize();
int err = cudaGetLastError();
return err;
}
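// Host launcher for the positive-pair evaluation kernel; the kernel
// accumulates the log-likelihood of the positive pairs into Retval[0].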
int word2vecEvalPos(int nrows, int ncols, int skip, int *W, int *LB, int *UB, float *A, float *B, float *Retval) {
dim3 threads(32, CDIM, 1);
int nblocks = min(64, ncols);
switch(skip) {
case 5 : __word2vecEvalPos<5, CDIM, 10/CDIM><<<nblocks,threads>>>(nrows, ncols, W, LB, UB, A, B, Retval); break;
case 3 : __word2vecEvalPos<3, CDIM, 10/CDIM><<<nblocks,threads>>>(nrows, ncols, W, LB, UB, A, B, Retval); break;
case 2 : __word2vecEvalPos<2, CDIM, 10/CDIM><<<nblocks,threads>>>(nrows, ncols, W, LB, UB, A, B, Retval); break;
default : printf("word2vecEvalPos unsupported size %d\n", skip); return 1;
}
cudaDeviceSynchronize();
int err = cudaGetLastError();
return err;
}
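// Host launcher for the negative-sample evaluation kernel; the kernel
// accumulates the log-likelihood of the label-0 pairs into Retval[0].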
int word2vecEvalNeg(int nrows, int ncols, int nwa, int nwb, int *WA, int *WB, float *A, float *B, float *Retval) {
dim3 threads(32, BYDIMF, 1);
int nblocks = min(2048, 2 + (ncols - 1));
int which = nwa*10000 + nwb;
switch (which) {
case 50001: __word2vecEvalNeg<5,1,5,BYDIMF><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, Retval); break;
case 50005: __word2vecEvalNeg<5,5,5,BYDIMF><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, Retval); break;
case 100005: __word2vecEvalNeg<10,5,10,BYDIMF><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, Retval); break;
case 50010: __word2vecEvalNeg<5,10,10,BYDIMF><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, Retval); break;
// case 150010: __word2vecEvalNeg<15,10,15><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, Retval); break;
default : printf("word2vecEvalNeg unsupported size combination %d %d\n", nwa, nwb); return 1;
}
cudaDeviceSynchronize();
int err = cudaGetLastError();
return err;
}
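// Host launcher for the forward (inner-product) kernel; each column of C
// receives an nwa*nwb block of inner products.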
int word2vecFwd(int nrows, int ncols, int nwa, int nwb, int *WA, int *WB, float *A, float *B, float *C) {
dim3 threads(32, BYDIMF, 1);
int nblocks = min(4096, 2 + (ncols - 1));
int which = nwa*10000 + nwb;
switch (which) {
case 50001: __word2vecFwd<5,1,BYDIMF><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, C); break;
case 50005: __word2vecFwd<5,5,BYDIMF><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, C); break;
case 100005: __word2vecFwd<10,5,BYDIMF><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, C); break;
default : printf("word2vecFwd unsupported size combination %d %d\n", nwa, nwb); return 1;
}
cudaDeviceSynchronize();
int err = cudaGetLastError();
return err;
}
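/*
 * Usage sketch (illustrative; the d* pointer names are hypothetical device
 * buffers): for nwa = 5, nwb = 1, WA is a 5 x ncols matrix of word ids,
 * WB is 1 x ncols, and C must hold nwa*nwb*ncols floats:
 *
 *   int err = word2vecFwd(nrows, ncols, 5, 1, dWA, dWB, dA, dB, dC);
 *   if (err != 0) printf("word2vecFwd failed: %d\n", err);
 */
// Host launcher for the backward kernel; applies SGD updates to A and B
// from the gradients in C, scaled by lrate.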
int word2vecBwd(int nrows, int ncols, int nwa, int nwb, int *WA, int *WB, float *A, float *B, float *C, float lrate) {
dim3 threads(32*BYDIMB, 1, 1);
int nblocks = min(2048, 2 + (ncols - 1));
int which = nwa*10000 + nwb;
switch (which) {
case 50001: __word2vecBwd<5,1,5><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, C, lrate); break;
case 50005: __word2vecBwd<5,5,5><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, C, lrate); break;
case 100005: __word2vecBwd<10,5,10><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, C, lrate); break;
default : printf("word2vecBwd unsupported size combination %d %d\n", nwa, nwb); return 1;
}
cudaDeviceSynchronize();
int err = cudaGetLastError();
return err;
}
|
fef3f93cacb1e2323f6352d4ac44410749c658aa.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2019 by Contributors
* \file np_multinomial_op.cu
* \brief Operator for numpy sampling from multinomial distributions
*/
#include "./np_multinomial_op.h"
namespace mxnet {
namespace op {
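// Copies the probability vector to the host and checks that its running sum
// never exceeds 1 (within a small tolerance), matching the
// "sum(pvals[:-1]) > 1.0" error condition.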
template<typename DType>
void CheckPvalGPU(const OpContext& ctx, DType* input, int prob_length) {
std::vector<DType> pvals_(prob_length);
hipStream_t stream = mshadow::Stream<gpu>::GetStream(ctx.get_stream<gpu>());
CUDA_CALL(hipMemcpyAsync(&pvals_[0], input, sizeof(DType) * prob_length,
hipMemcpyDeviceToHost, stream));
CUDA_CALL(hipStreamSynchronize(stream));
DType sum = DType(0.0);
for (int i = 0; i < prob_length; ++i) {
sum += pvals_[i];
CHECK(sum <= DType(1.0 + 1e-12))
<< "sum(pvals[:-1]) > 1.0";
}
}
NNVM_REGISTER_OP(_npi_multinomial)
.set_attr<FIsCUDAGraphsCompatible>("FIsCUDAGraphsCompatible",
[](const NodeAttrs&, const bool) {
return false;
})
.set_attr<FCompute>("FCompute<gpu>", NumpyMultinomialForward<gpu>);
} // namespace op
} // namespace mxnet
| fef3f93cacb1e2323f6352d4ac44410749c658aa.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2019 by Contributors
* \file np_multinomial_op.cu
* \brief Operator for numpy sampling from multinomial distributions
*/
#include "./np_multinomial_op.h"
namespace mxnet {
namespace op {
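// Copies the probability vector to the host and checks that its running sum
// never exceeds 1 (within a small tolerance), matching the
// "sum(pvals[:-1]) > 1.0" error condition.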
template<typename DType>
void CheckPvalGPU(const OpContext& ctx, DType* input, int prob_length) {
std::vector<DType> pvals_(prob_length);
cudaStream_t stream = mshadow::Stream<gpu>::GetStream(ctx.get_stream<gpu>());
CUDA_CALL(cudaMemcpyAsync(&pvals_[0], input, sizeof(DType) * prob_length,
cudaMemcpyDeviceToHost, stream));
CUDA_CALL(cudaStreamSynchronize(stream));
DType sum = DType(0.0);
for (int i = 0; i < prob_length; ++i) {
sum += pvals_[i];
CHECK(sum <= DType(1.0 + 1e-12))
<< "sum(pvals[:-1]) > 1.0";
}
}
NNVM_REGISTER_OP(_npi_multinomial)
.set_attr<FIsCUDAGraphsCompatible>("FIsCUDAGraphsCompatible",
[](const NodeAttrs&, const bool) {
return false;
})
.set_attr<FCompute>("FCompute<gpu>", NumpyMultinomialForward<gpu>);
} // namespace op
} // namespace mxnet
|
a7e4ef64d3e6fd7f746a8828f836d044cf601e04.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <fstream>
#include <stdio.h>
#include <vector>
#include <algorithm>
#include <iterator>
#include <utility>
#include <math.h>
#include <omp.h>
#include <hip/hip_runtime.h>
#include "util.h"
#include "kernel.h"
#include <bits/stdc++.h>
#include <mpi.h>
using namespace std;
#define mpi_barrier() MPI_Barrier(MPI_COMM_WORLD);
long n_rows, n_cols, nnz;
int actv_row_size = 180;
int tile_sizeX = 256;
int tile_sizeY = 25000;
int k=100;
int SM_CAPACITY = 8192;
int BLOCKSIZE=512;
inline hipError_t checkCuda(hipError_t result, int s){
if (result != hipSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s at line %d\n", hipGetErrorString(result), s);
assert(result == hipSuccess);
}
return result;
}
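// Runs the GPU SDDMM over this rank's column slice: one thread block per
// group of actv_row_size active rows, launching comp_kernel_COO once per
// k-slice (k_slice = SM_CAPACITY / actv_row_size) and timing with events.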
void sddmm_GPU(int * d_row_ptr, int * d_row_ind, int *d_col_ind, float * d_val_ind, float * d_W, float *d_H,
int *d_tiled_ind, int *d_lastIdx, int *lastIdx_tile, int *d_lastIdx_block_tile ,int *d_active_row, int *d_passive_row,
int * count_actv_row, int &max_active_block, long new_nnz, int mpi_rank, int cols_in_rank, int nnz_in_rank, int col_loc){
int n_tile = n_cols/tile_sizeX + 1;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipStream_t stream[n_tile];
for (int i = 0; i < n_tile; i++){
hipStreamCreate(&(stream[i]));
}
float mili =0, copyTime = 0 ;
dim3 block(BLOCKSIZE,1,1), grid(1,1,1);
int sum = 0, t_st =0 ;
int k_slice = SM_CAPACITY/actv_row_size;
int col_offset = col_loc ;
mpi_barrier();
// cout << "k_slice " << k_slice << endl;
checkCuda(hipEventRecord(start), __LINE__);
int nnz_tile = nnz_in_rank; // lastIdx_tile[mpi_rank+1]-lastIdx_tile[mpi_rank];
// if(mpi_rank == 1)
// cout << " after " << lastIdx_tile[mpi_rank] << " " << lastIdx_tile[mpi_rank+1]-lastIdx_tile[mpi_rank] << " "<< nnz_in_rank << endl;
int active_block_this_tile = count_actv_row[mpi_rank]/actv_row_size+1;
grid.x = active_block_this_tile;
// if(mpi_rank == 1)
// cout << "ranks "<<mpi_rank <<" nnz "<< nnz_tile << " grid "<<grid.x <<" "<<count_actv_row[mpi_rank ]<< endl;
// cout <<" ohh " <<mpi_rank <<" " << lastIdx_tile[mpi_rank] <<" " << lastIdx_tile[mpi_rank+1] << " " << tile_sizeX<< endl;
//if(mpi_rank == 0)
for (int t_st = 0; t_st < k ; t_st +=k_slice){
hipLaunchKernelGGL(( comp_kernel_COO), dim3(grid),dim3(block), 0, stream[0], d_row_ind, d_col_ind, d_val_ind, d_W, d_H,
nnz, n_rows, n_cols, k, lastIdx_tile[mpi_rank], lastIdx_tile[mpi_rank+1], &(d_lastIdx_block_tile[(mpi_rank)*max_active_block]),
d_active_row, mpi_rank, t_st, count_actv_row[mpi_rank], actv_row_size, k_slice, col_offset);
}
//mpi_barrier();
checkCuda(hipEventRecord(stop), __LINE__);
hipEventSynchronize(stop);
// printf("\nTotal seconds: %.3f for rank = %d\n\n", seconds() - t0, mpi_rank);
//hipDeviceSynchronize();
checkCuda(hipEventElapsedTime(&mili, start, stop), __LINE__);
hipDeviceSynchronize();
cout << mili << " ";
mpi_barrier();
}
void sddmm_CPU_CSR(int * row_ptr, int *col_ind, float * val_ind, float * W, float *H, float * p_ind){
// reduction(+:rmse)
long tot =0 ;
#pragma omp parallel for reduction(+:tot)
for (int r = 0; r < n_rows; ++r){
tot += row_ptr[r+1] - row_ptr[r];
float sm =0 ;
for (int ind = row_ptr[r]; ind < row_ptr[r+1]; ++ind){
int row = r;
int col = col_ind[ind];
int nnz = row_ptr[r+1]-row_ptr[r];
float val = val_ind[ind];
sm=0;
for (int t = 0; t < k; ++t){
sm += W[row * k + t] * H[col * k + t];
// cout <<W[row * k + t] <<" "<<H[col * k + t]<< endl;
}
p_ind[ind] = sm * val_ind[ind];
// cout << "ind " << row<<" "<<col << ":: " <<" "<< p_ind[ind] << " = " << sm <<" * "<< val_ind[ind]<< endl;
}
}
}
void sddmm_CPU_COO(int * row_ind, int *col_ind, float * val_ind, float * W, float *H, float * p_ind){
// reduction(+:rmse)
double start_time = omp_get_wtime();
omp_set_dynamic(0);
omp_set_num_threads(28);
#pragma omp parallel for //reduction(+:tot)
for (int ind = 0; ind < nnz; ind++){
float sm =0 ;
int row = row_ind[ind];
int col = col_ind[ind];
for (int t = 0; t < k; ++t)
sm += W[row * k + t] * H[col * k + t];
p_ind[ind] = sm ;//* val_ind[ind];
// cout << "ind " << row<<" "<<col << ":: " <<" "<< p_ind[ind] << " = " << sm <<" * "<< val_ind[ind]<< endl;
// }
}
double CPU_time = omp_get_wtime() - start_time;
//correctness check
//cout << "CPU " << row_ind[1] <<" "<<col_ind[1] << endl;
// printf("\nomp time CPU : %.4f \n\n", CPU_time*1000);
}
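// Sets up MPI (world plus row/column communicators), builds CSR/CSC views of
// the matrix, partitions the columns across ranks, runs the GPU SDDMM on the
// local slice, and checks the result against the CPU reference.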
void init(int *rows, int *cols, float* vals){
//**************************MPI init*************************
MPI_Init(NULL, NULL);
// Find out rank, size
int mpi_rank, n_proc;
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size(MPI_COMM_WORLD, &n_proc);
int root_p = sqrt(n_proc+1);
int row_color = mpi_rank / root_p;
int col_color = mpi_rank % root_p;
MPI_Comm row_comm, col_comm;
MPI_Comm_split(MPI_COMM_WORLD, row_color, mpi_rank, &row_comm);
MPI_Comm_split(MPI_COMM_WORLD, col_color, mpi_rank, &col_comm);
int row_rank, row_size, col_rank, col_size;
MPI_Comm_rank(row_comm, &row_rank);
MPI_Comm_size(row_comm, &row_size);
MPI_Comm_rank(col_comm, &col_rank);
MPI_Comm_size(col_comm, &col_size);
//printf("WORLD RANK/SIZE: %d/%d \t ROW RANK/SIZE: %d/%d %d/%d\n", mpi_rank, n_proc, row_rank, row_size, col_rank, col_size);
int n_bin=10;
int *count = new int[n_bin];
int *row_ptr = new int[n_rows+1];
int *col_ptr = new int[n_cols+1];
float *p_ind = new float[nnz];
float *W = new float[n_rows*k];
float *W_t = new float[n_rows*k];
float *H = new float[n_cols*k];
float *H_t = new float[n_cols*k];
int n_tile_c = n_proc;
int n_tile_r = n_rows/tile_sizeY + 1;
int max_active_block = (n_rows/actv_row_size+1);
int *count_actv_row = new int[n_tile_c];
int *lastIdx_tile = new int[n_tile_c+1];
int *lastIdx_block_tile = new int[(n_tile_c+1) * (n_rows/actv_row_size+1)];
float *d_val, *d_W, *d_H, *d_W_t;
int *d_row_ptr, *d_col_ind, *d_row_ind, *d_tiled_ind, *d_lastIdx, *d_active_row, *d_lastIdx_block_tile, *d_passive_row;
int n_tileX = n_proc;// n_cols/tile_sizeX+1;
int n_tileY = n_rows/tile_sizeY+1;
long new_nnz =0 ;
initial(W, n_rows, k);
initial(H, n_cols, k);
make_HTasH(H, H_t, n_cols, k);
make_HTasH(W, W_t, n_rows, k);
int *new_rows = new int[nnz];
int *new_cols = new int[nnz];
float *new_vals = new float[nnz];
int *row_holder = new int[n_rows];
int *tiled_ind = new int [nnz];
int *active_row = new int[n_tileX * n_rows];
int *passive_row = new int[n_tileX * n_rows];
// int *new_rows = new int[nnz];
// int *new_cols = new int[nnz];
// float *new_vals = new float[nnz];
//converting col sorted matrix to row sorted
//unsorted_make_CSR(rows, cols, vals, nnz, n_rows, n_cols, row_ptr);
//assuming sorted
int *cols_in_asym_rank = new int [n_proc];
int *nnz_in_asym_rank = new int [n_proc];
make_CSR(rows, cols, vals, nnz, n_rows, row_ptr, row_holder);
make_CSC(rows, cols, vals, nnz, n_rows, n_cols, col_ptr, mpi_rank, n_proc, cols_in_asym_rank, nnz_in_asym_rank);
int cols_in_rank = cols_in_asym_rank[mpi_rank];
//in MPI set tile sizeX as cols in rank
tile_sizeX = cols_in_rank;
int col_loc = cols_in_rank * mpi_rank;
//comp_bin(n_bin, count, n_rows, row_ptr, nnz);
int max_active_row=0;
mpi_barrier();
max_active_row = rewrite_matrix_1D(row_ptr, rows, cols, vals, new_rows, new_cols, new_vals, nnz, n_rows, n_cols,
tile_sizeX, tiled_ind, lastIdx_tile, active_row, passive_row, count_actv_row, lastIdx_block_tile, actv_row_size,
new_nnz, row_holder, actv_row_size, cols_in_asym_rank, nnz_in_asym_rank, mpi_rank, n_proc);
mpi_barrier();
int nnz_in_rank = nnz_in_asym_rank[mpi_rank];//lastIdx_tile[mpi_rank+1] - lastIdx_tile[mpi_rank];
// if(mpi_rank == 1)
// cout <<" out " << lastIdx_tile[mpi_rank] <<" "<<
// lastIdx_tile[mpi_rank+1]<< endl;
int nnz_loc = lastIdx_tile[mpi_rank];
int nnz_loc_end = lastIdx_tile[mpi_rank+1];
mpi_barrier();
// cout << "sizes: "<<mpi_rank <<" " << tile_sizeX <<" nnz in rank "<< nnz_in_rank << " " <<nnz_loc<<
// " " << cols_in_rank <<endl;
mpi_barrier();
// <<" " << nnz_loc <<" " <<nnz_loc_end <<endl;
// rewrite_col_sorted_matrix(row_ptr, rows, cols, vals, new_rows, new_cols, new_vals, nnz, n_rows, n_cols,
// tile_sizeX, tiled_ind, lastIdx_tile, BLOCKSIZE, new_nnz);
double t0 = seconds();
// sddmm_CPU_CSR(row_ptr, cols, vals, W, H, p_ind);
sddmm_CPU_COO(rows, cols, vals, W, H, p_ind);
//***********Starting GPU****************
//if(mpi_rank == 0)
// checkCuda(hipMalloc((void**)&d_W, k*n_rows*sizeof(float)),0);
//********Allocate GPU memory********
checkCuda(hipMalloc((void**)&d_W, k*n_rows*sizeof(float)),0);
checkCuda(hipMalloc((void**)&d_H, k*n_cols*sizeof(float)),1);
checkCuda(hipMalloc((void**)&d_row_ind, nnz_in_rank*sizeof(int)),4);
checkCuda(hipMalloc((void**)&d_col_ind, nnz_in_rank*sizeof(int)),4);
checkCuda(hipMalloc((void**)&d_val, nnz_in_rank*sizeof(float)),4);
checkCuda(hipMalloc((void**)&d_lastIdx, (n_tile_c+1)*sizeof(float)),4);
checkCuda(hipMalloc((void**)&d_active_row, n_tileX*max_active_row*sizeof(int)),4);
checkCuda(hipMalloc((void**)&d_lastIdx_block_tile, n_tileX*max_active_block*sizeof(int)),4);
//******** Copy GPU memory********
checkCuda(hipMemcpy(d_row_ind, &(new_rows[nnz_loc]), nnz_in_rank*sizeof(int), hipMemcpyHostToDevice),4);
checkCuda(hipMemcpy(d_col_ind, &(new_cols[nnz_loc]), nnz_in_rank*sizeof(int), hipMemcpyHostToDevice),4);
//checkCuda(hipMemcpy(d_val, &(new_vals[0]), new_nnz*sizeof(float), hipMemcpyHostToDevice),4);
hipMemset(d_val, 0, nnz_in_rank*sizeof(float));
checkCuda(hipMemcpy(d_lastIdx, &(lastIdx_tile[0]), (n_tile_c+1)*sizeof(int), hipMemcpyHostToDevice),4);
for (int i = 0; i < n_tileX; ++i){
checkCuda(hipMemcpy(d_lastIdx_block_tile+i*max_active_block, &(lastIdx_block_tile[i*max_active_block]), max_active_block*sizeof(int), hipMemcpyHostToDevice),4);
//cout <<i<<" "<< lastIdx_tile[i]<<" "<<lastIdx_block_tile[i*max_active_block]<< endl;
mpi_barrier();
}
checkCuda(hipMemcpy(d_active_row, &(active_row[mpi_rank*n_rows]), count_actv_row[mpi_rank]*sizeof(int), hipMemcpyHostToDevice),4);
// int sum =0 ;
// for (int i = 0; i < n_tileX; ++i){
// checkCuda(hipMemcpy(d_active_row+sum, &(active_row[i*n_rows]), count_actv_row[i]*sizeof(int), hipMemcpyHostToDevice),4);
// mpi_barrier();
// sum += count_actv_row[i];
// }
hipMemcpy(d_W, &(W[0]), n_rows * k *sizeof(float), hipMemcpyHostToDevice);
//hipMemcpy(d_W, &(W_t[0]), n_rows * k *sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_H, &(H[0]), n_cols * k *sizeof(float), hipMemcpyHostToDevice);
//hipMemcpy(d_H, &(H[col_loc*k]), cols_in_rank * k *sizeof(float), hipMemcpyHostToDevice);
//hipMemcpy(d_H, &(H_t[0]), n_cols * k *sizeof(float), hipMemcpyHostToDevice);
mpi_barrier();
// if(mpi_rank == 1)
// cout << " before " << lastIdx_tile[mpi_rank] << " " << lastIdx_tile[mpi_rank+1];
sddmm_GPU(d_row_ptr, d_row_ind, d_col_ind, d_val, d_W, d_H, d_tiled_ind, d_lastIdx, lastIdx_tile, d_lastIdx_block_tile, d_active_row, d_passive_row ,count_actv_row,
max_active_block, new_nnz, mpi_rank, cols_in_rank, nnz_in_rank, col_loc );
//******** correctness check
mpi_barrier();
float GPU_tot = 0, CPU_tot =0, CPU_tot_orig =0 ;
float *p_ind_temp = new float[new_nnz];
// cout << "nz loc " << nnz_loc <<" " << nnz_in_rank << endl;
mpi_barrier();
checkCuda(hipMemcpy(&(p_ind_temp[nnz_loc]), d_val, nnz_in_rank *sizeof(float), hipMemcpyDeviceToHost),4);
mpi_barrier();
for (int i = 0; i < nnz; ++i){
CPU_tot += p_ind[tiled_ind[i]];
CPU_tot_orig += p_ind[i];
// cout << "p_ind " << p_ind[tiled_ind[i]] << " " << p_ind[i] << " new,old ind: "<<tiled_ind[i] <<" "<<i<< endl;
}
// if(mpi_rank == 0)
// for (int i = nnz_loc; i < nnz_loc+2; ++i)
// cout << "rank and idx: " <<mpi_rank <<" " << i << " " <<" GPU "<< p_ind_temp[i] << " CPU "<< p_ind[tiled_ind[i]]<<endl;
// // //mpi_barrier();
// for (int i = nnz_loc+nnz_in_rank -1; i > nnz_loc+nnz_in_rank -3 ; --i)
// cout << "rank and idx: " <<mpi_rank <<" " << i << " " <<" GPU "<< p_ind_temp[i] << " CPU "<< p_ind[tiled_ind[i]]<<endl;
mpi_barrier();
//}
long diff_tot = 0;
for (int i = nnz_loc; i < nnz_in_rank+nnz_loc; ++i){
//if(p_ind_temp[i] != 0)
{
if(abs(p_ind_temp[i]-p_ind[tiled_ind[i]]) > .0001){
diff_tot ++;
//if(diff_tot < 5)
//if(mpi_rank == 1)
}
//if(mpi_rank == 1)
//printf("CPU GPU diff %d: %f %f %f \n", i, p_ind_temp[i], p_ind[tiled_ind[i]],p_ind_temp[i]-p_ind[tiled_ind[i]] );
}
}
mpi_barrier();
if(diff_tot > 0)
cout << "diff values in CPU and GPU in machine: " << mpi_rank <<" " << diff_tot<< endl;
MPI_Finalize();
cout << endl;
//freeing device allocation
hipFree( d_row_ptr );
hipFree( d_row_ind);
hipFree( d_col_ind);
hipFree( d_val);
hipFree(d_active_row);
hipFree(d_passive_row);
hipFree(d_lastIdx_block_tile);
hipFree(d_lastIdx);
hipFree( d_W );
hipFree( d_H );
delete[] rows; delete[] cols;
delete[] vals;
}
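// Reads a MatrixMarket-style file (a "rows cols nnz" header line followed by
// 1-based COO triplets) and runs the distributed SDDMM.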
int main(int argc, char* argv[]){
ifstream fp(argv[1]);
k = atoi(argv[2]);
tile_sizeY = atoi(argv[3]);
tile_sizeX = atoi(argv[4]);
actv_row_size = tile_sizeY;
string str;
fp >> str;
while(!isdigit(str[0])){
getline(fp,str);
}
istringstream is(str);
is >> n_rows;
is >> n_cols;
is >> nnz;
//fp >> n_rows >> n_cols >> nnz;
long orig_nnz=nnz, rid=0,cid=0; float vid=0;
int *rows = new int[nnz];
int *cols = new int[nnz];
float *vals = new float[nnz];
long idx=0;
for (long o_idx = 0; o_idx < orig_nnz; ++o_idx) {
fp >> rid >> cid >> vid;
rows[idx]=rid-1;
cols[idx]=cid-1;
vals[idx]=vid;
idx++;
}
//cout << "From main: "<<n_rows << " "<<n_cols <<" "<< nnz << " tile-size: " << tile_sizeX<< " k: "<<k << " TB: "<< BLOCKSIZE<< endl;
nnz=idx;
init(rows, cols, vals);
}
| a7e4ef64d3e6fd7f746a8828f836d044cf601e04.cu | #include <iostream>
#include <fstream>
#include <stdio.h>
#include <vector>
#include <algorithm>
#include <iterator>
#include <utility>
#include <math.h>
#include <omp.h>
#include <cuda.h>
#include "util.h"
#include "kernel.h"
#include <bits/stdc++.h>
#include <mpi.h>
using namespace std;
#define mpi_barrier() MPI_Barrier(MPI_COMM_WORLD);
long n_rows, n_cols, nnz;
int actv_row_size = 180;
int tile_sizeX = 256;
int tile_sizeY = 25000;
int k=100;
int SM_CAPACITY = 8192;
int BLOCKSIZE=512;
inline cudaError_t checkCuda(cudaError_t result, int s){
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s at line %d\n", cudaGetErrorString(result), s);
assert(result == cudaSuccess);
}
return result;
}
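// Runs the GPU SDDMM over this rank's column slice: one thread block per
// group of actv_row_size active rows, launching comp_kernel_COO once per
// k-slice (k_slice = SM_CAPACITY / actv_row_size) and timing with events.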
void sddmm_GPU(int * d_row_ptr, int * d_row_ind, int *d_col_ind, float * d_val_ind, float * d_W, float *d_H,
int *d_tiled_ind, int *d_lastIdx, int *lastIdx_tile, int *d_lastIdx_block_tile ,int *d_active_row, int *d_passive_row,
int * count_actv_row, int &max_active_block, long new_nnz, int mpi_rank, int cols_in_rank, int nnz_in_rank, int col_loc){
int n_tile = n_cols/tile_sizeX + 1;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaStream_t stream[n_tile];
for (int i = 0; i < n_tile; i++){
cudaStreamCreate(&(stream[i]));
}
float mili =0, copyTime = 0 ;
dim3 block(BLOCKSIZE,1,1), grid(1,1,1);
int sum = 0, t_st =0 ;
int k_slice = SM_CAPACITY/actv_row_size;
int col_offset = col_loc ;
mpi_barrier();
// cout << "k_slice " << k_slice << endl;
checkCuda(cudaEventRecord(start), __LINE__);
int nnz_tile = nnz_in_rank; // lastIdx_tile[mpi_rank+1]-lastIdx_tile[mpi_rank];
// if(mpi_rank == 1)
// cout << " after " << lastIdx_tile[mpi_rank] << " " << lastIdx_tile[mpi_rank+1]-lastIdx_tile[mpi_rank] << " "<< nnz_in_rank << endl;
int active_block_this_tile = count_actv_row[mpi_rank]/actv_row_size+1;
grid.x = active_block_this_tile;
// if(mpi_rank == 1)
// cout << "ranks "<<mpi_rank <<" nnz "<< nnz_tile << " grid "<<grid.x <<" "<<count_actv_row[mpi_rank ]<< endl;
// cout <<" ohh " <<mpi_rank <<" " << lastIdx_tile[mpi_rank] <<" " << lastIdx_tile[mpi_rank+1] << " " << tile_sizeX<< endl;
//if(mpi_rank == 0)
for (int t_st = 0; t_st < k ; t_st +=k_slice){
comp_kernel_COO<<<grid,block, 0, stream[0]>>>(d_row_ind, d_col_ind, d_val_ind, d_W, d_H,
nnz, n_rows, n_cols, k, lastIdx_tile[mpi_rank], lastIdx_tile[mpi_rank+1], &(d_lastIdx_block_tile[(mpi_rank)*max_active_block]),
d_active_row, mpi_rank, t_st, count_actv_row[mpi_rank], actv_row_size, k_slice, col_offset);
}
//mpi_barrier();
checkCuda(cudaEventRecord(stop), __LINE__);
cudaEventSynchronize(stop);
// printf("\nTotal seconds: %.3f for rank = %d\n\n", seconds() - t0, mpi_rank);
//cudaDeviceSynchronize();
checkCuda(cudaEventElapsedTime(&mili, start, stop), __LINE__);
cudaDeviceSynchronize();
cout << mili << " ";
mpi_barrier();
}
void sddmm_CPU_CSR(int * row_ptr, int *col_ind, float * val_ind, float * W, float *H, float * p_ind){
// reduction(+:rmse)
long tot =0 ;
#pragma omp parallel for reduction(+:tot)
for (int r = 0; r < n_rows; ++r){
tot += row_ptr[r+1] - row_ptr[r];
float sm =0 ;
for (int ind = row_ptr[r]; ind < row_ptr[r+1]; ++ind){
int row = r;
int col = col_ind[ind];
int nnz = row_ptr[r+1]-row_ptr[r];
float val = val_ind[ind];
sm=0;
for (int t = 0; t < k; ++t){
sm += W[row * k + t] * H[col * k + t];
// cout <<W[row * k + t] <<" "<<H[col * k + t]<< endl;
}
p_ind[ind] = sm * val_ind[ind];
// cout << "ind " << row<<" "<<col << ":: " <<" "<< p_ind[ind] << " = " << sm <<" * "<< val_ind[ind]<< endl;
}
}
}
void sddmm_CPU_COO(int *row_ind, int *col_ind, float *val_ind, float *W, float *H, float *p_ind){
    double start_time = omp_get_wtime();
    omp_set_dynamic(0);
    omp_set_num_threads(28);
    #pragma omp parallel for
    for (int ind = 0; ind < nnz; ind++){
        float sm = 0;
        int row = row_ind[ind];
        int col = col_ind[ind];
        for (int t = 0; t < k; ++t)
            sm += W[row * k + t] * H[col * k + t];
        p_ind[ind] = sm; // note: unlike the CSR version, the val_ind scaling is skipped here
    }
    double CPU_time = omp_get_wtime() - start_time;
    // printf("\nomp time CPU : %.4f ms\n\n", CPU_time*1000);
}
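// init(): per-rank driver. Outline:
//   1. set up the MPI process grid,
//   2. build CSR/CSC views and partition columns across ranks,
//   3. reorder nonzeros into per-rank column tiles (rewrite_matrix_1D),
//   4. run the CPU reference and the GPU kernel,
//   5. compare the two and release resources.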
void init(int *rows, int *cols, float* vals){
//**************************MPI init*************************
MPI_Init(NULL, NULL);
    // Find out rank and size
    int mpi_rank, n_proc;
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &n_proc);
    // Split ranks into a root_p x root_p grid; the +1 guards against
    // floating-point truncation when n_proc is a perfect square.
    int root_p = sqrt(n_proc+1);
    int row_color = mpi_rank / root_p;
    int col_color = mpi_rank % root_p;
MPI_Comm row_comm, col_comm;
MPI_Comm_split(MPI_COMM_WORLD, row_color, mpi_rank, &row_comm);
MPI_Comm_split(MPI_COMM_WORLD, col_color, mpi_rank, &col_comm);
int row_rank, row_size, col_rank, col_size;
MPI_Comm_rank(row_comm, &row_rank);
MPI_Comm_size(row_comm, &row_size);
MPI_Comm_rank(col_comm, &col_rank);
MPI_Comm_size(col_comm, &col_size);
//printf("WORLD RANK/SIZE: %d/%d \t ROW RANK/SIZE: %d/%d %d/%d\n", mpi_rank, n_proc, row_rank, row_size, col_rank, col_size);
int n_bin=10;
int *count = new int[n_bin];
int *row_ptr = new int[n_rows+1];
int *col_ptr = new int[n_cols+1];
float *p_ind = new float[nnz];
float *W = new float[n_rows*k];
float *W_t = new float[n_rows*k];
float *H = new float[n_cols*k];
float *H_t = new float[n_cols*k];
int n_tile_c = n_proc;
int n_tile_r = n_rows/tile_sizeY + 1;
int max_active_block = (n_rows/actv_row_size+1);
int *count_actv_row = new int[n_tile_c];
int *lastIdx_tile = new int[n_tile_c+1];
int *lastIdx_block_tile = new int[(n_tile_c+1) * (n_rows/actv_row_size+1)];
    // Device pointers; initialized to NULL so the cudaFree calls at the end
    // are safe even for the ones this code path never allocates.
    float *d_val = NULL, *d_W = NULL, *d_H = NULL;
    int *d_row_ptr = NULL, *d_col_ind = NULL, *d_row_ind = NULL, *d_tiled_ind = NULL,
        *d_lastIdx = NULL, *d_active_row = NULL, *d_lastIdx_block_tile = NULL, *d_passive_row = NULL;
    int n_tileX = n_proc; // one column tile per rank (originally n_cols/tile_sizeX+1)
int n_tileY = n_rows/tile_sizeY+1;
long new_nnz =0 ;
initial(W, n_rows, k);
initial(H, n_cols, k);
make_HTasH(H, H_t, n_cols, k);
make_HTasH(W, W_t, n_rows, k);
int *new_rows = new int[nnz];
int *new_cols = new int[nnz];
float *new_vals = new float[nnz];
int *row_holder = new int[n_rows];
int *tiled_ind = new int [nnz];
int *active_row = new int[n_tileX * n_rows];
int *passive_row = new int[n_tileX * n_rows];
    // Assuming the input is already row-sorted; otherwise call
    // unsorted_make_CSR(rows, cols, vals, nnz, n_rows, n_cols, row_ptr).
int *cols_in_asym_rank = new int [n_proc];
int *nnz_in_asym_rank = new int [n_proc];
make_CSR(rows, cols, vals, nnz, n_rows, row_ptr, row_holder);
make_CSC(rows, cols, vals, nnz, n_rows, n_cols, col_ptr, mpi_rank, n_proc, cols_in_asym_rank, nnz_in_asym_rank);
    int cols_in_rank = cols_in_asym_rank[mpi_rank];
    // Under MPI, the per-rank column-tile width is the number of columns
    // this rank owns, and col_loc is the first owned column.
    tile_sizeX = cols_in_rank;
    int col_loc = cols_in_rank * mpi_rank;
//comp_bin(n_bin, count, n_rows, row_ptr, nnz);
int max_active_row=0;
mpi_barrier();
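    // rewrite_matrix_1D (defined elsewhere) presumably reorders the
    // nonzeros into per-rank column tiles, compacts the rows active in each
    // tile, and fills the per-tile/per-block offset arrays used by the kernel.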
max_active_row = rewrite_matrix_1D(row_ptr, rows, cols, vals, new_rows, new_cols, new_vals, nnz, n_rows, n_cols,
tile_sizeX, tiled_ind, lastIdx_tile, active_row, passive_row, count_actv_row, lastIdx_block_tile, actv_row_size,
new_nnz, row_holder, actv_row_size, cols_in_asym_rank, nnz_in_asym_rank, mpi_rank, n_proc);
mpi_barrier();
    int nnz_in_rank = nnz_in_asym_rank[mpi_rank]; // = lastIdx_tile[mpi_rank+1] - lastIdx_tile[mpi_rank]
    int nnz_loc = lastIdx_tile[mpi_rank]; // offset of this rank's first nonzero in the tiled ordering
    mpi_barrier();
// rewrite_col_sorted_matrix(row_ptr, rows, cols, vals, new_rows, new_cols, new_vals, nnz, n_rows, n_cols,
// tile_sizeX, tiled_ind, lastIdx_tile, BLOCKSIZE, new_nnz);
    // sddmm_CPU_CSR(row_ptr, cols, vals, W, H, p_ind);
    sddmm_CPU_COO(rows, cols, vals, W, H, p_ind);
    //*********** Starting GPU: allocate device memory ****************
checkCuda(cudaMalloc((void**)&d_W, k*n_rows*sizeof(float)),0);
checkCuda(cudaMalloc((void**)&d_H, k*n_cols*sizeof(float)),1);
checkCuda(cudaMalloc((void**)&d_row_ind, nnz_in_rank*sizeof(int)),4);
checkCuda(cudaMalloc((void**)&d_col_ind, nnz_in_rank*sizeof(int)),4);
checkCuda(cudaMalloc((void**)&d_val, nnz_in_rank*sizeof(float)),4);
    checkCuda(cudaMalloc((void**)&d_lastIdx, (n_tile_c+1)*sizeof(int)),4);
checkCuda(cudaMalloc((void**)&d_active_row, n_tileX*max_active_row*sizeof(int)),4);
checkCuda(cudaMalloc((void**)&d_lastIdx_block_tile, n_tileX*max_active_block*sizeof(int)),4);
//******** Copy GPU memory********
checkCuda(cudaMemcpy(d_row_ind, &(new_rows[nnz_loc]), nnz_in_rank*sizeof(int), cudaMemcpyHostToDevice),4);
checkCuda(cudaMemcpy(d_col_ind, &(new_cols[nnz_loc]), nnz_in_rank*sizeof(int), cudaMemcpyHostToDevice),4);
    // d_val starts zeroed so the k-sliced kernel launches can accumulate
    // partial dot products into it.
    cudaMemset(d_val, 0, nnz_in_rank*sizeof(float));
checkCuda(cudaMemcpy(d_lastIdx, &(lastIdx_tile[0]), (n_tile_c+1)*sizeof(int), cudaMemcpyHostToDevice),4);
    for (int i = 0; i < n_tileX; ++i){
        checkCuda(cudaMemcpy(d_lastIdx_block_tile+i*max_active_block, &(lastIdx_block_tile[i*max_active_block]), max_active_block*sizeof(int), cudaMemcpyHostToDevice),4);
        mpi_barrier();
    }
checkCuda(cudaMemcpy(d_active_row, &(active_row[mpi_rank*n_rows]), count_actv_row[mpi_rank]*sizeof(int), cudaMemcpyHostToDevice),4);
// int sum =0 ;
// for (int i = 0; i < n_tileX; ++i){
// checkCuda(cudaMemcpy(d_active_row+sum, &(active_row[i*n_rows]), count_actv_row[i]*sizeof(int), cudaMemcpyHostToDevice),4);
// mpi_barrier();
// sum += count_actv_row[i];
// }
cudaMemcpy(d_W, &(W[0]), n_rows * k *sizeof(float), cudaMemcpyHostToDevice);
//cudaMemcpy(d_W, &(W_t[0]), n_rows * k *sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_H, &(H[0]), n_cols * k *sizeof(float), cudaMemcpyHostToDevice);
//cudaMemcpy(d_H, &(H[col_loc*k]), cols_in_rank * k *sizeof(float), cudaMemcpyHostToDevice);
//cudaMemcpy(d_H, &(H_t[0]), n_cols * k *sizeof(float), cudaMemcpyHostToDevice);
    mpi_barrier();
sddmm_GPU(d_row_ptr, d_row_ind, d_col_ind, d_val, d_W, d_H, d_tiled_ind, d_lastIdx, lastIdx_tile, d_lastIdx_block_tile, d_active_row, d_passive_row ,count_actv_row,
max_active_block, new_nnz, mpi_rank, cols_in_rank, nnz_in_rank, col_loc );
//******** correctness check
mpi_barrier();
    float CPU_tot = 0, CPU_tot_orig = 0;
    float *p_ind_temp = new float[new_nnz];
    mpi_barrier();
    // Copy this rank's GPU results back into its slice of p_ind_temp.
    checkCuda(cudaMemcpy(&(p_ind_temp[nnz_loc]), d_val, nnz_in_rank*sizeof(float), cudaMemcpyDeviceToHost),4);
mpi_barrier();
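    // tiled_ind maps a reordered (tiled) nonzero index back to its original
    // COO index, so per-rank GPU results can be compared against the CPU
    // reference computed in the original order.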
    for (int i = 0; i < nnz; ++i){
        CPU_tot += p_ind[tiled_ind[i]];
        CPU_tot_orig += p_ind[i];
    }
    mpi_barrier();
    long diff_tot = 0;
    for (int i = nnz_loc; i < nnz_in_rank + nnz_loc; ++i){
        if(fabs(p_ind_temp[i] - p_ind[tiled_ind[i]]) > .0001){
            diff_tot++;
            // printf("CPU GPU diff %d: %f %f %f \n", i, p_ind_temp[i], p_ind[tiled_ind[i]], p_ind_temp[i]-p_ind[tiled_ind[i]]);
        }
    }
mpi_barrier();
if(diff_tot > 0)
cout << "diff values in CPU and GPU in machine: " << mpi_rank <<" " << diff_tot<< endl;
MPI_Finalize();
cout << endl;
    // Free device and host allocations.
    cudaFree(d_row_ptr);
    cudaFree(d_row_ind);
    cudaFree(d_col_ind);
    cudaFree(d_val);
    cudaFree(d_active_row);
    cudaFree(d_passive_row);
    cudaFree(d_lastIdx_block_tile);
    cudaFree(d_lastIdx);
    cudaFree(d_W);
    cudaFree(d_H);
    delete [] rows; delete [] cols; delete [] vals;
    delete [] new_rows; delete [] new_cols; delete [] new_vals;
    delete [] W; delete [] W_t; delete [] H; delete [] H_t;
    delete [] p_ind; delete [] p_ind_temp;
}
}
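// Expected input: a MatrixMarket-style file whose first non-comment line
// is "n_rows n_cols nnz", followed by 1-based "row col val" triples.
// Assumed invocation (binary name is a placeholder):
//   mpirun -np <P> ./sddmm <matrix file> <k> <tile_sizeY> <tile_sizeX>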
int main(int argc, char* argv[]){
    if(argc < 5){
        cerr << "usage: " << argv[0] << " <matrix file> <k> <tile_sizeY> <tile_sizeX>" << endl;
        return 1;
    }
    ifstream fp(argv[1]);
k = atoi(argv[2]);
tile_sizeY = atoi(argv[3]);
tile_sizeX = atoi(argv[4]);
actv_row_size = tile_sizeY;
    // Skip comment/header lines until the size line ("n_rows n_cols nnz").
    string str;
    fp >> str;
    while(!isdigit(str[0])){
        getline(fp, str);
    }
istringstream is(str);
is >> n_rows;
is >> n_cols;
is >> nnz;
//fp >> n_rows >> n_cols >> nnz;
long orig_nnz=nnz, rid=0,cid=0; float vid=0;
int *rows = new int[nnz];
int *cols = new int[nnz];
float *vals = new float[nnz];
long idx=0;
for (long o_idx = 0; o_idx < orig_nnz; ++o_idx) {
fp >> rid >> cid >> vid;
rows[idx]=rid-1;
cols[idx]=cid-1;
vals[idx]=vid;
idx++;
}
    nnz = idx; // number of entries actually read
init(rows, cols, vals);
}