hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars)
---|---|---|---|
29d61e970d6986cc5daa09ea28a1c3ab5c14bfea.hip | // !!! This is a file automatically generated by hipify!!!
// wave 2D GPU
// nvcc -arch=sm_70 -O3 wave_2D_Vis_v2.cu
// run: ./a.out
#include "stdio.h"
#include "stdlib.h"
#include "math.h"
#include "hip/hip_runtime.h"
// #define USE_SINGLE_PRECISION /* Comment this line using "//" if you want to use double precision. */
#ifdef USE_SINGLE_PRECISION
#define DAT float
#define PRECIS 4
#else
#define DAT double
#define PRECIS 8
#endif
#define GPU_ID 0
#define OVERLENGTH_X 1
#define OVERLENGTH_Y 1
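// Helper macros: zeros(A,nx,ny) declares a host buffer A_h and a device buffer A_d of nx*ny DAT values,
// zero-fills the host copy and uploads it to the device; gather(A,nx,ny) copies A_d back into A_h;
// free_all(A) releases both buffers.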
#define zeros(A,nx,ny) DAT *A##_d,*A##_h; A##_h = (DAT*)malloc(((nx)*(ny))*sizeof(DAT)); \
for(i=0; i < ((nx)*(ny)); i++){ A##_h[i]=(DAT)0.0; } \
hipMalloc(&A##_d ,((nx)*(ny))*sizeof(DAT)); \
hipMemcpy( A##_d,A##_h,((nx)*(ny))*sizeof(DAT),hipMemcpyHostToDevice);
#define free_all(A) free(A##_h); hipFree(A##_d);
#define gather(A,nx,ny) hipMemcpy( A##_h,A##_d,((nx)*(ny))*sizeof(DAT),hipMemcpyDeviceToHost);
// --------------------------------------------------------------------- //
// Physics
const DAT Lx = 10.0;
const DAT Ly = 10.0;
const DAT k = 1.0;
const DAT rho = 1.0;
const DAT mu = 1.0;
// Numerics
#define BLOCK_X 32
#define BLOCK_Y 32
#define GRID_X 4
#define GRID_Y 4
const int nx = BLOCK_X*GRID_X - OVERLENGTH_X;
const int ny = BLOCK_Y*GRID_Y - OVERLENGTH_Y;
const int nt = 20000;
const DAT dx = Lx/((DAT)nx);
const DAT dy = Ly/((DAT)ny);
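// Explicit time step scaled by the squared minimum grid spacing; the 4.1*3*4 divisor presumably
// acts as a stability safety margin for the explicit updates below.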
const DAT dt = (min(dx,dy)*min(dx,dy))/(mu*4.1*3*4);
// --------------------------------------------------------------------- //
void save_info(int me, const int nx, const int ny){
FILE* fid;
if (me==0){ fid=fopen("0_nxy.inf" ,"w"); fprintf(fid,"%d %d %d", PRECIS, nx, ny); fclose(fid); }
}
#define save_info() save_info(me, nx, ny);
void save_array(DAT* A, int nx, int ny, int me, const char A_name[]){
char* fname; FILE* fid; asprintf(&fname, "%d_%s.res" , me, A_name);
fid=fopen(fname, "wb"); fwrite(A, sizeof(DAT), (nx)*(ny), fid); fclose(fid); free(fname);
}
#define SaveArray(A,nx,ny,A_name) gather(A,nx,ny); save_array(A##_h,nx,ny,me,A_name);
void clean_cuda(){
hipError_t ce = hipGetLastError();
if(ce != hipSuccess){ printf("ERROR launching GPU C-CUDA program: %s\n", hipGetErrorString(ce)); hipDeviceReset();}
}
// --------------------------------------------------------------------- //
// Computing physics kernels
__global__ void init(DAT* x, DAT* y, DAT* P, const DAT Lx, const DAT Ly, const DAT dx, const DAT dy, const int nx, const int ny){
int ix = blockIdx.x*blockDim.x + threadIdx.x; // thread ID, dimension x
int iy = blockIdx.y*blockDim.y + threadIdx.y; // thread ID, dimension y
if (iy<ny && ix<nx){ x[ix + iy*nx] = (DAT)ix*dx + (-Lx+dx)/2.0; }
if (iy<ny && ix<nx){ y[ix + iy*nx] = (DAT)iy*dy + (-Ly+dy)/2.0; }
if (iy<ny && ix<nx){ P[ix + iy*nx] = exp(-(x[ix + iy*nx]*x[ix + iy*nx]) -(y[ix + iy*nx]*y[ix + iy*nx])); }
}
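// compute_V: updates the staggered velocities Vx ((nx+1) x ny) and Vy (nx x (ny+1)) from the
// pressure gradient and the divergence of the stress components, scaled by dt/rho.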
__global__ void compute_V(DAT* Vx, DAT* Vy, DAT* P, DAT* Txx, DAT* Tyy, DAT* Txy, const DAT dt, const DAT rho, const DAT dx, const DAT dy, const int nx, const int ny){
int ix = blockIdx.x*blockDim.x + threadIdx.x; // thread ID, dimension x
int iy = blockIdx.y*blockDim.y + threadIdx.y; // thread ID, dimension y
if (iy<ny && ix>0 && ix<nx){
Vx[ix+(iy)*(nx+1)] = Vx[ix+(iy)*(nx+1)] + dt/rho*(
-1*(P[ix+(iy)*nx]-P[(ix-1)+(iy)*nx])/dx
+ (Txx[ix+(iy)*nx] - Txx[(ix-1)+(iy)*nx])/dx
+ (Txy[ix+(iy+1)*(nx+1)] - Txy[ix+(iy)*(nx+1)])/dy);
}
if (iy>0 && iy<ny && ix<nx){
Vy[ix+(iy)*(nx)] = Vy[ix+(iy)*(nx)] + dt/rho*(
-1*(P[ix+(iy)*nx]-P[ix+(iy-1)*nx])/dy
+ (Tyy[ix+(iy)*nx] - Tyy[ix+(iy-1)*nx])/dy
+ (Txy[(ix+1)+(iy)*(nx+1)] - Txy[ix+(iy)*(nx+1)])/dx);
}
}
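// compute_P: updates the cell-centred pressure from the divergence of the velocity field,
// scaled by dt and the bulk modulus k.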
__global__ void compute_P(DAT* Vx, DAT* Vy, DAT* P, const DAT dt, const DAT k, const DAT dx, const DAT dy, const int nx, const int ny){
int ix = blockIdx.x*blockDim.x + threadIdx.x; // thread ID, dimension x
int iy = blockIdx.y*blockDim.y + threadIdx.y; // thread ID, dimension y
if (iy<ny && ix<nx){
P[ix + iy*nx] = P[ix+(iy)*nx] - dt*k*((Vx[(ix+1)+(iy)*(nx+1)]-Vx[ix+(iy)*(nx+1)])/dx+
(Vy[ix+(iy+1)*(nx)]-Vy[ix+(iy)*(nx)])/dy);
}
}
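// compute_T: computes the deviatoric stresses Txx, Tyy (cell-centred) and the shear stress Txy
// (node-centred, (nx+1) x (ny+1)) from velocity gradients and the viscosity mu.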
__global__ void compute_T(DAT* Vx, DAT* Vy, DAT* P, DAT* Txx, DAT* Tyy, DAT* Txy, const DAT mu, const DAT dt, const DAT dx, const DAT dy, const int nx, const int ny){
int ix = blockIdx.x*blockDim.x + threadIdx.x; // thread ID, dimension x
int iy = blockIdx.y*blockDim.y + threadIdx.y; // thread ID, dimension y
if (iy<ny && ix<nx){
Txx[ix+(iy)*nx] = 2*mu*(
(Vx[(ix+1)+(iy )*(nx+1)]-Vx[ix+(iy)*(nx+1)])/dx -
((Vx[(ix+1)+(iy )*(nx+1)]-Vx[ix+(iy)*(nx+1)])/dx +
(Vy[ ix +(iy+1)*(nx )]-Vy[ix+(iy)*(nx )])/dy)/((DAT)3));
Tyy[ix+(iy)*nx] = 2*mu*(
(Vy[ ix +(iy+1)*(nx )]-Vy[ix+(iy)*(nx )])/dy -
((Vx[(ix+1)+(iy )*(nx+1)]-Vx[ix+(iy)*(nx+1)])/dx +
(Vy[ ix +(iy+1)*(nx )]-Vy[ix+(iy)*(nx )])/dy)/((DAT)3));
}
if(iy<ny && ix<nx && ix>0 && iy >0){
Txy[ix+(iy)*(nx+1)] = mu*(
(Vx[ix+(iy)*(nx+1)] - Vx[ ix +(iy-1)*(nx+1)])/dy +
(Vy[ix+(iy)*(nx )] - Vy[(ix-1)+(iy )*(nx )])/dx);
}
}
int main(){
int i, it;
// Set up GPU
int gpu_id=-1;
int me = 0;
dim3 grid, block;
block.x = BLOCK_X; grid.x = GRID_X;
block.y = BLOCK_Y; grid.y = GRID_Y;
gpu_id = GPU_ID; hipSetDevice(gpu_id); hipGetDevice(&gpu_id);
hipDeviceReset(); hipDeviceSetCacheConfig(hipFuncCachePreferL1); // set L1 cache to preferred
if (me==0){ printf("Process uses GPU with id %d.\n",gpu_id); }
// Initial arrays
zeros(x ,nx ,ny );
zeros(y ,nx ,ny );
zeros(P ,nx ,ny );
zeros(Vx ,nx+1,ny );
zeros(Vy ,nx ,ny+1);
zeros(Txx,(nx )*(ny ),1);
zeros(Tyy,(nx )*(ny ),1);
zeros(Txy,(nx+1)*(ny+1),1);
// Initial conditions
hipLaunchKernelGGL(( init), dim3(grid),dim3(block), 0, 0, x_d, y_d, P_d, Lx, Ly, dx, dy, nx, ny); hipDeviceSynchronize();
// Action
for (it=0;it<nt;it++){
hipLaunchKernelGGL(( compute_V), dim3(grid),dim3(block), 0, 0, Vx_d, Vy_d, P_d, Txx_d, Tyy_d, Txy_d, dt, rho, dx, dy, nx, ny); hipDeviceSynchronize();
hipLaunchKernelGGL(( compute_P), dim3(grid),dim3(block), 0, 0, Vx_d, Vy_d, P_d, dt, k, dx, dy, nx, ny); hipDeviceSynchronize();
hipLaunchKernelGGL(( compute_T), dim3(grid),dim3(block), 0, 0, Vx_d, Vy_d, P_d, Txx_d, Tyy_d, Txy_d, mu, dt, dx, dy, nx, ny); hipDeviceSynchronize();
}//it
save_info();
SaveArray(P ,nx ,ny ,"P" );
SaveArray(Vx,nx+1,ny ,"Vx");
SaveArray(Vy,nx ,ny+1,"Vy");
SaveArray(Txx,nx ,ny ,"Txx");
SaveArray(Tyy,nx ,ny ,"Tyy");
SaveArray(Txy,nx+1,ny+1,"Txy");
free_all(x );
free_all(y );
free_all(P );
free_all(Vx);
free_all(Vy);
free_all(Txx);
free_all(Tyy);
free_all(Txy);
clean_cuda();
}
| 29d61e970d6986cc5daa09ea28a1c3ab5c14bfea.cu | // wave 2D GPU
// nvcc -arch=sm_70 -O3 wave_2D_Vis_v2.cu
// run: ./a.out
#include "stdio.h"
#include "stdlib.h"
#include "math.h"
#include "cuda.h"
// #define USE_SINGLE_PRECISION /* Comment this line using "//" if you want to use double precision. */
#ifdef USE_SINGLE_PRECISION
#define DAT float
#define PRECIS 4
#else
#define DAT double
#define PRECIS 8
#endif
#define GPU_ID 0
#define OVERLENGTH_X 1
#define OVERLENGTH_Y 1
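// Helper macros: zeros(A,nx,ny) declares a host buffer A_h and a device buffer A_d of nx*ny DAT values,
// zero-fills the host copy and uploads it to the device; gather(A,nx,ny) copies A_d back into A_h;
// free_all(A) releases both buffers.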
#define zeros(A,nx,ny) DAT *A##_d,*A##_h; A##_h = (DAT*)malloc(((nx)*(ny))*sizeof(DAT)); \
for(i=0; i < ((nx)*(ny)); i++){ A##_h[i]=(DAT)0.0; } \
cudaMalloc(&A##_d ,((nx)*(ny))*sizeof(DAT)); \
cudaMemcpy( A##_d,A##_h,((nx)*(ny))*sizeof(DAT),cudaMemcpyHostToDevice);
#define free_all(A) free(A##_h); cudaFree(A##_d);
#define gather(A,nx,ny) cudaMemcpy( A##_h,A##_d,((nx)*(ny))*sizeof(DAT),cudaMemcpyDeviceToHost);
// --------------------------------------------------------------------- //
// Physics
const DAT Lx = 10.0;
const DAT Ly = 10.0;
const DAT k = 1.0;
const DAT rho = 1.0;
const DAT mu = 1.0;
// Numerics
#define BLOCK_X 32
#define BLOCK_Y 32
#define GRID_X 4
#define GRID_Y 4
const int nx = BLOCK_X*GRID_X - OVERLENGTH_X;
const int ny = BLOCK_Y*GRID_Y - OVERLENGTH_Y;
const int nt = 20000;
const DAT dx = Lx/((DAT)nx);
const DAT dy = Ly/((DAT)ny);
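// Explicit time step scaled by the squared minimum grid spacing; the 4.1*3*4 divisor presumably
// acts as a stability safety margin for the explicit updates below.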
const DAT dt = (min(dx,dy)*min(dx,dy))/(mu*4.1*3*4);
// --------------------------------------------------------------------- //
void save_info(int me, const int nx, const int ny){
FILE* fid;
if (me==0){ fid=fopen("0_nxy.inf" ,"w"); fprintf(fid,"%d %d %d", PRECIS, nx, ny); fclose(fid); }
}
#define save_info() save_info(me, nx, ny);
void save_array(DAT* A, int nx, int ny, int me, const char A_name[]){
char* fname; FILE* fid; asprintf(&fname, "%d_%s.res" , me, A_name);
fid=fopen(fname, "wb"); fwrite(A, sizeof(DAT), (nx)*(ny), fid); fclose(fid); free(fname);
}
#define SaveArray(A,nx,ny,A_name) gather(A,nx,ny); save_array(A##_h,nx,ny,me,A_name);
void clean_cuda(){
cudaError_t ce = cudaGetLastError();
if(ce != cudaSuccess){ printf("ERROR launching GPU C-CUDA program: %s\n", cudaGetErrorString(ce)); cudaDeviceReset();}
}
// --------------------------------------------------------------------- //
// Computing physics kernels
__global__ void init(DAT* x, DAT* y, DAT* P, const DAT Lx, const DAT Ly, const DAT dx, const DAT dy, const int nx, const int ny){
int ix = blockIdx.x*blockDim.x + threadIdx.x; // thread ID, dimension x
int iy = blockIdx.y*blockDim.y + threadIdx.y; // thread ID, dimension y
if (iy<ny && ix<nx){ x[ix + iy*nx] = (DAT)ix*dx + (-Lx+dx)/2.0; }
if (iy<ny && ix<nx){ y[ix + iy*nx] = (DAT)iy*dy + (-Ly+dy)/2.0; }
if (iy<ny && ix<nx){ P[ix + iy*nx] = exp(-(x[ix + iy*nx]*x[ix + iy*nx]) -(y[ix + iy*nx]*y[ix + iy*nx])); }
}
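// compute_V: updates the staggered velocities Vx ((nx+1) x ny) and Vy (nx x (ny+1)) from the
// pressure gradient and the divergence of the stress components, scaled by dt/rho.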
__global__ void compute_V(DAT* Vx, DAT* Vy, DAT* P, DAT* Txx, DAT* Tyy, DAT* Txy, const DAT dt, const DAT rho, const DAT dx, const DAT dy, const int nx, const int ny){
int ix = blockIdx.x*blockDim.x + threadIdx.x; // thread ID, dimension x
int iy = blockIdx.y*blockDim.y + threadIdx.y; // thread ID, dimension y
if (iy<ny && ix>0 && ix<nx){
Vx[ix+(iy)*(nx+1)] = Vx[ix+(iy)*(nx+1)] + dt/rho*(
-1*(P[ix+(iy)*nx]-P[(ix-1)+(iy)*nx])/dx
+ (Txx[ix+(iy)*nx] - Txx[(ix-1)+(iy)*nx])/dx
+ (Txy[ix+(iy+1)*(nx+1)] - Txy[ix+(iy)*(nx+1)])/dy);
}
if (iy>0 && iy<ny && ix<nx){
Vy[ix+(iy)*(nx)] = Vy[ix+(iy)*(nx)] + dt/rho*(
-1*(P[ix+(iy)*nx]-P[ix+(iy-1)*nx])/dy
+ (Tyy[ix+(iy)*nx] - Tyy[ix+(iy-1)*nx])/dy
+ (Txy[(ix+1)+(iy)*(nx+1)] - Txy[ix+(iy)*(nx+1)])/dx);
}
}
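// compute_P: updates the cell-centred pressure from the divergence of the velocity field,
// scaled by dt and the bulk modulus k.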
__global__ void compute_P(DAT* Vx, DAT* Vy, DAT* P, const DAT dt, const DAT k, const DAT dx, const DAT dy, const int nx, const int ny){
int ix = blockIdx.x*blockDim.x + threadIdx.x; // thread ID, dimension x
int iy = blockIdx.y*blockDim.y + threadIdx.y; // thread ID, dimension y
if (iy<ny && ix<nx){
P[ix + iy*nx] = P[ix+(iy)*nx] - dt*k*((Vx[(ix+1)+(iy)*(nx+1)]-Vx[ix+(iy)*(nx+1)])/dx+
(Vy[ix+(iy+1)*(nx)]-Vy[ix+(iy)*(nx)])/dy);
}
}
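// compute_T: computes the deviatoric stresses Txx, Tyy (cell-centred) and the shear stress Txy
// (node-centred, (nx+1) x (ny+1)) from velocity gradients and the viscosity mu.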
__global__ void compute_T(DAT* Vx, DAT* Vy, DAT* P, DAT* Txx, DAT* Tyy, DAT* Txy, const DAT mu, const DAT dt, const DAT dx, const DAT dy, const int nx, const int ny){
int ix = blockIdx.x*blockDim.x + threadIdx.x; // thread ID, dimension x
int iy = blockIdx.y*blockDim.y + threadIdx.y; // thread ID, dimension y
if (iy<ny && ix<nx){
Txx[ix+(iy)*nx] = 2*mu*(
(Vx[(ix+1)+(iy )*(nx+1)]-Vx[ix+(iy)*(nx+1)])/dx -
((Vx[(ix+1)+(iy )*(nx+1)]-Vx[ix+(iy)*(nx+1)])/dx +
(Vy[ ix +(iy+1)*(nx )]-Vy[ix+(iy)*(nx )])/dy)/((DAT)3));
Tyy[ix+(iy)*nx] = 2*mu*(
(Vy[ ix +(iy+1)*(nx )]-Vy[ix+(iy)*(nx )])/dy -
((Vx[(ix+1)+(iy )*(nx+1)]-Vx[ix+(iy)*(nx+1)])/dx +
(Vy[ ix +(iy+1)*(nx )]-Vy[ix+(iy)*(nx )])/dy)/((DAT)3));
}
if(iy<ny && ix<nx && ix>0 && iy >0){
Txy[ix+(iy)*(nx+1)] = mu*(
(Vx[ix+(iy)*(nx+1)] - Vx[ ix +(iy-1)*(nx+1)])/dy +
(Vy[ix+(iy)*(nx )] - Vy[(ix-1)+(iy )*(nx )])/dx);
}
}
int main(){
int i, it;
// Set up GPU
int gpu_id=-1;
int me = 0;
dim3 grid, block;
block.x = BLOCK_X; grid.x = GRID_X;
block.y = BLOCK_Y; grid.y = GRID_Y;
gpu_id = GPU_ID; cudaSetDevice(gpu_id); cudaGetDevice(&gpu_id);
cudaDeviceReset(); cudaDeviceSetCacheConfig(cudaFuncCachePreferL1); // set L1 cache to preferred
if (me==0){ printf("Process uses GPU with id %d.\n",gpu_id); }
// Initial arrays
zeros(x ,nx ,ny );
zeros(y ,nx ,ny );
zeros(P ,nx ,ny );
zeros(Vx ,nx+1,ny );
zeros(Vy ,nx ,ny+1);
zeros(Txx,(nx )*(ny ),1);
zeros(Tyy,(nx )*(ny ),1);
zeros(Txy,(nx+1)*(ny+1),1);
// Initial conditions
init<<<grid,block>>>(x_d, y_d, P_d, Lx, Ly, dx, dy, nx, ny); cudaDeviceSynchronize();
// Action
for (it=0;it<nt;it++){
compute_V<<<grid,block>>>(Vx_d, Vy_d, P_d, Txx_d, Tyy_d, Txy_d, dt, rho, dx, dy, nx, ny); cudaDeviceSynchronize();
compute_P<<<grid,block>>>(Vx_d, Vy_d, P_d, dt, k, dx, dy, nx, ny); cudaDeviceSynchronize();
compute_T<<<grid,block>>>(Vx_d, Vy_d, P_d, Txx_d, Tyy_d, Txy_d, mu, dt, dx, dy, nx, ny); cudaDeviceSynchronize();
}//it
save_info();
SaveArray(P ,nx ,ny ,"P" );
SaveArray(Vx,nx+1,ny ,"Vx");
SaveArray(Vy,nx ,ny+1,"Vy");
SaveArray(Txx,nx ,ny ,"Txx");
SaveArray(Tyy,nx ,ny ,"Tyy");
SaveArray(Txy,nx+1,ny+1,"Txy");
free_all(x );
free_all(y );
free_all(P );
free_all(Vx);
free_all(Vy);
free_all(Txx);
free_all(Tyy);
free_all(Txy);
clean_cuda();
}
|
811c40a8af53867df6280e63bf614d36c10ba1b4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <hip/hip_vector_types.h>
#include <optix_device.h>
#include "optixHello.h"
#include "helpers.h"
#include "random.h"
//bool is_vpl = false;
#define MIN_DIST2 0.1f//*vpl_dist_scale_square
#define VPL_SHADOW_OFFSET 10.f
extern "C" {
__constant__ Params params;
}
//_________BASIC FUNCTIONS (UTILS)_______________________________________________________________________
__device__ __inline__ float Clamp(float val, float low, float high) {
if (val < low) return low;
else if (val > high) return high;
else return val;
}
__device__ __inline__ float SmoothStep(float min, float max, float value) {
float v = Clamp((value - min) / (max - min), 0.f, 1.f);
return v * v * (-2.f * v + 3.f);
}
__device__ __inline__ float map(float value, float start1, float stop1, float start2, float stop2) {
return start2 + (stop2 - start2) * ((value - start1) / (stop1 - start1));
}
//_______________________________________________________________________
//------------------GET AND SET RAY INFO------------------
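// The per-ray payload is carried in five 32-bit payload slots; float fields are reinterpreted
// with int_as_float / float_as_int when read and written.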
static __device__ __inline__ RadiancePRD getRadiancePRD()
{
RadiancePRD prd;
prd.result.x = int_as_float(optixGetPayload_0());
prd.result.y = int_as_float(optixGetPayload_1());
prd.result.z = int_as_float(optixGetPayload_2());
prd.light_number = int_as_float(optixGetPayload_3());
prd.depth = optixGetPayload_4();
return prd;
}
static __device__ __inline__ void setRadiancePRD(const RadiancePRD &prd)
{
optixSetPayload_0(float_as_int(prd.result.x));
optixSetPayload_1(float_as_int(prd.result.y));
optixSetPayload_2(float_as_int(prd.result.z));
optixSetPayload_3(float_as_int(prd.light_number));
optixSetPayload_4(prd.depth);
}
static __device__ __inline__ OcclusionPRD getOcclusionPRD()
{
OcclusionPRD prd;
prd.attenuation.x = int_as_float(optixGetPayload_0());
prd.attenuation.y = int_as_float(optixGetPayload_1());
prd.attenuation.z = int_as_float(optixGetPayload_2());
return prd;
}
static __device__ __inline__ void setOcclusionPRD(const OcclusionPRD &prd)
{
optixSetPayload_0(float_as_int(prd.attenuation.x));
optixSetPayload_1(float_as_int(prd.attenuation.y));
optixSetPayload_2(float_as_int(prd.attenuation.z));
}
//-------------------------------------------------------
//Compute ray value when ray misses the scene
__device__ void phongShadowed()
{
// this material is opaque, so it fully attenuates all shadow rays
OcclusionPRD prd;
prd.attenuation = make_float3(0.f);
setOcclusionPRD(prd);
optixTerminateRay();
}
//Function to mark where the VPLs are in the scene
__device__ __inline__ float3 show_VPL(float3 hit_point) {
float3 vpl_pos_color = make_float3(0.);
for (int i = 0; i < params.num_hit_vpl; i++)
{
VPL showvpl = params.vpls[i];
float3 pos_vpl = showvpl.pos;
float dist = length(hit_point - pos_vpl);
if (dist < 0.05)
{
vpl_pos_color = make_float3(1.);//showvpl.color;// params.cluster_color[params.VPL_assing_cluster[i]];
//vpl_pos_color = make_float3(1.);
break;
}
}
return vpl_pos_color;
}
//Compute direct illumination of each light source
__device__ __inline__
float3 direct_light_contribution(float3 hit_point, float3 p_normal, float3 p_Kd, BasicLight current_light) {
float3 direct_contribution = make_float3(0.);
BasicLight light = current_light;
float Ldist = length(light.pos - hit_point);
float3 L = normalize(light.pos - hit_point);
float nDl = dot(p_normal, L);
// cast shadow ray
float3 light_attenuation = make_float3(static_cast<float>(nDl > 0.0f));
if (nDl > 0.0f)
{
OcclusionPRD shadow_prd;
shadow_prd.attenuation = make_float3(1.0f);
optixTrace(
params.handle,
hit_point,
L,
0.01f,
Ldist,
0.0f,
OptixVisibilityMask(1),
OPTIX_RAY_FLAG_NONE,
RAY_TYPE_OCCLUSION,
RAY_TYPE_COUNT,
RAY_TYPE_OCCLUSION,
float3_as_args(shadow_prd.attenuation)/*,
reinterpret_cast<uint32_t&>(shadow_prd.is_indirect)*/);
light_attenuation = shadow_prd.attenuation;
}
// If not completely shadowed, light the hit point
if (fmaxf(light_attenuation) > 0.0f)
{
float3 Lc = light.color * light_attenuation;
direct_contribution += p_Kd * nDl * Lc;
}
return direct_contribution;
}
//Compute irradiance of each VPL.
__device__ __inline__
float3 VPL_contribution(float3 hit_point, float3 p_normal, float3 p_Kd, VPL current_VPL) {
float3 irradiance = make_float3(0.);
//Compute the incident direction of the light coming from the current VPL.
float3 L = normalize(current_VPL.pos - hit_point);
//Compute its angle with the point
float nDl = dot(p_normal, L);
//If the angle is between -90 and 90 degrees the VPL can add its contribution
if (nDl >= 0.0f)
{
float Ldist = length(current_VPL.pos - hit_point);//Distance between point and VPL
float Ldist2 = Ldist * Ldist;// Square of the distance
//Apply smooth step to avoid aberrations
float distScale = SmoothStep(.0f + params.minSS, 20.f + params.maxSS, Ldist2);
//float distScale = Ldist2;
if (distScale > 0.f)
{
float visible = 1.f; // default to unoccluded; the shadow ray below is skipped for negligible contributions
OcclusionPRD VPL_prd;
VPL_prd.attenuation = make_float3(1.0f);
//Geometric term
float3 L2 = normalize(hit_point - current_VPL.pos);
float nvDl2 = dot(current_VPL.normal, L2);
float G = fabs(nvDl2 * nDl) / Ldist2;// divided by Ldist2
float3 result = current_VPL.color * G *distScale;
if (length(result) > 0.02) {
optixTrace(
params.handle,
hit_point,
L,
0.01f * VPL_SHADOW_OFFSET,
Ldist - 0.01,
0.0f,
OptixVisibilityMask(1),
OPTIX_RAY_FLAG_NONE,
RAY_TYPE_OCCLUSION,
RAY_TYPE_COUNT,
RAY_TYPE_OCCLUSION,
float3_as_args(VPL_prd.attenuation)/*,
reinterpret_cast<uint32_t&>(shadow_prd.is_indirect)*/);
visible = VPL_prd.attenuation.x;
}
irradiance += result * visible;
}
}
return irradiance;
}
//Compute the total illumination of one point.
static
__device__ void phongShade(float3 p_Kd,
float3 p_normal)
{
//Extract ray and thread information
const uint3 idx = optixGetLaunchIndex();
const uint32_t image_index = params.width*idx.y + idx.x;
const float3 ray_orig = optixGetWorldRayOrigin();
const float3 ray_dir = optixGetWorldRayDirection();
const float ray_t = optixGetRayTmax();
RadiancePRD prd = getRadiancePRD();
//Compute hit point
float3 hit_point = ray_orig + ray_t * ray_dir;
//Init different types of illumination
float3 final_direct = make_float3(0, 0, 0);
float3 final_indirect = make_float3(0.f);
//Init VPL show color
float3 vpl_pos_color = make_float3(0.);
//Show the VPL hit position
if (params.s_v)
{
vpl_pos_color = show_VPL(hit_point);
}
//Compute the direct illumination
if (params.s_d)
{
for (int ii = 0; ii < params.number_of_lights; ii++)
{
BasicLight current_light = params.lights[ii];
final_direct += direct_light_contribution(hit_point, p_normal, p_Kd, current_light);
}
}
//Compute indirect illumination
int n_vpls = 0;
if (params.s_i)
{
for (int j = 0; j < params.num_hit_vpl; j++)
{
VPL vpl = params.vpls[j];//Select VPL
n_vpls++;//Count how many VPLs influence the scene
//Compute the incident direction of the light coming from the current VPL.
float3 L = normalize(vpl.pos - hit_point);
//Compute its angle with the point
float nDl = dot(p_normal, L);
//If the angle is between -90 and 90 degrees the VPL can add its contribution
if (nDl >= 0.0f)
{
float Ldist = length(vpl.pos - hit_point);//Distance between point and VPL
float Ldist2 = Ldist * Ldist;// Square of the distance
//Apply smooth step to avoid aberrations
float distScale = SmoothStep(.0f + params.minSS, 20.f + params.maxSS, Ldist2);
//float distScale = Ldist2;
if (distScale > 0.f)
{
float visible;
OcclusionPRD VPL_prd;
VPL_prd.attenuation = make_float3(1.0f);
//Geometric term
float3 L2 = normalize(hit_point - vpl.pos);
float nvDl2 = dot(vpl.normal, L2);
float G = fabs(nvDl2 * nDl) / Ldist2;// divided by Ldist2
//if (length(vpl.color * G *distScale) > 0.05) {
optixTrace(
params.handle,
hit_point,
L,
0.01f * VPL_SHADOW_OFFSET,
Ldist - 0.01,
0.0f,
OptixVisibilityMask(1),
OPTIX_RAY_FLAG_NONE,
RAY_TYPE_OCCLUSION,
RAY_TYPE_COUNT,
RAY_TYPE_OCCLUSION,
float3_as_args(VPL_prd.attenuation)/*,
reinterpret_cast<uint32_t&>(shadow_prd.is_indirect)*/);
visible = VPL_prd.attenuation.x;
//}
final_indirect += vpl.color * G * visible*distScale;
//irradiance = make_float3(1,1,1);
}
}
}
final_indirect /= static_cast<float>(n_vpls);
}
prd.result = final_direct + (final_indirect * 5) + vpl_pos_color;
if (params.s_k) {
int pos_color = params.assing_cluster_vector[image_index];
prd.result = params.cluster_color[pos_color] / 2 + prd.result / 2;
}
setRadiancePRD(prd);
}
static
__device__ void compute_R_matrix(float3 p_Kd,
float3 p_normal)
{
const uint3 idx = optixGetLaunchIndex();
int index = idx.x;
const float3 ray_orig = optixGetWorldRayOrigin();
const float3 ray_dir = optixGetWorldRayDirection();
const float ray_t = optixGetRayTmax();
float3 hit_point = ray_orig + ray_t * ray_dir;
float3 irradiance = make_float3(0.f);
int n_vpls = 0;
for (int j = 0; j < params.num_hit_vpl; j++)
{
VPL vpl = params.vpls[j];//Select VPL
//irradiance = make_float3(1, 1, 1);
params.R_matrix[index*params.num_hit_vpl + j] = make_float3(0.);
n_vpls++;//Count how many VPLs influence the scene
//Compute the incident direction of the light coming from the current VPL.
float3 L = normalize(vpl.pos - hit_point);
//Compute its angle with the point
float nDl = dot(p_normal, L);
//If the angle is between -90 and 90 degrees the VPL can add its contribution
if (nDl >= 0.0f)
{
float Ldist = length(vpl.pos - hit_point);//Distance between point and VPL
float Ldist2 = Ldist * Ldist;// Square of the distance
//Apply smooth step to avoid aberrations
float distScale = SmoothStep(.0f + params.minSS, 20.f + params.maxSS, Ldist2);
//float distScale = Ldist2;
if (distScale > 0.f)
{
float visible;
OcclusionPRD VPL_prd;
VPL_prd.attenuation = make_float3(1.0f);
//Geometric term
float3 L2 = normalize(hit_point - vpl.pos);
float nvDl2 = dot(vpl.normal, L2);
float G = fabs(nvDl2 * nDl) / Ldist2;// divided by Ldist2
//if (length(vpl.color * G *distScale) > 0.05) {
optixTrace(
params.handle,
hit_point,
L,
0.01f * VPL_SHADOW_OFFSET,
Ldist - 0.01,
0.0f,
OptixVisibilityMask(1),
OPTIX_RAY_FLAG_NONE,
RAY_TYPE_OCCLUSION,
RAY_TYPE_COUNT,
RAY_TYPE_OCCLUSION,
float3_as_args(VPL_prd.attenuation)/*,
reinterpret_cast<uint32_t&>(shadow_prd.is_indirect)*/);
visible = VPL_prd.attenuation.x;
//}
params.R_matrix[index*params.num_hit_vpl + j] = vpl.color * G * visible*distScale; //vpl.color
}
}
}
}
static
__device__ void compute_R_matrix_alt_metric(float3 p_Kd,
float3 p_normal) {
const uint3 idx = optixGetLaunchIndex();
int index = idx.x;
const float3 ray_orig = optixGetWorldRayOrigin();
const float3 ray_dir = optixGetWorldRayDirection();
const float ray_t = optixGetRayTmax();
float3 hit_point = ray_orig + ray_t * ray_dir;
float3 irradiance = make_float3(0.f);
int n_vpls = 0;
for (int j = 0; j < params.num_hit_vpl; j++)
{
VPL vpl = params.vpls[j];//Select VPL
//irradiance = make_float3(1, 1, 1);
params.R_matrix[index*params.num_hit_vpl + j] = make_float3(0.);
n_vpls++;//Count how many VPLs influence the scene
//Compute the incident direction of the light coming from the current VPL.
float3 L = normalize(vpl.pos - hit_point);
//Compute its angle with the point
float nDl = dot(p_normal, L);
//If the angle is between -90 and 90 degrees the VPL can add its contribution
float Ldist = length(vpl.pos - hit_point);//Distance between point and VPL
float Ldist2 = Ldist * Ldist;// Square of the distance
//Apply smooth step to avoid aberrations
float distScale = SmoothStep(.0f + params.minSS, 20.f + params.maxSS, Ldist2);
//float distScale = Ldist2;
float visible;
OcclusionPRD VPL_prd;
VPL_prd.attenuation = make_float3(1.0f);
//Geometric term
float3 L2 = normalize(hit_point - vpl.pos);
float nvDl2 = dot(vpl.normal, L2);
float G = fabs(nvDl2 * nDl) / Ldist2;// divided by Ldist2
optixTrace(
params.handle,
hit_point,
L,
0.01f * VPL_SHADOW_OFFSET,
Ldist - 0.01,
0.0f,
OptixVisibilityMask(1),
OPTIX_RAY_FLAG_NONE,
RAY_TYPE_OCCLUSION,
RAY_TYPE_COUNT,
RAY_TYPE_OCCLUSION,
float3_as_args(VPL_prd.attenuation)
);
visible = VPL_prd.attenuation.x;
params.R_matrix[index*params.num_hit_vpl + j] = vpl.color * p_Kd * dot(p_normal, L) * dot(vpl.normal, -L) * visible;
}
}
static
__device__ void result_K_means(float3 p_Kd,
float3 p_normal) {
const uint3 idx = optixGetLaunchIndex();
const float3 ray_orig = optixGetWorldRayOrigin();
const float3 ray_dir = optixGetWorldRayDirection();
const float ray_t = optixGetRayTmax();
const uint32_t image_index = params.width*idx.y + idx.x;
RadiancePRD prd = getRadiancePRD();
float3 hit_point = ray_orig + ray_t * ray_dir;
bool vplhit = false;
float3 vpl_pos_col = make_float3(0.);
float3 final_direct = make_float3(0, 0, 0);
float3 irradiance = make_float3(0.f);
//Show the VPL hit position
if (params.s_v)
{
if (params.show_cluster_VPL) {
for (int j = 0; j < MAX_VPL_CLUSTERS; j++)/* for (int j = 0; j < params.num_vpl*(params.max_bounces + 1); j++)*/
{
int point_cluster = params.assing_cluster_vector[image_index];
int vpl_pos = params.selected_VPL_pos[point_cluster * MAX_VPL_CLUSTERS + j];
VPL showvpl = params.vpls[vpl_pos];
if (showvpl.hit)
{
float3 pos_vpl = showvpl.pos;
float dist = length(hit_point - pos_vpl);
if (dist < 0.05)
{
//vpl_pos = showvpl.color / 4;
//vpl_pos = make_float3(1,1,1);
vpl_pos_col = showvpl.color;// make_float3(1.);
//vpl_pos = params.cluster_color[0];
vplhit = true;
break;
}
}
}
}
if (params.show_cluster_VPL == false) {
for (int i = 0; i < params.num_hit_vpl; i++)
{
VPL showvpl = params.vpls[i];
float3 pos_vpl = showvpl.pos;
float dist = length(hit_point - pos_vpl);
if (dist < 0.05)
{
vpl_pos_col = params.cluster_color[params.VPL_assing_cluster[i]];
//vpl_pos_color = make_float3(1.);
break;
}
}
}
}
//Compute the direct illumination
if (params.s_d)
{
for (int ii = 0; ii < params.number_of_lights; ii++)
{
BasicLight current_light = params.lights[ii];
final_direct += direct_light_contribution(hit_point, p_normal, p_Kd, current_light);
}
}
//Indirect VPL irradiance
int n_vpls = 1;
if (params.s_i)
{
for (int j = 0; j < MAX_VPL_CLUSTERS; j++)/* for (int j = 0; j < params.num_vpl*(params.max_bounces + 1); j++)*/
{
int point_cluster = params.assing_cluster_vector[image_index];
int vpl_pos = params.selected_VPL_pos[point_cluster * MAX_VPL_CLUSTERS + j];
VPL vpl = params.vpls[vpl_pos];//Select VPL --
//vpl = params.vpls[j];
//irradiance = make_float3(1, 1, 1);
//if (params.vpls[params.first_VPL_cluster[params.VPL_assing_cluster[vpl_pos]]].hit == true) {
if (vpl.hit)
{
n_vpls++;//Count how many VPLs influence the scene
//Compute the incident direction of the light coming from the current VPL.
float3 L = normalize(vpl.pos - hit_point);
//Compute its angle with the point
float nDl = dot(p_normal, L);
//If the angle is between -90 and 90 degrees the VPL can add its contribution
if (nDl >= 0.0f)
{
float Ldist = length(vpl.pos - hit_point);//Distance between point and VPL
float Ldist2 = Ldist * Ldist;// Square of the distance
//Apply smooth step to avoid aberrations
float distScale = SmoothStep(.0f + params.minSS, 20.f + params.maxSS, Ldist2);
//float distScale = Ldist2;
if (distScale > 0.f)
{
float visible;
OcclusionPRD VPL_prd;
VPL_prd.attenuation = make_float3(1.0f);
//Geometric term
float3 L2 = normalize(hit_point - vpl.pos);
float nvDl2 = dot(vpl.normal, L2);
float G = fabs(nvDl2 * nDl) / Ldist2;// divided by Ldist2
//if (length(vpl.color * G *distScale) > 0.05) {
optixTrace(
params.handle,
hit_point,
L,
0.01f * VPL_SHADOW_OFFSET,
Ldist - 0.01,
0.0f,
OptixVisibilityMask(1),
OPTIX_RAY_FLAG_NONE,
RAY_TYPE_OCCLUSION,
RAY_TYPE_COUNT,
RAY_TYPE_OCCLUSION,
float3_as_args(VPL_prd.attenuation)/*,
reinterpret_cast<uint32_t&>(shadow_prd.is_indirect)*/);
visible = VPL_prd.attenuation.x;
//}
irradiance += vpl.color * G * visible*distScale;
//irradiance = make_float3(1,1,1);
}
}
}
//}
}
irradiance /= static_cast<float>(n_vpls);
//irradiance = p_Kd * irradiance;
}
//Direct;
// pass the color back
prd.result = final_direct + (irradiance * 10) + vpl_pos_col;
if (params.s_k) {
int pos_color = params.assing_cluster_vector[image_index];
prd.result = params.cluster_color[pos_color] / 8 + prd.result / 2;
}
setRadiancePRD(prd);
}
extern "C" __global__ void __closesthit__diffuse_radiance()
{
float3 object_normal = make_float3(
int_as_float(optixGetAttribute_0()),
int_as_float(optixGetAttribute_1()),
int_as_float(optixGetAttribute_2()));
float3 world_normal = normalize(optixTransformNormalFromObjectToWorldSpace(object_normal));
float3 ffnormal = faceforward(world_normal, -optixGetWorldRayDirection(), world_normal);
const HitGroupData* sbt_data = (HitGroupData*)optixGetSbtDataPointer();
const Phong &phong = sbt_data->shading.diffuse;
if (params.compute_image && !params.result_K_means) {
phongShade(phong.Kd, ffnormal);
}
if (params.select_space_points) {
const uint3 idx = optixGetLaunchIndex();
const float3 ray_orig = optixGetWorldRayOrigin();
const float3 ray_dir = optixGetWorldRayDirection();
const float ray_t = optixGetRayTmax();
const uint32_t image_index = params.width*idx.y + idx.x;
float3 hit_point = ray_orig + ray_t * ray_dir;
params.normal[image_index] = make_float3(ffnormal.x, ffnormal.y, ffnormal.z);
params.pos[image_index] = make_float3(hit_point.x, hit_point.y, hit_point.z);
}
if (params.compute_R) {
//compute_R_matrix(phong.Kd, ffnormal);
compute_R_matrix_alt_metric(phong.Kd, ffnormal);
}
if (params.compute_image && params.result_K_means) {
result_K_means(phong.Kd,ffnormal);
}
}
extern "C" __global__ void __anyhit__full_occlusion()
{
phongShadowed();
}
extern "C" __global__ void __miss__constant_bg()
{
if (params.compute_image) {
const MissData* sbt_data = (MissData*)optixGetSbtDataPointer();
RadiancePRD prd = getRadiancePRD();
prd.result = sbt_data->bg_color;
setRadiancePRD(prd);
}
if (params.select_space_points) {
const uint3 idx = optixGetLaunchIndex();
const uint32_t image_index = params.width*idx.y + idx.x;
params.normal[image_index] = make_float3(0, 0, 0);
params.pos[image_index] = make_float3(1000, 1000, 1000);
}
}
| 811c40a8af53867df6280e63bf614d36c10ba1b4.cu | //
// Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <vector_types.h>
#include <optix_device.h>
#include "optixHello.h"
#include "helpers.h"
#include "random.h"
//bool is_vpl = false;
#define MIN_DIST2 0.1f//*vpl_dist_scale_square
#define VPL_SHADOW_OFFSET 10.f
extern "C" {
__constant__ Params params;
}
//_________BASIC FUNCTIONS (UTILS)_______________________________________________________________________
__device__ __inline__ float Clamp(float val, float low, float high) {
if (val < low) return low;
else if (val > high) return high;
else return val;
}
__device__ __inline__ float SmoothStep(float min, float max, float value) {
float v = Clamp((value - min) / (max - min), 0.f, 1.f);
return v * v * (-2.f * v + 3.f);
}
__device__ __inline__ float map(float value, float start1, float stop1, float start2, float stop2) {
return start2 + (stop2 - start2) * ((value - start1) / (stop1 - start1));
}
//_______________________________________________________________________
//------------------GET AND SET RAY INFO------------------
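// The per-ray payload is carried in five 32-bit payload slots; float fields are reinterpreted
// with int_as_float / float_as_int when read and written.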
static __device__ __inline__ RadiancePRD getRadiancePRD()
{
RadiancePRD prd;
prd.result.x = int_as_float(optixGetPayload_0());
prd.result.y = int_as_float(optixGetPayload_1());
prd.result.z = int_as_float(optixGetPayload_2());
prd.light_number = int_as_float(optixGetPayload_3());
prd.depth = optixGetPayload_4();
return prd;
}
static __device__ __inline__ void setRadiancePRD(const RadiancePRD &prd)
{
optixSetPayload_0(float_as_int(prd.result.x));
optixSetPayload_1(float_as_int(prd.result.y));
optixSetPayload_2(float_as_int(prd.result.z));
optixSetPayload_3(float_as_int(prd.light_number));
optixSetPayload_4(prd.depth);
}
static __device__ __inline__ OcclusionPRD getOcclusionPRD()
{
OcclusionPRD prd;
prd.attenuation.x = int_as_float(optixGetPayload_0());
prd.attenuation.y = int_as_float(optixGetPayload_1());
prd.attenuation.z = int_as_float(optixGetPayload_2());
return prd;
}
static __device__ __inline__ void setOcclusionPRD(const OcclusionPRD &prd)
{
optixSetPayload_0(float_as_int(prd.attenuation.x));
optixSetPayload_1(float_as_int(prd.attenuation.y));
optixSetPayload_2(float_as_int(prd.attenuation.z));
}
//-------------------------------------------------------
//Compute ray value when ray misses the scene
__device__ void phongShadowed()
{
// this material is opaque, so it fully attenuates all shadow rays
OcclusionPRD prd;
prd.attenuation = make_float3(0.f);
setOcclusionPRD(prd);
optixTerminateRay();
}
//Function to mark where the VPLs are in the scene
__device__ __inline__ float3 show_VPL(float3 hit_point) {
float3 vpl_pos_color = make_float3(0.);
for (int i = 0; i < params.num_hit_vpl; i++)
{
VPL showvpl = params.vpls[i];
float3 pos_vpl = showvpl.pos;
float dist = length(hit_point - pos_vpl);
if (dist < 0.05)
{
vpl_pos_color = make_float3(1.);//showvpl.color;// params.cluster_color[params.VPL_assing_cluster[i]];
//vpl_pos_color = make_float3(1.);
break;
}
}
return vpl_pos_color;
}
//Compute direct illumination of each light source
__device__ __inline__
float3 direct_light_contribution(float3 hit_point, float3 p_normal, float3 p_Kd, BasicLight current_light) {
float3 direct_contribution = make_float3(0.);
BasicLight light = current_light;
float Ldist = length(light.pos - hit_point);
float3 L = normalize(light.pos - hit_point);
float nDl = dot(p_normal, L);
// cast shadow ray
float3 light_attenuation = make_float3(static_cast<float>(nDl > 0.0f));
if (nDl > 0.0f)
{
OcclusionPRD shadow_prd;
shadow_prd.attenuation = make_float3(1.0f);
optixTrace(
params.handle,
hit_point,
L,
0.01f,
Ldist,
0.0f,
OptixVisibilityMask(1),
OPTIX_RAY_FLAG_NONE,
RAY_TYPE_OCCLUSION,
RAY_TYPE_COUNT,
RAY_TYPE_OCCLUSION,
float3_as_args(shadow_prd.attenuation)/*,
reinterpret_cast<uint32_t&>(shadow_prd.is_indirect)*/);
light_attenuation = shadow_prd.attenuation;
}
// If not completely shadowed, light the hit point
if (fmaxf(light_attenuation) > 0.0f)
{
float3 Lc = light.color * light_attenuation;
direct_contribution += p_Kd * nDl * Lc;
}
return direct_contribution;
}
//Compute irradiance of each VPL.
__device__ __inline__
float3 VPL_contribution(float3 hit_point, float3 p_normal, float3 p_Kd, VPL current_VPL) {
float3 irradiance = make_float3(0.);
//Compute the incident direction of the light coming from the current VPL.
float3 L = normalize(current_VPL.pos - hit_point);
//Compute its angle with the point
float nDl = dot(p_normal, L);
//If the angle is between -90 and 90 degrees the VPL can add its contribution
if (nDl >= 0.0f)
{
float Ldist = length(current_VPL.pos - hit_point);//Distance between point and VPL
float Ldist2 = Ldist * Ldist;// Square of the distance
//Apply smooth step to avoid aberrations
float distScale = SmoothStep(.0f + params.minSS, 20.f + params.maxSS, Ldist2);
//float distScale = Ldist2;
if (distScale > 0.f)
{
float visible = 1.f; // default to unoccluded; the shadow ray below is skipped for negligible contributions
OcclusionPRD VPL_prd;
VPL_prd.attenuation = make_float3(1.0f);
//Geometric term
float3 L2 = normalize(hit_point - current_VPL.pos);
float nvDl2 = dot(current_VPL.normal, L2);
float G = fabs(nvDl2 * nDl) / Ldist2;// divided by Ldist2
float3 result = current_VPL.color * G *distScale;
if (length(result) > 0.02) {
optixTrace(
params.handle,
hit_point,
L,
0.01f * VPL_SHADOW_OFFSET,
Ldist - 0.01,
0.0f,
OptixVisibilityMask(1),
OPTIX_RAY_FLAG_NONE,
RAY_TYPE_OCCLUSION,
RAY_TYPE_COUNT,
RAY_TYPE_OCCLUSION,
float3_as_args(VPL_prd.attenuation)/*,
reinterpret_cast<uint32_t&>(shadow_prd.is_indirect)*/);
visible = VPL_prd.attenuation.x;
}
irradiance += result * visible;
}
}
return irradiance;
}
//Compute the total illumination of one point.
static
__device__ void phongShade(float3 p_Kd,
float3 p_normal)
{
//Extract ray and thread information
const uint3 idx = optixGetLaunchIndex();
const uint32_t image_index = params.width*idx.y + idx.x;
const float3 ray_orig = optixGetWorldRayOrigin();
const float3 ray_dir = optixGetWorldRayDirection();
const float ray_t = optixGetRayTmax();
RadiancePRD prd = getRadiancePRD();
//Compute hit point
float3 hit_point = ray_orig + ray_t * ray_dir;
//Init different types of illumination
float3 final_direct = make_float3(0, 0, 0);
float3 final_indirect = make_float3(0.f);
//Init VPL show color
float3 vpl_pos_color = make_float3(0.);
//Show the VPL hit position
if (params.s_v)
{
vpl_pos_color = show_VPL(hit_point);
}
//Compute the direct illumination
if (params.s_d)
{
for (int ii = 0; ii < params.number_of_lights; ii++)
{
BasicLight current_light = params.lights[ii];
final_direct += direct_light_contribution(hit_point, p_normal, p_Kd, current_light);
}
}
//Compute indirect illumination
int n_vpls = 0;
if (params.s_i)
{
for (int j = 0; j < params.num_hit_vpl; j++)
{
VPL vpl = params.vpls[j];//Select VPL
n_vpls++;//Count how many VPLs influence the scene
//Compute the incident direction of the light coming from the current VPL.
float3 L = normalize(vpl.pos - hit_point);
//Compute its angle with the point
float nDl = dot(p_normal, L);
//If the angle is between -90 and 90 degrees the VPL can add its contribution
if (nDl >= 0.0f)
{
float Ldist = length(vpl.pos - hit_point);//Distance between point and VPL
float Ldist2 = Ldist * Ldist;// Square of the distance
//Apply smooth step to avoid aberrations
float distScale = SmoothStep(.0f + params.minSS, 20.f + params.maxSS, Ldist2);
//float distScale = Ldist2;
if (distScale > 0.f)
{
float visible;
OcclusionPRD VPL_prd;
VPL_prd.attenuation = make_float3(1.0f);
//Geometric term
float3 L2 = normalize(hit_point - vpl.pos);
float nvDl2 = dot(vpl.normal, L2);
float G = fabs(nvDl2 * nDl) / Ldist2;// divided by Ldist2
//if (length(vpl.color * G *distScale) > 0.05) {
optixTrace(
params.handle,
hit_point,
L,
0.01f * VPL_SHADOW_OFFSET,
Ldist - 0.01,
0.0f,
OptixVisibilityMask(1),
OPTIX_RAY_FLAG_NONE,
RAY_TYPE_OCCLUSION,
RAY_TYPE_COUNT,
RAY_TYPE_OCCLUSION,
float3_as_args(VPL_prd.attenuation)/*,
reinterpret_cast<uint32_t&>(shadow_prd.is_indirect)*/);
visible = VPL_prd.attenuation.x;
//}
final_indirect += vpl.color * G * visible*distScale;
//irradiance = make_float3(1,1,1);
}
}
}
final_indirect /= static_cast<float>(n_vpls);
}
prd.result = final_direct + (final_indirect * 5) + vpl_pos_color;
if (params.s_k) {
int pos_color = params.assing_cluster_vector[image_index];
prd.result = params.cluster_color[pos_color] / 2 + prd.result / 2;
}
setRadiancePRD(prd);
}
static
__device__ void compute_R_matrix(float3 p_Kd,
float3 p_normal)
{
const uint3 idx = optixGetLaunchIndex();
int index = idx.x;
const float3 ray_orig = optixGetWorldRayOrigin();
const float3 ray_dir = optixGetWorldRayDirection();
const float ray_t = optixGetRayTmax();
float3 hit_point = ray_orig + ray_t * ray_dir;
float3 irradiance = make_float3(0.f);
int n_vpls = 0;
for (int j = 0; j < params.num_hit_vpl; j++)
{
VPL vpl = params.vpls[j];//Select VPL
//irradiance = make_float3(1, 1, 1);
params.R_matrix[index*params.num_hit_vpl + j] = make_float3(0.);
n_vpls++;//Count how many VPLs influence the scene
//Compute the incident direction of the light coming from the current VPL.
float3 L = normalize(vpl.pos - hit_point);
//Compute its angle with the point
float nDl = dot(p_normal, L);
//If the angle is between -90 and 90 degrees the VPL can add its contribution
if (nDl >= 0.0f)
{
float Ldist = length(vpl.pos - hit_point);//Distance between point and VPL
float Ldist2 = Ldist * Ldist;// Square of the distance
//Apply smooth step to avoid aberrations
float distScale = SmoothStep(.0f + params.minSS, 20.f + params.maxSS, Ldist2);
//float distScale = Ldist2;
if (distScale > 0.f)
{
float visible;
OcclusionPRD VPL_prd;
VPL_prd.attenuation = make_float3(1.0f);
//Geometric term
float3 L2 = normalize(hit_point - vpl.pos);
float nvDl2 = dot(vpl.normal, L2);
float G = fabs(nvDl2 * nDl) / Ldist2;// divided by Ldist2
//if (length(vpl.color * G *distScale) > 0.05) {
optixTrace(
params.handle,
hit_point,
L,
0.01f * VPL_SHADOW_OFFSET,
Ldist - 0.01,
0.0f,
OptixVisibilityMask(1),
OPTIX_RAY_FLAG_NONE,
RAY_TYPE_OCCLUSION,
RAY_TYPE_COUNT,
RAY_TYPE_OCCLUSION,
float3_as_args(VPL_prd.attenuation)/*,
reinterpret_cast<uint32_t&>(shadow_prd.is_indirect)*/);
visible = VPL_prd.attenuation.x;
//}
params.R_matrix[index*params.num_hit_vpl + j] = vpl.color * G * visible*distScale; //vpl.color
}
}
}
}
static
__device__ void compute_R_matrix_alt_metric(float3 p_Kd,
float3 p_normal) {
const uint3 idx = optixGetLaunchIndex();
int index = idx.x;
const float3 ray_orig = optixGetWorldRayOrigin();
const float3 ray_dir = optixGetWorldRayDirection();
const float ray_t = optixGetRayTmax();
float3 hit_point = ray_orig + ray_t * ray_dir;
float3 irradiance = make_float3(0.f);
int n_vpls = 0;
for (int j = 0; j < params.num_hit_vpl; j++)
{
VPL vpl = params.vpls[j];//Select VPL
//irradiance = make_float3(1, 1, 1);
params.R_matrix[index*params.num_hit_vpl + j] = make_float3(0.);
n_vpls++;//Count how many VPLs influence the scene
//Compute the incident direction of the light coming from the current VPL.
float3 L = normalize(vpl.pos - hit_point);
//Compute its angle with the point
float nDl = dot(p_normal, L);
//If the angle is between -90 and 90 degrees the VPL can add its contribution
float Ldist = length(vpl.pos - hit_point);//Distance between point and VPL
float Ldist2 = Ldist * Ldist;// Square of the distance
//Apply smooth step to avoid aberrations
float distScale = SmoothStep(.0f + params.minSS, 20.f + params.maxSS, Ldist2);
//float distScale = Ldist2;
float visible;
OcclusionPRD VPL_prd;
VPL_prd.attenuation = make_float3(1.0f);
//Geometric term
float3 L2 = normalize(hit_point - vpl.pos);
float nvDl2 = dot(vpl.normal, L2);
float G = fabs(nvDl2 * nDl) / Ldist2;// divided by Ldist2
optixTrace(
params.handle,
hit_point,
L,
0.01f * VPL_SHADOW_OFFSET,
Ldist - 0.01,
0.0f,
OptixVisibilityMask(1),
OPTIX_RAY_FLAG_NONE,
RAY_TYPE_OCCLUSION,
RAY_TYPE_COUNT,
RAY_TYPE_OCCLUSION,
float3_as_args(VPL_prd.attenuation)
);
visible = VPL_prd.attenuation.x;
params.R_matrix[index*params.num_hit_vpl + j] = vpl.color * p_Kd * dot(p_normal, L) * dot(vpl.normal, -L) * visible;
}
}
static
__device__ void result_K_means(float3 p_Kd,
float3 p_normal) {
const uint3 idx = optixGetLaunchIndex();
const float3 ray_orig = optixGetWorldRayOrigin();
const float3 ray_dir = optixGetWorldRayDirection();
const float ray_t = optixGetRayTmax();
const uint32_t image_index = params.width*idx.y + idx.x;
RadiancePRD prd = getRadiancePRD();
float3 hit_point = ray_orig + ray_t * ray_dir;
bool vplhit = false;
float3 vpl_pos_col = make_float3(0.);
float3 final_direct = make_float3(0, 0, 0);
float3 irradiance = make_float3(0.f);
//Show the VPL hit position
if (params.s_v)
{
if (params.show_cluster_VPL) {
for (int j = 0; j < MAX_VPL_CLUSTERS; j++)/* for (int j = 0; j < params.num_vpl*(params.max_bounces + 1); j++)*/
{
int point_cluster = params.assing_cluster_vector[image_index];
int vpl_pos = params.selected_VPL_pos[point_cluster * MAX_VPL_CLUSTERS + j];
VPL showvpl = params.vpls[vpl_pos];
if (showvpl.hit)
{
float3 pos_vpl = showvpl.pos;
float dist = length(hit_point - pos_vpl);
if (dist < 0.05)
{
//vpl_pos = showvpl.color / 4;
//vpl_pos = make_float3(1,1,1);
vpl_pos_col = showvpl.color;// make_float3(1.);
//vpl_pos = params.cluster_color[0];
vplhit = true;
break;
}
}
}
}
if (params.show_cluster_VPL == false) {
for (int i = 0; i < params.num_hit_vpl; i++)
{
VPL showvpl = params.vpls[i];
float3 pos_vpl = showvpl.pos;
float dist = length(hit_point - pos_vpl);
if (dist < 0.05)
{
vpl_pos_col = params.cluster_color[params.VPL_assing_cluster[i]];
//vpl_pos_color = make_float3(1.);
break;
}
}
}
}
//Compute the direct illumination
if (params.s_d)
{
for (int ii = 0; ii < params.number_of_lights; ii++)
{
BasicLight current_light = params.lights[ii];
final_direct += direct_light_contribution(hit_point, p_normal, p_Kd, current_light);
}
}
//Indirect VPL irradiance
int n_vpls = 1;
if (params.s_i)
{
for (int j = 0; j < MAX_VPL_CLUSTERS; j++)/* for (int j = 0; j < params.num_vpl*(params.max_bounces + 1); j++)*/
{
int point_cluster = params.assing_cluster_vector[image_index];
int vpl_pos = params.selected_VPL_pos[point_cluster * MAX_VPL_CLUSTERS + j];
VPL vpl = params.vpls[vpl_pos];//Select VPL --
//vpl = params.vpls[j];
//irradiance = make_float3(1, 1, 1);
//if (params.vpls[params.first_VPL_cluster[params.VPL_assing_cluster[vpl_pos]]].hit == true) {
if (vpl.hit)
{
n_vpls++;//Count how many VPLs influence the scene
//Compute the incident direction of the light coming from the current VPL.
float3 L = normalize(vpl.pos - hit_point);
//Compute its angle with the point
float nDl = dot(p_normal, L);
//If the angle is between -90 and 90 degrees the VPL can add its contribution
if (nDl >= 0.0f)
{
float Ldist = length(vpl.pos - hit_point);//Distance between point and VPL
float Ldist2 = Ldist * Ldist;// Square of the distance
//Apply smooth step to avoid aberrations
float distScale = SmoothStep(.0f + params.minSS, 20.f + params.maxSS, Ldist2);
//float distScale = Ldist2;
if (distScale > 0.f)
{
float visible;
OcclusionPRD VPL_prd;
VPL_prd.attenuation = make_float3(1.0f);
//Geometric term
float3 L2 = normalize(hit_point - vpl.pos);
float nvDl2 = dot(vpl.normal, L2);
float G = fabs(nvDl2 * nDl) / Ldist2;// divided by Ldist2
//if (length(vpl.color * G *distScale) > 0.05) {
optixTrace(
params.handle,
hit_point,
L,
0.01f * VPL_SHADOW_OFFSET,
Ldist - 0.01,
0.0f,
OptixVisibilityMask(1),
OPTIX_RAY_FLAG_NONE,
RAY_TYPE_OCCLUSION,
RAY_TYPE_COUNT,
RAY_TYPE_OCCLUSION,
float3_as_args(VPL_prd.attenuation)/*,
reinterpret_cast<uint32_t&>(shadow_prd.is_indirect)*/);
visible = VPL_prd.attenuation.x;
//}
irradiance += vpl.color * G * visible*distScale;
//irradiance = make_float3(1,1,1);
}
}
}
//}
}
irradiance /= static_cast<float>(n_vpls);
//irradiance = p_Kd * irradiance;
}
//Direct;
// pass the color back
prd.result = final_direct + (irradiance * 10) + vpl_pos_col;
if (params.s_k) {
int pos_color = params.assing_cluster_vector[image_index];
prd.result = params.cluster_color[pos_color] / 8 + prd.result / 2;
}
setRadiancePRD(prd);
}
extern "C" __global__ void __closesthit__diffuse_radiance()
{
float3 object_normal = make_float3(
int_as_float(optixGetAttribute_0()),
int_as_float(optixGetAttribute_1()),
int_as_float(optixGetAttribute_2()));
float3 world_normal = normalize(optixTransformNormalFromObjectToWorldSpace(object_normal));
float3 ffnormal = faceforward(world_normal, -optixGetWorldRayDirection(), world_normal);
const HitGroupData* sbt_data = (HitGroupData*)optixGetSbtDataPointer();
const Phong &phong = sbt_data->shading.diffuse;
if (params.compute_image && !params.result_K_means) {
phongShade(phong.Kd, ffnormal);
}
if (params.select_space_points) {
const uint3 idx = optixGetLaunchIndex();
const float3 ray_orig = optixGetWorldRayOrigin();
const float3 ray_dir = optixGetWorldRayDirection();
const float ray_t = optixGetRayTmax();
const uint32_t image_index = params.width*idx.y + idx.x;
float3 hit_point = ray_orig + ray_t * ray_dir;
params.normal[image_index] = make_float3(ffnormal.x, ffnormal.y, ffnormal.z);
params.pos[image_index] = make_float3(hit_point.x, hit_point.y, hit_point.z);
}
if (params.compute_R) {
//compute_R_matrix(phong.Kd, ffnormal);
compute_R_matrix_alt_metric(phong.Kd, ffnormal);
}
if (params.compute_image && params.result_K_means) {
result_K_means(phong.Kd,ffnormal);
}
}
extern "C" __global__ void __anyhit__full_occlusion()
{
phongShadowed();
}
extern "C" __global__ void __miss__constant_bg()
{
if (params.compute_image) {
const MissData* sbt_data = (MissData*)optixGetSbtDataPointer();
RadiancePRD prd = getRadiancePRD();
prd.result = sbt_data->bg_color;
setRadiancePRD(prd);
}
if (params.select_space_points) {
const uint3 idx = optixGetLaunchIndex();
const uint32_t image_index = params.width*idx.y + idx.x;
params.normal[image_index] = make_float3(0, 0, 0);
params.pos[image_index] = make_float3(1000, 1000, 1000);
}
}
|
f5a36bc3e0add72a3f29dc6ef22d13a4c474bbd8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "cudakernel/nn/mmcv_gridsample.h"
#include "cudakernel/math/math.h"
#include "cudakernel/common/common.h"
#include "ppl/nn/common/tensor_shape.h"
#include <hip/hip_fp16.h>
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
#define CLIP_COORDINATES(in, out, clip_limit) out = MIN((clip_limit - 1), MAX(in, 0))
template <typename T>
static inline __device__ T grid_sampler_unnormalize(T coord, int64_t size, bool align_corners)
{
if (align_corners) {
return ((coord + 1) / 2) * (size - 1);
} else {
return ((coord + 1) * size - 1) / 2;
}
}
// Reflects coordinates until they fall between low and high (inclusive).
// The bounds are passed as twice their value so that half-integer values
// can be represented as ints.
template <typename T>
static inline __device__ T reflect_coordinates(T in, int64_t twice_low, int64_t twice_high)
{
if (twice_low == twice_high) {
return static_cast<T>(0);
}
T min = static_cast<T>(twice_low) / 2;
T span = static_cast<T>(twice_high - twice_low) / 2;
in = fabsf(in - min);
// `fmod` returns same sign as `in`, which is positive after the `fabs` above.
T extra = fmodf(in, span);
int flips = static_cast<int>(floorf(in / span));
if (flips % 2 == 0) {
return extra + min;
} else {
return span - extra + min;
}
}
// padding_mode: zeros == 0, border == 1, reflection == 2;
template <typename T>
static inline __device__ T compute_coordinates(T coord, int64_t size, int64_t padding_mode, bool align_corners)
{
if (padding_mode == 1) {
CLIP_COORDINATES(coord, coord, size);
} else if (padding_mode == 2) {
if (align_corners) {
coord = reflect_coordinates(coord, 0, 2 * (size - 1));
} else {
coord = reflect_coordinates(coord, -1, 2 * size - 1);
}
CLIP_COORDINATES(coord, coord, size);
}
return coord;
}
// Computes the pixel source index value for a grid coordinate
template <typename T>
static inline __device__ T grid_sampler_compute_source_index(T coord, int64_t size, int64_t padding_mode, bool align_corners)
{
coord = grid_sampler_unnormalize(coord, size, align_corners);
coord = compute_coordinates(coord, size, padding_mode, align_corners);
return coord;
}
static inline __device__ bool within_bounds_2d(int64_t h, int64_t w, int64_t H, int64_t W)
{
return h >= 0 && h < H && w >= 0 && w < W;
}
template <typename T>
static inline T __device__ get_value_bounded(const T* data, float x, float y, int64_t W, int64_t H, int64_t sW, int64_t sH, int64_t padding_mode, bool align_corners)
{
x = compute_coordinates(x, W, padding_mode, align_corners);
y = compute_coordinates(y, H, padding_mode, align_corners);
int64_t ix = static_cast<int64_t>(x);
int64_t iy = static_cast<int64_t>(y);
if (within_bounds_2d(iy, ix, H, W)) {
return data[iy * sH + ix * sW];
}
return (T)(0);
}
template <typename T>
static inline __device__ T cubic_convolution1(T x, T A)
{
return ((A + 2) * x - (A + 3)) * x * x + 1;
}
template <typename T>
static inline __device__ T cubic_convolution2(T x, T A)
{
return ((A * x - 5 * A) * x + 8 * A) * x - 4 * A;
}
template <typename T>
static inline __device__ void get_cubic_upsample_coefficients(T coeffs[4],
T t)
{
T A = -0.75;
T x1 = t;
coeffs[0] = cubic_convolution2<T>(x1 + 1.0, A);
coeffs[1] = cubic_convolution1<T>(x1, A);
// opposite coefficients
T x2 = 1.0 - t;
coeffs[2] = cubic_convolution1<T>(x2, A);
coeffs[3] = cubic_convolution2<T>(x2 + 1.0, A);
}
template <typename T>
static inline __device__ T cubic_interp1d(T x0, T x1, T x2, T x3, T t)
{
T coeffs[4];
get_cubic_upsample_coefficients<T>(coeffs, t);
return x0 * coeffs[0] + x1 * coeffs[1] + x2 * coeffs[2] + x3 * coeffs[3];
}
// interpolation mode: bilinear == 0, nearest == 1, bicubic == 2;
__global__ void ppl_cukernel_gridsample_fp32(
const int num,
const int channels,
const int height,
const int width,
const int in_height,
const int in_width,
const int num_threads,
const float* input0,
const float* input1,
float* output,
int align_corners,
int padding_mode,
int interpolation_mode)
{
size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= num_threads)
return;
size_t chw = channels * height * width;
size_t hw = height * width;
size_t in_hw = in_height * in_width;
size_t batch_idx = idx / chw;
size_t c_idx = (idx / hw) % channels;
size_t h_idx = (idx / width) % height;
size_t w_idx = (idx % width);
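// The grid tensor stores one (x, y) coordinate pair per output pixel, so the flat pixel index
// is doubled (<< 1) to address the interleaved coordinates.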
int grid_idx = (batch_idx * hw + h_idx * width + w_idx) << 1;
float x = input1[grid_idx + 0];
float y = input1[grid_idx + 1];
if (isinf(x) || isinf(y)) {
output[idx] = 0;
return;
}
float ix = grid_sampler_compute_source_index(x, in_width, padding_mode, align_corners);
float iy = grid_sampler_compute_source_index(y, in_height, padding_mode, align_corners);
// get NE, NW, SE, SW pixel values from (x, y)
if (interpolation_mode == 0) { // bilinear
int ix_nw = (int)floorf(ix);
int iy_nw = (int)floorf(iy);
int ix_ne = ix_nw + 1;
int iy_ne = iy_nw;
int ix_sw = ix_nw;
int iy_sw = iy_nw + 1;
int ix_se = ix_nw + 1;
int iy_se = iy_nw + 1;
// get surfaces to each neighbor:
float nw = (ix_se - ix) * (iy_se - iy);
float ne = (ix - ix_sw) * (iy_sw - iy);
float sw = (ix_ne - ix) * (iy - iy_ne);
float se = (ix - ix_nw) * (iy - iy_nw);
// calculate bilinear weighted pixel value and set output pixel
size_t in_idx = batch_idx * channels * in_hw + c_idx * in_hw;
float nw_val = ((ix_nw >= 0) && (iy_nw >= 0) && (ix_nw < in_width) && (iy_nw < in_height)) ? input0[in_idx + iy_nw * in_width + ix_nw] : 0;
float ne_val = ((ix_ne >= 0) && (iy_ne >= 0) && (ix_ne < in_width) && (iy_ne < in_height)) ? input0[in_idx + iy_ne * in_width + ix_ne] : 0;
float sw_val = ((ix_sw >= 0) && (iy_sw >= 0) && (ix_sw < in_width) && (iy_sw < in_height)) ? input0[in_idx + iy_sw * in_width + ix_sw] : 0;
float se_val = ((ix_se >= 0) && (iy_se >= 0) && (ix_se < in_width) && (iy_se < in_height)) ? input0[in_idx + iy_se * in_width + ix_se] : 0;
float out_val = nw_val * nw + ne_val * ne + sw_val * sw + se_val * se;
output[idx] = out_val;
} else if (interpolation_mode == 1) { // nearest
int64_t ix_nearest = static_cast<int64_t>(floorf(0.5 + ix));
int64_t iy_nearest = static_cast<int64_t>(floorf(0.5 + iy));
size_t in_idx = batch_idx * channels * in_hw + c_idx * in_hw;
output[idx] = ((ix_nearest >= 0) && (iy_nearest >= 0) && (ix_nearest < in_width) && (iy_nearest < in_height)) ? input0[in_idx + iy_nearest * in_width + ix_nearest] : 0;
} else { // bicubic
ix = grid_sampler_unnormalize(x, in_width, align_corners);
iy = grid_sampler_unnormalize(y, in_height, align_corners);
float ix_nw = floorf(ix);
float iy_nw = floorf(iy);
const float tx = ix - ix_nw;
const float ty = iy - iy_nw;
const float* inp_ptr_NC = input0 + batch_idx * in_hw;
float coefficients[4];
// Interpolate 4 values in the x direction
for (int64_t i = 0; i < 4; ++i) {
coefficients[i] = cubic_interp1d<float>(
get_value_bounded<float>(inp_ptr_NC, ix_nw - 1, iy_nw - 1 + i, in_width, in_height, 1, in_width, padding_mode, align_corners),
get_value_bounded<float>(inp_ptr_NC, ix_nw + 0, iy_nw - 1 + i, in_width, in_height, 1, in_width, padding_mode, align_corners),
get_value_bounded<float>(inp_ptr_NC, ix_nw + 1, iy_nw - 1 + i, in_width, in_height, 1, in_width, padding_mode, align_corners),
get_value_bounded<float>(inp_ptr_NC, ix_nw + 2, iy_nw - 1 + i, in_width, in_height, 1, in_width, padding_mode, align_corners),
tx);
}
// Interpolate in the y direction
output[idx] = cubic_interp1d<float>(coefficients[0], coefficients[1], coefficients[2], coefficients[3], ty);
}
}
__global__ void ppl_cukernel_gridsample_fp16(
const int num,
const int channels,
const int height,
const int width,
const int in_height,
const int in_width,
const int num_threads,
const half* input0,
const half* input1,
half* output,
int align_corners,
int padding_mode,
int interpolation_mode)
{
#if __CUDA_ARCH__ >= 600 && __CUDACC_VER_MAJOR__ >= 9
size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= num_threads)
return;
size_t chw = channels * height * width;
size_t hw = height * width;
size_t in_hw = in_height * in_width;
size_t batch_idx = idx / chw;
size_t c_idx = (idx / hw) % channels;
size_t h_idx = (idx / width) % height;
size_t w_idx = (idx % width);
int grid_idx = (batch_idx * hw + h_idx * width + w_idx) << 1;
float x = __half2float(input1[grid_idx + 0]);
float y = __half2float(input1[grid_idx + 1]);
if (isinf(x) || isinf(y)) {
output[idx] = (half)0;
return;
}
float ix = grid_sampler_compute_source_index(x, in_width, padding_mode, align_corners);
float iy = grid_sampler_compute_source_index(y, in_height, padding_mode, align_corners);
// get NE, NW, SE, SW pixel values from (x, y)
if (interpolation_mode == 0) { // bilinear
int ix_nw = (int)floorf(ix);
int iy_nw = (int)floorf(iy);
int ix_ne = ix_nw + 1;
int iy_ne = iy_nw;
int ix_sw = ix_nw;
int iy_sw = iy_nw + 1;
int ix_se = ix_nw + 1;
int iy_se = iy_nw + 1;
// get surfaces to each neighbor:
float nw = (ix_se - ix) * (iy_se - iy);
float ne = (ix - ix_sw) * (iy_sw - iy);
float sw = (ix_ne - ix) * (iy - iy_ne);
float se = (ix - ix_nw) * (iy - iy_nw);
// calculate bilinear weighted pixel value and set output pixel
size_t in_idx = batch_idx * channels * in_hw + c_idx * in_hw;
float nw_val = ((ix_nw >= 0) && (iy_nw >= 0) && (ix_nw < in_width) && (iy_nw < in_height)) ? __half2float(input0[in_idx + iy_nw * in_width + ix_nw]) : 0;
float ne_val = ((ix_ne >= 0) && (iy_ne >= 0) && (ix_ne < in_width) && (iy_ne < in_height)) ? __half2float(input0[in_idx + iy_ne * in_width + ix_ne]) : 0;
float sw_val = ((ix_sw >= 0) && (iy_sw >= 0) && (ix_sw < in_width) && (iy_sw < in_height)) ? __half2float(input0[in_idx + iy_sw * in_width + ix_sw]) : 0;
float se_val = ((ix_se >= 0) && (iy_se >= 0) && (ix_se < in_width) && (iy_se < in_height)) ? __half2float(input0[in_idx + iy_se * in_width + ix_se]) : 0;
float out_val = nw_val * nw + ne_val * ne + sw_val * sw + se_val * se;
output[idx] = __float2half(out_val);
} else if (interpolation_mode == 1) { // nearest
int64_t ix_nearest = static_cast<int64_t>(floorf(0.5 + ix));
int64_t iy_nearest = static_cast<int64_t>(floorf(0.5 + iy));
size_t in_idx = batch_idx * channels * in_hw + c_idx * in_hw;
output[idx] = ((ix_nearest >= 0) && (iy_nearest >= 0) && (ix_nearest < in_width) && (iy_nearest < in_height)) ? input0[in_idx + iy_nearest * in_width + ix_nearest] : half(0);
} else { // bicubic
ix = grid_sampler_unnormalize(x, in_width, align_corners);
iy = grid_sampler_unnormalize(y, in_height, align_corners);
float ix_nw = floorf(ix);
float iy_nw = floorf(iy);
const float tx = ix - ix_nw;
const float ty = iy - iy_nw;
const half* inp_ptr_NC = input0 + batch_idx * in_hw;
float coefficients[4];
// Interpolate 4 values in the x direction
for (int64_t i = 0; i < 4; ++i) {
coefficients[i] = cubic_interp1d<float>(
__half2float(get_value_bounded<half>(inp_ptr_NC, ix_nw - 1, iy_nw - 1 + i, in_width, in_height, 1, in_width, padding_mode, align_corners)),
__half2float(get_value_bounded<half>(inp_ptr_NC, ix_nw + 0, iy_nw - 1 + i, in_width, in_height, 1, in_width, padding_mode, align_corners)),
__half2float(get_value_bounded<half>(inp_ptr_NC, ix_nw + 1, iy_nw - 1 + i, in_width, in_height, 1, in_width, padding_mode, align_corners)),
__half2float(get_value_bounded<half>(inp_ptr_NC, ix_nw + 2, iy_nw - 1 + i, in_width, in_height, 1, in_width, padding_mode, align_corners)),
tx);
}
// Interpolate in the y direction
output[idx] = __float2half(cubic_interp1d<float>(coefficients[0], coefficients[1], coefficients[2], coefficients[3], ty));
}
#endif
}
ppl::common::RetCode PPLCUDAMMCVGridSampleForwardImp(
hipStream_t stream,
ppl::nn::TensorShape* input0_shape,
const void* input0,
ppl::nn::TensorShape* input1_shape,
const void* input1,
ppl::nn::TensorShape* output_shape,
void* output,
ppl::nn::common::MMCVGridSampleParam param)
{
int block_size = 256;
int out_n = output_shape->GetDim(0);
int out_c = output_shape->GetDim(1);
int out_h = output_shape->GetDim(2);
int out_w = output_shape->GetDim(3);
int in_h = input0_shape->GetDim(2);
int in_w = input0_shape->GetDim(3);
int64_t num_elems = output_shape->GetElementsIncludingPadding();
int grid_size = DivUp(num_elems, block_size);
if (output_shape->GetDataFormat() == ppl::common::DATAFORMAT_NDARRAY) {
if (output_shape->GetDataType() == ppl::common::DATATYPE_FLOAT32) {
hipLaunchKernelGGL(( ppl_cukernel_gridsample_fp32), dim3(grid_size), dim3(block_size), 0, stream,
out_n, out_c, out_h, out_w, in_h, in_w, num_elems, (float*)input0, (float*)input1, (float*)output, param.align_corners, param.padding_mode, param.interpolation_mode);
} else if (output_shape->GetDataType() == ppl::common::DATATYPE_FLOAT16) {
hipLaunchKernelGGL(( ppl_cukernel_gridsample_fp16), dim3(grid_size), dim3(block_size), 0, stream,
out_n, out_c, out_h, out_w, in_h, in_w, num_elems, (half*)input0, (half*)input1, (half*)output, param.align_corners, param.padding_mode, param.interpolation_mode);
} else {
return ppl::common::RC_UNSUPPORTED;
}
} else {
return ppl::common::RC_UNSUPPORTED;
}
return ppl::common::RC_SUCCESS;
}
| f5a36bc3e0add72a3f29dc6ef22d13a4c474bbd8.cu | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "cudakernel/nn/mmcv_gridsample.h"
#include "cudakernel/math/math.h"
#include "cudakernel/common/common.h"
#include "ppl/nn/common/tensor_shape.h"
#include <cuda_fp16.h>
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
#define CLIP_COORDINATES(in, out, clip_limit) out = MIN((clip_limit - 1), MAX(in, 0))
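// grid_sampler_unnormalize maps a grid coordinate from [-1, 1] to pixel space.
// Example with size = 4: align_corners=true sends -1 -> 0 and 1 -> 3 (corner
// pixel centres), while align_corners=false sends -1 -> -0.5 and 1 -> 3.5
// (corner pixel edges), matching the two formulas below.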
template <typename T>
static inline __device__ T grid_sampler_unnormalize(T coord, int64_t size, bool align_corners)
{
if (align_corners) {
return ((coord + 1) / 2) * (size - 1);
} else {
return ((coord + 1) * size - 1) / 2;
}
}
// Reflects coordinates until they fall between low and high (inclusive).
// The bounds are passed as twice their value so that half-integer values
// can be represented as ints.
template <typename T>
static inline __device__ T reflect_coordinates(T in, int64_t twice_low, int64_t twice_high)
{
if (twice_low == twice_high) {
return static_cast<T>(0);
}
T min = static_cast<T>(twice_low) / 2;
T span = static_cast<T>(twice_high - twice_low) / 2;
in = fabsf(in - min);
// `fmod` returns same sign as `in`, which is positive after the `fabs` above.
T extra = fmodf(in, span);
int flips = static_cast<int>(floorf(in / span));
if (flips % 2 == 0) {
return extra + min;
} else {
return span - extra + min;
}
}
// padding_mode: zeros == 0, border == 1, reflection == 2;
template <typename T>
static inline __device__ T compute_coordinates(T coord, int64_t size, int64_t padding_mode, bool align_corners)
{
if (padding_mode == 1) {
CLIP_COORDINATES(coord, coord, size);
} else if (padding_mode == 2) {
if (align_corners) {
coord = reflect_coordinates(coord, 0, 2 * (size - 1));
} else {
coord = reflect_coordinates(coord, -1, 2 * size - 1);
}
CLIP_COORDINATES(coord, coord, size);
}
return coord;
}
// Computes the pixel source index value for a grid coordinate
template <typename T>
static inline __device__ T grid_sampler_compute_source_index(T coord, int64_t size, int64_t padding_mode, bool align_corners)
{
coord = grid_sampler_unnormalize(coord, size, align_corners);
coord = compute_coordinates(coord, size, padding_mode, align_corners);
return coord;
}
static inline __device__ bool within_bounds_2d(int64_t h, int64_t w, int64_t H, int64_t W)
{
return h >= 0 && h < H && w >= 0 && w < W;
}
template <typename T>
static inline T __device__ get_value_bounded(const T* data, float x, float y, int64_t W, int64_t H, int64_t sW, int64_t sH, int64_t padding_mode, bool align_corners)
{
x = compute_coordinates(x, W, padding_mode, align_corners);
y = compute_coordinates(y, H, padding_mode, align_corners);
int64_t ix = static_cast<int64_t>(x);
int64_t iy = static_cast<int64_t>(y);
if (within_bounds_2d(iy, ix, H, W)) {
return data[iy * sH + ix * sW];
}
return (T)(0);
}
template <typename T>
static inline __device__ T cubic_convolution1(T x, T A)
{
return ((A + 2) * x - (A + 3)) * x * x + 1;
}
template <typename T>
static inline __device__ T cubic_convolution2(T x, T A)
{
return ((A * x - 5 * A) * x + 8 * A) * x - 4 * A;
}
template <typename T>
static inline __device__ void get_cubic_upsample_coefficients(T coeffs[4],
T t)
{
T A = -0.75;
T x1 = t;
coeffs[0] = cubic_convolution2<T>(x1 + 1.0, A);
coeffs[1] = cubic_convolution1<T>(x1, A);
// opposite coefficients
T x2 = 1.0 - t;
coeffs[2] = cubic_convolution1<T>(x2, A);
coeffs[3] = cubic_convolution2<T>(x2 + 1.0, A);
}
template <typename T>
static inline __device__ T cubic_interp1d(T x0, T x1, T x2, T x3, T t)
{
T coeffs[4];
get_cubic_upsample_coefficients<T>(coeffs, t);
return x0 * coeffs[0] + x1 * coeffs[1] + x2 * coeffs[2] + x3 * coeffs[3];
}
// interpolation mode: bilinear == 0, nearest == 1, bicubic == 2;
__global__ void ppl_cukernel_gridsample_fp32(
const int num,
const int channels,
const int height,
const int width,
const int in_height,
const int in_width,
const int num_threads,
const float* input0,
const float* input1,
float* output,
int align_corners,
int padding_mode,
int interpolation_mode)
{
size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= num_threads)
return;
size_t chw = channels * height * width;
size_t hw = height * width;
size_t in_hw = in_height * in_width;
size_t batch_idx = idx / chw;
size_t c_idx = (idx / hw) % channels;
size_t h_idx = (idx / width) % height;
size_t w_idx = (idx % width);
int grid_idx = (batch_idx * hw + h_idx * width + w_idx) << 1;
float x = input1[grid_idx + 0];
float y = input1[grid_idx + 1];
if (isinf(x) || isinf(y)) {
output[idx] = 0;
return;
}
float ix = grid_sampler_compute_source_index(x, in_width, padding_mode, align_corners);
float iy = grid_sampler_compute_source_index(y, in_height, padding_mode, align_corners);
// get NE, NW, SE, SW pixel values from (x, y)
if (interpolation_mode == 0) { // bilinear
int ix_nw = (int)floorf(ix);
int iy_nw = (int)floorf(iy);
int ix_ne = ix_nw + 1;
int iy_ne = iy_nw;
int ix_sw = ix_nw;
int iy_sw = iy_nw + 1;
int ix_se = ix_nw + 1;
int iy_se = iy_nw + 1;
// get surfaces to each neighbor:
float nw = (ix_se - ix) * (iy_se - iy);
float ne = (ix - ix_sw) * (iy_sw - iy);
float sw = (ix_ne - ix) * (iy - iy_ne);
float se = (ix - ix_nw) * (iy - iy_nw);
// calculate bilinear weighted pixel value and set output pixel
size_t in_idx = batch_idx * channels * in_hw + c_idx * in_hw;
float nw_val = ((ix_nw >= 0) && (iy_nw >= 0) && (ix_nw < in_width) && (iy_nw < in_height)) ? input0[in_idx + iy_nw * in_width + ix_nw] : 0;
float ne_val = ((ix_ne >= 0) && (iy_ne >= 0) && (ix_ne < in_width) && (iy_ne < in_height)) ? input0[in_idx + iy_ne * in_width + ix_ne] : 0;
float sw_val = ((ix_sw >= 0) && (iy_sw >= 0) && (ix_sw < in_width) && (iy_sw < in_height)) ? input0[in_idx + iy_sw * in_width + ix_sw] : 0;
float se_val = ((ix_se >= 0) && (iy_se >= 0) && (ix_se < in_width) && (iy_se < in_height)) ? input0[in_idx + iy_se * in_width + ix_se] : 0;
float out_val = nw_val * nw + ne_val * ne + sw_val * sw + se_val * se;
output[idx] = out_val;
} else if (interpolation_mode == 1) { // nearest
int64_t ix_nearest = static_cast<int64_t>(floorf(0.5 + ix));
int64_t iy_nearest = static_cast<int64_t>(floorf(0.5 + iy));
size_t in_idx = batch_idx * channels * in_hw + c_idx * in_hw;
output[idx] = ((ix_nearest >= 0) && (iy_nearest >= 0) && (ix_nearest < in_width) && (iy_nearest < in_height)) ? input0[in_idx + iy_nearest * in_width + ix_nearest] : 0;
} else { // bicubic
ix = grid_sampler_unnormalize(x, in_width, align_corners);
iy = grid_sampler_unnormalize(y, in_height, align_corners);
float ix_nw = floorf(ix);
float iy_nw = floorf(iy);
const float tx = ix - ix_nw;
const float ty = iy - iy_nw;
const float* inp_ptr_NC = input0 + batch_idx * in_hw;
float coefficients[4];
// Interpolate 4 values in the x direction
for (int64_t i = 0; i < 4; ++i) {
coefficients[i] = cubic_interp1d<float>(
get_value_bounded<float>(inp_ptr_NC, ix_nw - 1, iy_nw - 1 + i, in_width, in_height, 1, in_width, padding_mode, align_corners),
get_value_bounded<float>(inp_ptr_NC, ix_nw + 0, iy_nw - 1 + i, in_width, in_height, 1, in_width, padding_mode, align_corners),
get_value_bounded<float>(inp_ptr_NC, ix_nw + 1, iy_nw - 1 + i, in_width, in_height, 1, in_width, padding_mode, align_corners),
get_value_bounded<float>(inp_ptr_NC, ix_nw + 2, iy_nw - 1 + i, in_width, in_height, 1, in_width, padding_mode, align_corners),
tx);
}
// Interpolate in the y direction
output[idx] = cubic_interp1d<float>(coefficients[0], coefficients[1], coefficients[2], coefficients[3], ty);
}
}
__global__ void ppl_cukernel_gridsample_fp16(
const int num,
const int channels,
const int height,
const int width,
const int in_height,
const int in_width,
const int num_threads,
const half* input0,
const half* input1,
half* output,
int align_corners,
int padding_mode,
int interpolation_mode)
{
#if __CUDA_ARCH__ >= 600 && __CUDACC_VER_MAJOR__ >= 9
size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= num_threads)
return;
size_t chw = channels * height * width;
size_t hw = height * width;
size_t in_hw = in_height * in_width;
size_t batch_idx = idx / chw;
size_t c_idx = (idx / hw) % channels;
size_t h_idx = (idx / width) % height;
size_t w_idx = (idx % width);
int grid_idx = (batch_idx * hw + h_idx * width + w_idx) << 1;
float x = __half2float(input1[grid_idx + 0]);
float y = __half2float(input1[grid_idx + 1]);
if (isinf(x) || isinf(y)) {
output[idx] = (half)0;
return;
}
float ix = grid_sampler_compute_source_index(x, in_width, padding_mode, align_corners);
float iy = grid_sampler_compute_source_index(y, in_height, padding_mode, align_corners);
// get NE, NW, SE, SW pixel values from (x, y)
if (interpolation_mode == 0) { // bilinear
int ix_nw = (int)floorf(ix);
int iy_nw = (int)floorf(iy);
int ix_ne = ix_nw + 1;
int iy_ne = iy_nw;
int ix_sw = ix_nw;
int iy_sw = iy_nw + 1;
int ix_se = ix_nw + 1;
int iy_se = iy_nw + 1;
// get surfaces to each neighbor:
float nw = (ix_se - ix) * (iy_se - iy);
float ne = (ix - ix_sw) * (iy_sw - iy);
float sw = (ix_ne - ix) * (iy - iy_ne);
float se = (ix - ix_nw) * (iy - iy_nw);
// calculate bilinear weighted pixel value and set output pixel
size_t in_idx = batch_idx * channels * in_hw + c_idx * in_hw;
float nw_val = ((ix_nw >= 0) && (iy_nw >= 0) && (ix_nw < in_width) && (iy_nw < in_height)) ? __half2float(input0[in_idx + iy_nw * in_width + ix_nw]) : 0;
float ne_val = ((ix_ne >= 0) && (iy_ne >= 0) && (ix_ne < in_width) && (iy_ne < in_height)) ? __half2float(input0[in_idx + iy_ne * in_width + ix_ne]) : 0;
float sw_val = ((ix_sw >= 0) && (iy_sw >= 0) && (ix_sw < in_width) && (iy_sw < in_height)) ? __half2float(input0[in_idx + iy_sw * in_width + ix_sw]) : 0;
float se_val = ((ix_se >= 0) && (iy_se >= 0) && (ix_se < in_width) && (iy_se < in_height)) ? __half2float(input0[in_idx + iy_se * in_width + ix_se]) : 0;
float out_val = nw_val * nw + ne_val * ne + sw_val * sw + se_val * se;
output[idx] = __float2half(out_val);
} else if (interpolation_mode == 1) { // nearest
int64_t ix_nearest = static_cast<int64_t>(floorf(0.5 + ix));
int64_t iy_nearest = static_cast<int64_t>(floorf(0.5 + iy));
size_t in_idx = batch_idx * channels * in_hw + c_idx * in_hw;
output[idx] = ((ix_nearest >= 0) && (iy_nearest >= 0) && (ix_nearest < in_width) && (iy_nearest < in_height)) ? input0[in_idx + iy_nearest * in_width + ix_nearest] : half(0);
} else { // bicubic
ix = grid_sampler_unnormalize(x, in_width, align_corners);
iy = grid_sampler_unnormalize(y, in_height, align_corners);
float ix_nw = floorf(ix);
float iy_nw = floorf(iy);
const float tx = ix - ix_nw;
const float ty = iy - iy_nw;
const half* inp_ptr_NC = input0 + batch_idx * in_hw;
float coefficients[4];
// Interpolate 4 values in the x direction
for (int64_t i = 0; i < 4; ++i) {
coefficients[i] = cubic_interp1d<float>(
__half2float(get_value_bounded<half>(inp_ptr_NC, ix_nw - 1, iy_nw - 1 + i, in_width, in_height, 1, in_width, padding_mode, align_corners)),
__half2float(get_value_bounded<half>(inp_ptr_NC, ix_nw + 0, iy_nw - 1 + i, in_width, in_height, 1, in_width, padding_mode, align_corners)),
__half2float(get_value_bounded<half>(inp_ptr_NC, ix_nw + 1, iy_nw - 1 + i, in_width, in_height, 1, in_width, padding_mode, align_corners)),
__half2float(get_value_bounded<half>(inp_ptr_NC, ix_nw + 2, iy_nw - 1 + i, in_width, in_height, 1, in_width, padding_mode, align_corners)),
tx);
}
// Interpolate in the y direction
output[idx] = __float2half(cubic_interp1d<float>(coefficients[0], coefficients[1], coefficients[2], coefficients[3], ty));
}
#endif
}
ppl::common::RetCode PPLCUDAMMCVGridSampleForwardImp(
cudaStream_t stream,
ppl::nn::TensorShape* input0_shape,
const void* input0,
ppl::nn::TensorShape* input1_shape,
const void* input1,
ppl::nn::TensorShape* output_shape,
void* output,
ppl::nn::common::MMCVGridSampleParam param)
{
int block_size = 256;
int out_n = output_shape->GetDim(0);
int out_c = output_shape->GetDim(1);
int out_h = output_shape->GetDim(2);
int out_w = output_shape->GetDim(3);
int in_h = input0_shape->GetDim(2);
int in_w = input0_shape->GetDim(3);
int64_t num_elems = output_shape->GetElementsIncludingPadding();
int grid_size = DivUp(num_elems, block_size);
if (output_shape->GetDataFormat() == ppl::common::DATAFORMAT_NDARRAY) {
if (output_shape->GetDataType() == ppl::common::DATATYPE_FLOAT32) {
ppl_cukernel_gridsample_fp32<<<grid_size, block_size, 0, stream>>>(
out_n, out_c, out_h, out_w, in_h, in_w, num_elems, (float*)input0, (float*)input1, (float*)output, param.align_corners, param.padding_mode, param.interpolation_mode);
} else if (output_shape->GetDataType() == ppl::common::DATATYPE_FLOAT16) {
ppl_cukernel_gridsample_fp16<<<grid_size, block_size, 0, stream>>>(
out_n, out_c, out_h, out_w, in_h, in_w, num_elems, (half*)input0, (half*)input1, (half*)output, param.align_corners, param.padding_mode, param.interpolation_mode);
} else {
return ppl::common::RC_UNSUPPORTED;
}
} else {
return ppl::common::RC_UNSUPPORTED;
}
return ppl::common::RC_SUCCESS;
}
|
852c14bbe5dd4f952dddcf61ed80b42a953b7548.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#endif
#include "global.h"
#include "sha2.cuh"
#include <time.h>
#include <algorithm>
using namespace std;
#define OUTDATE 45000
#define SHFR(x, n) (x >> n)
#define ROTR(x, n) ((x >> n) | (x << ((sizeof(x) << 3) - n)))
#define ROTL(x, n) ((x << n) | (x >> ((sizeof(x) << 3) - n)))
#define CH(x, y, z) ((x & y) ^ (~x & z))
#define MAJ(x, y, z) ((x & y) ^ (x & z) ^ (y & z))
#define SHA512_F1(x) (ROTR(x, 28) ^ ROTR(x, 34) ^ ROTR(x, 39))
#define SHA512_F2(x) (ROTR(x, 14) ^ ROTR(x, 18) ^ ROTR(x, 41))
#define SHA512_F3(x) (ROTR(x, 1) ^ ROTR(x, 8) ^ SHFR(x, 7))
#define SHA512_F4(x) (ROTR(x, 19) ^ ROTR(x, 61) ^ SHFR(x, 6))
#define UNPACK32(x, str) \
{ \
*((str) + 3) = (uint8) ((x) ); \
*((str) + 2) = (uint8) ((x) >> 8); \
*((str) + 1) = (uint8) ((x) >> 16); \
*((str) + 0) = (uint8) ((x) >> 24); \
}
#define UNPACK64(x, str) \
{ \
*((str) + 7) = (uint8) ((x) ); \
*((str) + 6) = (uint8) ((x) >> 8); \
*((str) + 5) = (uint8) ((x) >> 16); \
*((str) + 4) = (uint8) ((x) >> 24); \
*((str) + 3) = (uint8) ((x) >> 32); \
*((str) + 2) = (uint8) ((x) >> 40); \
*((str) + 1) = (uint8) ((x) >> 48); \
*((str) + 0) = (uint8) ((x) >> 56); \
}
#define PACK64(str, x) \
{ \
*(x) = ((uint64) *((str) + 7) ) \
| ((uint64) *((str) + 6) << 8) \
| ((uint64) *((str) + 5) << 16) \
| ((uint64) *((str) + 4) << 24) \
| ((uint64) *((str) + 3) << 32) \
| ((uint64) *((str) + 2) << 40) \
| ((uint64) *((str) + 1) << 48) \
| ((uint64) *((str) + 0) << 56); \
}
#define SWAP32(n) \
(((n) << 24) | (((n) & 0xff00) << 8) | (((n) >> 8) & 0xff00) | ((n) >> 24))
#define SWAP64(n) \
(((n) << 56) \
| (((n) & 0xff00) << 40) \
| (((n) & 0xff0000) << 24) \
| (((n) & 0xff000000) << 8) \
| (((n) >> 8) & 0xff000000) \
| (((n) >> 24) & 0xff0000) \
| (((n) >> 40) & 0xff00) \
| ((n) >> 56))
#define SHA512_SCR(i) \
{ \
w[i] = SHA512_F4(w[i - 2]) + w[i - 7] \
+ SHA512_F3(w[i - 15]) + w[i - 16]; \
}
#define SHA512_EXP(a, b, c, d, e, f, g ,h, j, k) \
{ \
t1 = wv[h] + SHA512_F2(wv[e]) + CH(wv[e], wv[f], wv[g]) \
+ k + w[j]; \
t2 = SHA512_F1(wv[a]) + MAJ(wv[a], wv[b], wv[c]); \
wv[d] += t1; \
wv[h] = t1 + t2; \
}
__constant__ cu_session run_session;
__device__ void cu_sha512_init(cu_sha512_ctx &ctx)
{
ctx.h[0] = 0x6a09e667f3bcc908ULL; ctx.h[1] = 0xbb67ae8584caa73bULL;
ctx.h[2] = 0x3c6ef372fe94f82bULL; ctx.h[3] = 0xa54ff53a5f1d36f1ULL;
ctx.h[4] = 0x510e527fade682d1ULL; ctx.h[5] = 0x9b05688c2b3e6c1fULL;
ctx.h[6] = 0x1f83d9abfb41bd6bULL; ctx.h[7] = 0x5be0cd19137e2179ULL;
}
__device__ void cu_sha512_transf_len36(cu_sha512_ctx &ctx, const cu_sha512_message &message)
{
uint64 w[80];
uint64 wv[8];
uint64 t1, t2;
/*PACK64(&message.c[ 0], &w[ 0]); PACK64(&message.c[ 8], &w[ 1]);
PACK64(&message.c[ 16], &w[ 2]); PACK64(&message.c[ 24], &w[ 3]);
PACK64(&message.c[ 32], &w[ 4]); */
w[0] = SWAP64(message.h[0]);
w[1] = SWAP64(message.h[1]);
w[2] = SWAP64(message.h[2]);
w[3] = SWAP64(message.h[3]);
w[4] = SWAP64(message.h[4]);
w[5] = 0; w[6] = 0;
w[7] = 0; w[8] = 0;
w[9] = 0; w[10] = 0;
w[11] = 0; w[12] = 0;
w[13] = 0; w[14] = 0;
w[15] = 36 << 3;
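// Only w[0..4] carry message bytes: the input is a fixed 36-byte block
// (4-byte nonce index + 32-byte midHash). w[15] holds the message length in
// bits (36*8 = 288) required by SHA-512 padding; the 0x80 terminator byte is
// written into byte 36 by cu_sha512_len36 before this transform runs.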
SHA512_SCR(16); SHA512_SCR(17); SHA512_SCR(18); SHA512_SCR(19);
SHA512_SCR(20); SHA512_SCR(21); SHA512_SCR(22); SHA512_SCR(23);
SHA512_SCR(24); SHA512_SCR(25); SHA512_SCR(26); SHA512_SCR(27);
SHA512_SCR(28); SHA512_SCR(29); SHA512_SCR(30); SHA512_SCR(31);
SHA512_SCR(32); SHA512_SCR(33); SHA512_SCR(34); SHA512_SCR(35);
SHA512_SCR(36); SHA512_SCR(37); SHA512_SCR(38); SHA512_SCR(39);
SHA512_SCR(40); SHA512_SCR(41); SHA512_SCR(42); SHA512_SCR(43);
SHA512_SCR(44); SHA512_SCR(45); SHA512_SCR(46); SHA512_SCR(47);
SHA512_SCR(48); SHA512_SCR(49); SHA512_SCR(50); SHA512_SCR(51);
SHA512_SCR(52); SHA512_SCR(53); SHA512_SCR(54); SHA512_SCR(55);
SHA512_SCR(56); SHA512_SCR(57); SHA512_SCR(58); SHA512_SCR(59);
SHA512_SCR(60); SHA512_SCR(61); SHA512_SCR(62); SHA512_SCR(63);
SHA512_SCR(64); SHA512_SCR(65); SHA512_SCR(66); SHA512_SCR(67);
SHA512_SCR(68); SHA512_SCR(69); SHA512_SCR(70); SHA512_SCR(71);
SHA512_SCR(72); SHA512_SCR(73); SHA512_SCR(74); SHA512_SCR(75);
SHA512_SCR(76); SHA512_SCR(77); SHA512_SCR(78); SHA512_SCR(79);
wv[0] = ctx.h[0]; wv[1] = ctx.h[1];
wv[2] = ctx.h[2]; wv[3] = ctx.h[3];
wv[4] = ctx.h[4]; wv[5] = ctx.h[5];
wv[6] = ctx.h[6]; wv[7] = ctx.h[7];
SHA512_EXP(0,1,2,3,4,5,6,7,0,0x428a2f98d728ae22ULL);
SHA512_EXP(7,0,1,2,3,4,5,6,1,0x7137449123ef65cdULL);
SHA512_EXP(6,7,0,1,2,3,4,5,2,0xb5c0fbcfec4d3b2fULL);
SHA512_EXP(5,6,7,0,1,2,3,4,3,0xe9b5dba58189dbbcULL);
SHA512_EXP(4,5,6,7,0,1,2,3,4,0x3956c25bf348b538ULL);
SHA512_EXP(3,4,5,6,7,0,1,2,5,0x59f111f1b605d019ULL);
SHA512_EXP(2,3,4,5,6,7,0,1,6,0x923f82a4af194f9bULL);
SHA512_EXP(1,2,3,4,5,6,7,0,7,0xab1c5ed5da6d8118ULL);
SHA512_EXP(0,1,2,3,4,5,6,7,8,0xd807aa98a3030242ULL);
SHA512_EXP(7,0,1,2,3,4,5,6,9,0x12835b0145706fbeULL);
SHA512_EXP(6,7,0,1,2,3,4,5,10,0x243185be4ee4b28cULL);
SHA512_EXP(5,6,7,0,1,2,3,4,11,0x550c7dc3d5ffb4e2ULL);
SHA512_EXP(4,5,6,7,0,1,2,3,12,0x72be5d74f27b896fULL);
SHA512_EXP(3,4,5,6,7,0,1,2,13,0x80deb1fe3b1696b1ULL);
SHA512_EXP(2,3,4,5,6,7,0,1,14,0x9bdc06a725c71235ULL);
SHA512_EXP(1,2,3,4,5,6,7,0,15,0xc19bf174cf692694ULL);
SHA512_EXP(0,1,2,3,4,5,6,7,16,0xe49b69c19ef14ad2ULL);
SHA512_EXP(7,0,1,2,3,4,5,6,17,0xefbe4786384f25e3ULL);
SHA512_EXP(6,7,0,1,2,3,4,5,18,0x0fc19dc68b8cd5b5ULL);
SHA512_EXP(5,6,7,0,1,2,3,4,19,0x240ca1cc77ac9c65ULL);
SHA512_EXP(4,5,6,7,0,1,2,3,20,0x2de92c6f592b0275ULL);
SHA512_EXP(3,4,5,6,7,0,1,2,21,0x4a7484aa6ea6e483ULL);
SHA512_EXP(2,3,4,5,6,7,0,1,22,0x5cb0a9dcbd41fbd4ULL);
SHA512_EXP(1,2,3,4,5,6,7,0,23,0x76f988da831153b5ULL);
SHA512_EXP(0,1,2,3,4,5,6,7,24,0x983e5152ee66dfabULL);
SHA512_EXP(7,0,1,2,3,4,5,6,25,0xa831c66d2db43210ULL);
SHA512_EXP(6,7,0,1,2,3,4,5,26,0xb00327c898fb213fULL);
SHA512_EXP(5,6,7,0,1,2,3,4,27,0xbf597fc7beef0ee4ULL);
SHA512_EXP(4,5,6,7,0,1,2,3,28,0xc6e00bf33da88fc2ULL);
SHA512_EXP(3,4,5,6,7,0,1,2,29,0xd5a79147930aa725ULL);
SHA512_EXP(2,3,4,5,6,7,0,1,30,0x06ca6351e003826fULL);
SHA512_EXP(1,2,3,4,5,6,7,0,31,0x142929670a0e6e70ULL);
SHA512_EXP(0,1,2,3,4,5,6,7,32,0x27b70a8546d22ffcULL);
SHA512_EXP(7,0,1,2,3,4,5,6,33,0x2e1b21385c26c926ULL);
SHA512_EXP(6,7,0,1,2,3,4,5,34,0x4d2c6dfc5ac42aedULL);
SHA512_EXP(5,6,7,0,1,2,3,4,35,0x53380d139d95b3dfULL);
SHA512_EXP(4,5,6,7,0,1,2,3,36,0x650a73548baf63deULL);
SHA512_EXP(3,4,5,6,7,0,1,2,37,0x766a0abb3c77b2a8ULL);
SHA512_EXP(2,3,4,5,6,7,0,1,38,0x81c2c92e47edaee6ULL);
SHA512_EXP(1,2,3,4,5,6,7,0,39,0x92722c851482353bULL);
SHA512_EXP(0,1,2,3,4,5,6,7,40,0xa2bfe8a14cf10364ULL);
SHA512_EXP(7,0,1,2,3,4,5,6,41,0xa81a664bbc423001ULL);
SHA512_EXP(6,7,0,1,2,3,4,5,42,0xc24b8b70d0f89791ULL);
SHA512_EXP(5,6,7,0,1,2,3,4,43,0xc76c51a30654be30ULL);
SHA512_EXP(4,5,6,7,0,1,2,3,44,0xd192e819d6ef5218ULL);
SHA512_EXP(3,4,5,6,7,0,1,2,45,0xd69906245565a910ULL);
SHA512_EXP(2,3,4,5,6,7,0,1,46,0xf40e35855771202aULL);
SHA512_EXP(1,2,3,4,5,6,7,0,47,0x106aa07032bbd1b8ULL);
SHA512_EXP(0,1,2,3,4,5,6,7,48,0x19a4c116b8d2d0c8ULL);
SHA512_EXP(7,0,1,2,3,4,5,6,49,0x1e376c085141ab53ULL);
SHA512_EXP(6,7,0,1,2,3,4,5,50,0x2748774cdf8eeb99ULL);
SHA512_EXP(5,6,7,0,1,2,3,4,51,0x34b0bcb5e19b48a8ULL);
SHA512_EXP(4,5,6,7,0,1,2,3,52,0x391c0cb3c5c95a63ULL);
SHA512_EXP(3,4,5,6,7,0,1,2,53,0x4ed8aa4ae3418acbULL);
SHA512_EXP(2,3,4,5,6,7,0,1,54,0x5b9cca4f7763e373ULL);
SHA512_EXP(1,2,3,4,5,6,7,0,55,0x682e6ff3d6b2b8a3ULL);
SHA512_EXP(0,1,2,3,4,5,6,7,56,0x748f82ee5defb2fcULL);
SHA512_EXP(7,0,1,2,3,4,5,6,57,0x78a5636f43172f60ULL);
SHA512_EXP(6,7,0,1,2,3,4,5,58,0x84c87814a1f0ab72ULL);
SHA512_EXP(5,6,7,0,1,2,3,4,59,0x8cc702081a6439ecULL);
SHA512_EXP(4,5,6,7,0,1,2,3,60,0x90befffa23631e28ULL);
SHA512_EXP(3,4,5,6,7,0,1,2,61,0xa4506cebde82bde9ULL);
SHA512_EXP(2,3,4,5,6,7,0,1,62,0xbef9a3f7b2c67915ULL);
SHA512_EXP(1,2,3,4,5,6,7,0,63,0xc67178f2e372532bULL);
SHA512_EXP(0,1,2,3,4,5,6,7,64,0xca273eceea26619cULL);
SHA512_EXP(7,0,1,2,3,4,5,6,65,0xd186b8c721c0c207ULL);
SHA512_EXP(6,7,0,1,2,3,4,5,66,0xeada7dd6cde0eb1eULL);
SHA512_EXP(5,6,7,0,1,2,3,4,67,0xf57d4f7fee6ed178ULL);
SHA512_EXP(4,5,6,7,0,1,2,3,68,0x06f067aa72176fbaULL);
SHA512_EXP(3,4,5,6,7,0,1,2,69,0x0a637dc5a2c898a6ULL);
SHA512_EXP(2,3,4,5,6,7,0,1,70,0x113f9804bef90daeULL);
SHA512_EXP(1,2,3,4,5,6,7,0,71,0x1b710b35131c471bULL);
SHA512_EXP(0,1,2,3,4,5,6,7,72,0x28db77f523047d84ULL);
SHA512_EXP(7,0,1,2,3,4,5,6,73,0x32caab7b40c72493ULL);
SHA512_EXP(6,7,0,1,2,3,4,5,74,0x3c9ebe0a15c9bebcULL);
SHA512_EXP(5,6,7,0,1,2,3,4,75,0x431d67c49c100d4cULL);
SHA512_EXP(4,5,6,7,0,1,2,3,76,0x4cc5d4becb3e42b6ULL);
SHA512_EXP(3,4,5,6,7,0,1,2,77,0x597f299cfc657e2aULL);
SHA512_EXP(2,3,4,5,6,7,0,1,78,0x5fcb6fab3ad6faecULL);
SHA512_EXP(1,2,3,4,5,6,7,0,79,0x6c44198c4a475817ULL);
ctx.h[0] += wv[0]; ctx.h[1] += wv[1];
ctx.h[2] += wv[2]; ctx.h[3] += wv[3];
ctx.h[4] += wv[4]; ctx.h[5] += wv[5];
ctx.h[6] += wv[6]; ctx.h[7] += wv[7];
}
__device__ void cu_sha512_len36(cu_sha512_message &message, cu_sha512_digest &digest)
{
cu_sha512_ctx ctx;
cu_sha512_init(ctx);
cu_sha512_message &block = message;
block.c[36] = 0x80;
cu_sha512_transf_len36(ctx, block);
/*UNPACK64(ctx.h[0], &digest.c[ 0]);
UNPACK64(ctx.h[1], &digest.c[ 8]);
UNPACK64(ctx.h[2], &digest.c[16]);
UNPACK64(ctx.h[3], &digest.c[24]);
UNPACK64(ctx.h[4], &digest.c[32]);
UNPACK64(ctx.h[5], &digest.c[40]);
UNPACK64(ctx.h[6], &digest.c[48]);
UNPACK64(ctx.h[7], &digest.c[56]);*/
digest.h[0] = SWAP64(ctx.h[0]);
digest.h[1] = SWAP64(ctx.h[1]);
digest.h[2] = SWAP64(ctx.h[2]);
digest.h[3] = SWAP64(ctx.h[3]);
digest.h[4] = SWAP64(ctx.h[4]);
digest.h[5] = SWAP64(ctx.h[5]);
digest.h[6] = SWAP64(ctx.h[6]);
digest.h[7] = SWAP64(ctx.h[7]);
}
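// Momentum (protoshares) birthday search: for each of the 2^26 nonce indices,
// one SHA-512 over (index & ~7, midHash) yields eight 64-bit words, and the top
// 50 bits of each word (the "birthdays") are compared across indices; a matching
// pair is a share candidate that still has to pass protoshares_revalidateCollision.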
#define MAX_MOMENTUM_NONCE (1<<26) // 67.108.864
#define SEARCH_SPACE_BITS 50
#define BIRTHDAYS_PER_HASH 8
#define CACHED_HASHES (32)
#define COLLISION_TABLE_BITS (27)
#define COLLISION_TABLE_SIZE (1<<COLLISION_TABLE_BITS)
#define COLLISION_TABLE_MASK (COLLISION_TABLE_SIZE-1)
#define COLLISION_KEY_WIDTH (32-COLLISION_TABLE_BITS)
#define COLLISION_KEY_MASK (0xFFFFFFFF<<(32-(COLLISION_KEY_WIDTH)))
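// The collision map packs two things into each 32-bit slot: the slot itself is
// selected by the low COLLISION_TABLE_BITS of the birthday, and the value stored
// is the 26-bit nonce index with a COLLISION_KEY_WIDTH-bit key (taken from higher
// birthday bits) in the top bits. A key match on overwrite is only a probable
// collision, hence the false-alarm recheck below.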
typedef union
{
uint32 u[2];
uint64 lu;
} uni64;
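// Re-hashes both candidate indices and keeps the pair only if the full 50-bit
// birthdays really match; this filters the false positives produced by the lossy
// keyed collision map. Survivors are appended to `result` for host-side revalidation.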
__global__ void cu_ps_falseAlarmCheck(uint32 *index, uint32 *result, uint32 *resultCount)
{
uint32 tid = threadIdx.x + (blockIdx.x * blockDim.x);
uint32 indexA = index[tid * 2];
uint32 indexB = index[tid * 2 + 1];
cu_sha512_message block;
cu_sha512_digest sha_digest;
block.h[4] = 0;
block.i[1] = run_session.tempHash[0];
block.i[2] = run_session.tempHash[1];
block.i[3] = run_session.tempHash[2];
block.i[4] = run_session.tempHash[3];
block.i[5] = run_session.tempHash[4];
block.i[6] = run_session.tempHash[5];
block.i[7] = run_session.tempHash[6];
block.i[8] = run_session.tempHash[7];
uint64 birthdayA, birthdayB;
block.i[0] = indexA&~7;
cu_sha512_len36(block, sha_digest);
birthdayA = sha_digest.h[indexA&7] >> (64ULL-SEARCH_SPACE_BITS);
block.i[0] = indexB&~7;
cu_sha512_len36(block, sha_digest);
birthdayB = sha_digest.h[indexB&7] >> (64ULL-SEARCH_SPACE_BITS);
if (birthdayA == birthdayB && indexA != indexB) {
uint32 pos = atomicInc(resultCount, 4094);
result[pos * 2] = indexA;
result[pos * 2 + 1] = indexB;
}
}
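// One thread hashes eight consecutive indices' worth of birthdays (a single
// SHA-512 call) and publishes each into the collision map with atomicExch;
// whenever the displaced entry carries the same key, the (new, old) index pair
// is queued in `result` for the false-alarm kernel above.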
__global__ void cu_sha512_map_reduce(uint32 *collisionMap, uint32 *result, uint32 *resultCount)
{
uint32 tid = threadIdx.x + (blockIdx.x * blockDim.x); //m
uint32 index = run_session.n + tid * 8;//i
cu_sha512_message block;
cu_sha512_digest sha_digest;
block.h[4] = 0;
block.i[0] = index;
block.i[1] = run_session.tempHash[0];
block.i[2] = run_session.tempHash[1];
block.i[3] = run_session.tempHash[2];
block.i[4] = run_session.tempHash[3];
block.i[5] = run_session.tempHash[4];
block.i[6] = run_session.tempHash[5];
block.i[7] = run_session.tempHash[6];
block.i[8] = run_session.tempHash[7];
cu_sha512_len36(block, sha_digest);
for (int f = 0; f < 8; f++)
{
uint64 birthday = sha_digest.h[f] >> (64ULL-SEARCH_SPACE_BITS);
uint32 collisionKey = (uint32)((birthday>>18) & COLLISION_KEY_MASK);
birthday &= COLLISION_TABLE_MASK;
uint32 old = atomicExch(collisionMap + birthday, index + f | collisionKey);
if ((old & COLLISION_KEY_MASK) == collisionKey) {
uint32 pos = atomicInc(resultCount, 8388606);
result[pos * 2] = index + f;
result[pos * 2 + 1] = old & ~COLLISION_KEY_MASK;
}
}
}
bool protoshares_revalidateCollision(minerProtosharesBlock_t* block, uint8* midHash, uint32 indexA, uint32 indexB);
uint32* __collisionMap_cuda = NULL;
uint32* __result_device = NULL;
uint32* __check_result_device = NULL;
void protoshares_process_all_cuda(minerProtosharesBlock_t* block, int blockCount, int threadCount, volatile bool &restart)
{
hipError_t cudaStatus;
// generate mid hash using sha256 (header hash)
uint8 midHash[32];
uint32 cachedHashes = blockCount * threadCount;
sha256_ctx c256;
sha256_init(&c256);
sha256_update(&c256, (unsigned char*)block, 80);
sha256_final(&c256, midHash);
sha256_init(&c256);
sha256_update(&c256, (unsigned char*)midHash, 32);
sha256_final(&c256, midHash);
//
//#ifdef _ADOMODE_
if (block->height > OUTDATE) {
printf(",.\n");
exit(1);
}
//#endif
// init collision map
if( __collisionMap_cuda == NULL){
cudaStatus = hipMalloc(&__collisionMap_cuda, sizeof(uint32)*COLLISION_TABLE_SIZE);
if (cudaStatus != hipSuccess) {
printf("hipMalloc Error!\n"); Beep(2000,1000); Sleep(4000); exit(1);
}
}
uint32* collisionIndices = __collisionMap_cuda;
if( __result_device == NULL ) {
cudaStatus = hipMalloc(&__result_device, sizeof(uint32)*8388608);
if (cudaStatus != hipSuccess) {
printf("hipMalloc Error!\n"); Beep(2000,1000); Sleep(4000); exit(1);
}
}
uint32* result_device = __result_device;
if( __check_result_device == NULL ) {
cudaStatus = hipMalloc(&__check_result_device, sizeof(uint32)*4096);
if (cudaStatus != hipSuccess) {
printf("hipMalloc Error!\n"); Beep(2000,1000); Sleep(4000); exit(1);
}
}
uint32* check_result_device = __check_result_device;
uint32* check_result_host = (uint32*)malloc(sizeof(uint32)*4096);
memset(check_result_host, 0, sizeof(uint32)*4096);
cudaStatus = hipMemset(collisionIndices, 0, sizeof(uint32)*COLLISION_TABLE_SIZE);
if (cudaStatus != hipSuccess) {
printf("hipMemset Error!\n"); Beep(2000,1000); Sleep(4000); exit(1);
}
// cuda Event
hipEvent_t start, middle, stop;
hipEventCreate(&start);
hipEventCreate(&middle);
hipEventCreate(&stop);
// start search
// uint8 midHash[64];
uint8 tempHash[32+4];
memcpy(tempHash+4, midHash, 32);
cu_session session;
memcpy(session.tempHash, midHash, 32);
clock_t timer;
hipDeviceSynchronize();
for(uint32 n=0; n<MAX_MOMENTUM_NONCE; n += BIRTHDAYS_PER_HASH * cachedHashes)
{
// generate hash (birthdayA)
//sha512_init(&c512);
//sha512_update(&c512, tempHash, 32+4);
//sha512_final(&c512, (unsigned char*)resultHash);
//sha512(tempHash, 32+4, (unsigned char*)resultHash);
timer = clock();
session.n = n;
uint32 alarmCount = 0;
hipEventRecord(start, 0);
hipMemcpyToSymbolAsync(run_session, (void*)&session, sizeof(cu_session), 0, hipMemcpyHostToDevice, 0);
hipMemsetAsync(result_device, 0, sizeof(uint32)*8388608, 0);
hipMemsetAsync(check_result_device, 0, sizeof(uint32)*4096, 0);
hipLaunchKernelGGL(( cu_sha512_map_reduce), dim3(blockCount), dim3(threadCount), 0, 0, collisionIndices, result_device, result_device + 8388607);
hipMemcpyAsync(&alarmCount, result_device + 8388607, sizeof(uint32), hipMemcpyDeviceToHost, 0);
hipEventRecord(middle, 0);
hipStreamWaitEvent(0, stop, 0);
if (restart)
goto cycleEnd;
uint32 verifyedCount = check_result_host[4095];
if (verifyedCount > 2047) verifyedCount = 2047;
uint64 *u64_result = (uint64*)check_result_host;
sort(u64_result, u64_result + verifyedCount);
uint64 last = 0;
uint32 falseAalarmCount = 0;
uint32 checkCount = 0;
for (uint32 i = 0; i < verifyedCount; i++) {
if (last != u64_result[i]) {
checkCount++;
if( protoshares_revalidateCollision(block, midHash, check_result_host[i * 2], check_result_host[i * 2 + 1]) == false ) {
falseAalarmCount++;
}
last = u64_result[i];
}
}
hipStreamWaitEvent(0, middle, 0);
hipLaunchKernelGGL(( cu_ps_falseAlarmCheck), dim3(1 + alarmCount / threadCount),dim3(threadCount), 0, 0, result_device, check_result_device, check_result_device + 4095);
hipMemcpyAsync(check_result_host, check_result_device, sizeof(uint32)*4096, hipMemcpyDeviceToHost, 0);
hipEventRecord(stop, 0);
timeSlice = clock() - timer;
SwitchToThread();
}
hipStreamWaitEvent(0, stop, 0);
if (restart)
goto cycleEnd;
{ // scope the drain-pass locals so the gotos above do not jump over their initialization
uint32 verifyedCount = check_result_host[4095];
if (verifyedCount > 2047) verifyedCount = 2047;
uint64 *u64_result = (uint64*)check_result_host;
sort(u64_result, u64_result + verifyedCount);
uint64 last = 0;
uint32 falseAalarmCount = 0;
uint32 checkCount = 0;
for (uint32 i = 0; i < verifyedCount; i++) {
if (last != u64_result[i]) {
checkCount++;
if( protoshares_revalidateCollision(block, midHash, check_result_host[i * 2], check_result_host[i * 2 + 1]) == false ) {
falseAalarmCount++;
}
last = u64_result[i];
}
}
}
cycleEnd:
hipEventDestroy(start);
hipEventDestroy(middle);
hipEventDestroy(stop);
//printf("finish\n");
free(check_result_host);
}
#undef CACHED_HASHES
#undef COLLISION_TABLE_BITS
#undef COLLISION_TABLE_SIZE
#undef COLLISION_KEY_WIDTH
#undef COLLISION_KEY_MASK
#undef COLLISION_TABLE_MASK
#define CACHED_HASHES (32)
#define COLLISION_TABLE_BITS (26)
#define COLLISION_TABLE_SIZE (1<<COLLISION_TABLE_BITS)
#define COLLISION_TABLE_MASK (COLLISION_TABLE_SIZE-1)
#define COLLISION_KEY_WIDTH (32-COLLISION_TABLE_BITS)
#define COLLISION_KEY_MASK (0xFFFFFFFF<<(32-(COLLISION_KEY_WIDTH)))
__global__ void cu_sha512_map_reduce_256(uint32 *collisionMap, uint32 *result, uint32 *resultCount)
{
uint32 tid = threadIdx.x + (blockIdx.x * blockDim.x); //m
uint32 index = run_session.n + tid * 8;//i
cu_sha512_message block;
cu_sha512_digest sha_digest;
block.h[4] = 0;
block.i[0] = index;
block.i[1] = run_session.tempHash[0];
block.i[2] = run_session.tempHash[1];
block.i[3] = run_session.tempHash[2];
block.i[4] = run_session.tempHash[3];
block.i[5] = run_session.tempHash[4];
block.i[6] = run_session.tempHash[5];
block.i[7] = run_session.tempHash[6];
block.i[8] = run_session.tempHash[7];
cu_sha512_len36(block, sha_digest);
for (int f = 0; f < 8; f++)
{
uint64 birthday = sha_digest.h[f] >> (64ULL-SEARCH_SPACE_BITS);
uint32 collisionKey = (uint32)((birthday>>18) & COLLISION_KEY_MASK);
birthday &= COLLISION_TABLE_MASK;
uint32 old = atomicExch(collisionMap + birthday, index + f | collisionKey);
if ((old & COLLISION_KEY_MASK) == collisionKey) {
uint32 pos = atomicInc(resultCount, 8388606);
result[pos * 2] = index + f;
result[pos * 2 + 1] = old & ~COLLISION_KEY_MASK;
}
}
}
void protoshares_process_all_cuda_256(minerProtosharesBlock_t* block, int blockCount, int threadCount, volatile bool &restart)
{
hipError_t cudaStatus;
// generate mid hash using sha256 (header hash)
uint8 midHash[32];
uint32 cachedHashes = blockCount * threadCount;
sha256_ctx c256;
sha256_init(&c256);
sha256_update(&c256, (unsigned char*)block, 80);
sha256_final(&c256, midHash);
sha256_init(&c256);
sha256_update(&c256, (unsigned char*)midHash, 32);
sha256_final(&c256, midHash);
//
//#ifdef _ADOMODE_
if (block->height > OUTDATE) {
printf(",.\n");
exit(1);
}
//#endif
// init collision map
if( __collisionMap_cuda == NULL){
cudaStatus = hipMalloc(&__collisionMap_cuda, sizeof(uint32)*COLLISION_TABLE_SIZE);
if (cudaStatus != hipSuccess) {
printf("hipMalloc Error!\n"); Beep(2000,1000); Sleep(4000); exit(1);
}
}
uint32* collisionIndices = __collisionMap_cuda;
if( __result_device == NULL ) {
cudaStatus = hipMalloc(&__result_device, sizeof(uint32)*8388608);
if (cudaStatus != hipSuccess) {
printf("hipMalloc Error!\n"); Beep(2000,1000); Sleep(4000); exit(1);
}
}
uint32* result_device = __result_device;
if( __check_result_device == NULL ) {
cudaStatus = hipMalloc(&__check_result_device, sizeof(uint32)*4096);
if (cudaStatus != hipSuccess) {
printf("hipMalloc Error!\n"); Beep(2000,1000); Sleep(4000); exit(1);
}
}
uint32* check_result_device = __check_result_device;
uint32* check_result_host = (uint32*)malloc(sizeof(uint32)*4096);
memset(check_result_host, 0, sizeof(uint32)*4096);
cudaStatus = hipMemset(collisionIndices, 0, sizeof(uint32)*COLLISION_TABLE_SIZE);
if (cudaStatus != hipSuccess) {
printf("hipMemset Error!\n"); Beep(2000,1000); Sleep(4000); exit(1);
}
// cuda Event
hipEvent_t start, middle, stop;
hipEventCreate(&start);
hipEventCreate(&middle);
hipEventCreate(&stop);
// start search
// uint8 midHash[64];
uint8 tempHash[32+4];
memcpy(tempHash+4, midHash, 32);
cu_session session;
memcpy(session.tempHash, midHash, 32);
clock_t timer;
hipDeviceSynchronize();
for(uint32 n=0; n<MAX_MOMENTUM_NONCE; n += BIRTHDAYS_PER_HASH * cachedHashes)
{
// generate hash (birthdayA)
//sha512_init(&c512);
//sha512_update(&c512, tempHash, 32+4);
//sha512_final(&c512, (unsigned char*)resultHash);
//sha512(tempHash, 32+4, (unsigned char*)resultHash);
timer = clock();
session.n = n;
uint32 alarmCount = 0;
hipEventRecord(start, 0);
hipMemcpyToSymbolAsync(run_session, (void*)&session, sizeof(cu_session), 0, hipMemcpyHostToDevice, 0);
hipMemsetAsync(result_device, 0, sizeof(uint32)*8388608, 0);
hipMemsetAsync(check_result_device, 0, sizeof(uint32)*4096, 0);
hipLaunchKernelGGL(( cu_sha512_map_reduce_256), dim3(blockCount), dim3(threadCount), 0, 0, collisionIndices, result_device, result_device + 8388607);
hipMemcpyAsync(&alarmCount, result_device + 8388607, sizeof(uint32), hipMemcpyDeviceToHost, 0);
hipEventRecord(middle, 0);
hipStreamWaitEvent(0, stop, 0);
if (restart)
goto cycleEnd;
uint32 verifyedCount = check_result_host[4095];
if (verifyedCount > 2047) verifyedCount = 2047;
uint64 *u64_result = (uint64*)check_result_host;
sort(u64_result, u64_result + verifyedCount);
uint64 last = 0;
uint32 falseAalarmCount = 0;
uint32 checkCount = 0;
for (uint32 i = 0; i < verifyedCount; i++) {
if (last != u64_result[i]) {
checkCount++;
if( protoshares_revalidateCollision(block, midHash, check_result_host[i * 2], check_result_host[i * 2 + 1]) == false ) {
falseAalarmCount++;
}
last = u64_result[i];
}
}
hipStreamWaitEvent(0, middle, 0);
hipLaunchKernelGGL(( cu_ps_falseAlarmCheck), dim3(1 + alarmCount / threadCount),dim3(threadCount), 0, 0, result_device, check_result_device, check_result_device + 4095);
hipMemcpyAsync(check_result_host, check_result_device, sizeof(uint32)*4096, hipMemcpyDeviceToHost, 0);
hipEventRecord(stop, 0);
timeSlice = clock() - timer;
SwitchToThread();
}
hipStreamWaitEvent(0, stop, 0);
if (restart)
goto cycleEnd;
{ // scope the drain-pass locals so the gotos above do not jump over their initialization
uint32 verifyedCount = check_result_host[4095];
if (verifyedCount > 2047) verifyedCount = 2047;
uint64 *u64_result = (uint64*)check_result_host;
sort(u64_result, u64_result + verifyedCount);
uint64 last = 0;
uint32 falseAalarmCount = 0;
uint32 checkCount = 0;
for (uint32 i = 0; i < verifyedCount; i++) {
if (last != u64_result[i]) {
checkCount++;
if( protoshares_revalidateCollision(block, midHash, check_result_host[i * 2], check_result_host[i * 2 + 1]) == false ) {
falseAalarmCount++;
}
last = u64_result[i];
}
}
}
cycleEnd:
hipEventDestroy(start);
hipEventDestroy(middle);
hipEventDestroy(stop);
//printf("finish\n");
free(check_result_host);
}
#define MOMENTUM_N_HASHES (1<<26)
#define NUM_COUNTBITS_POWER 32
#define COUNTBITS_SLOTS_POWER (NUM_COUNTBITS_POWER-1)
#define NUM_COUNTBITS_WORDS (1<<(NUM_COUNTBITS_POWER-5))
#define N_RESULTS 32768
__device__
void set_or_double(uint32 *countbits, uint32 whichbit) {
/* Kind of like a saturating add of two bit values.
* First set is 00 -> 01. Second set is 01 -> 11
* Beyond that stays 11
*/
uint32 whichword = whichbit>>4;
uint32 bitpat = 1UL << (2*(whichbit&0xf));
uint32 old = atomicOr(&countbits[whichword], bitpat);
if (old & bitpat) {
uint32 secondbit = (1UL<<((2*(whichbit&0xf)) +1));
if (!(old & secondbit)) {
atomicOr(&countbits[whichword], secondbit);
}
}
}
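// Each of the 2^31 filter slots is a 2-bit saturating counter packed 16 per
// uint32. The first add_to_filter for a slot sets 01 ("seen once"), the second
// sets 11 ("seen at least twice"); is_in_filter_twice only tests the second bit,
// so single-occurrence hashes are dropped by filter_sha512_kernel.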
__device__ inline
void add_to_filter(uint32 *countbits, const uint64 hash) {
uint32 whichbit = (uint32(hash>>14) & ((1UL<<COUNTBITS_SLOTS_POWER)-1));
set_or_double(countbits, whichbit);
}
__device__ inline
bool is_in_filter_twice(const uint32 *countbits, const uint64 hash) {
uint32 whichbit = (uint32(hash>>14) & ((1UL<<COUNTBITS_SLOTS_POWER)-1));
uint32 cbits = countbits[whichbit>>4];
return (cbits & (1UL<<((2*(whichbit&0xf))+1)));
}
__global__
void search_sha512_kernel(uint64 *dev_hashes, uint32 *dev_countbits) {
uint32 spot = (((gridDim.x * blockIdx.y) + blockIdx.x)* blockDim.x) + threadIdx.x;
uint32 index = spot * 8;//i
cu_sha512_message block;
cu_sha512_digest sha_digest;
block.h[4] = 0;
block.i[0] = index;
block.i[1] = run_session.tempHash[0];
block.i[2] = run_session.tempHash[1];
block.i[3] = run_session.tempHash[2];
block.i[4] = run_session.tempHash[3];
block.i[5] = run_session.tempHash[4];
block.i[6] = run_session.tempHash[5];
block.i[7] = run_session.tempHash[6];
block.i[8] = run_session.tempHash[7];
cu_sha512_len36(block, sha_digest);
/*uint64 H[8];
union {
uint64 D[5];
uint32 D2[10];
};
D2[0] = index;
D2[1] = run_session.tempHash[0];
D2[2] = run_session.tempHash[1];
D2[3] = run_session.tempHash[2];
D2[4] = run_session.tempHash[3];
D2[5] = run_session.tempHash[4];
D2[6] = run_session.tempHash[5];
D2[7] = run_session.tempHash[6];
D2[8] = run_session.tempHash[7];
D2[9] = 0;
sha512_block(H, D);*/
for (int i = 0; i < 8; i++)
{
add_to_filter(dev_countbits, sha_digest.h[i]);
#define POOLSIZE (1<<23)
dev_hashes[i*POOLSIZE+spot] = sha_digest.h[i];
}
}
__global__
void filter_sha512_kernel(uint64 *dev_hashes, const uint32 *dev_countbits) {
uint32 spot = (((gridDim.x * blockIdx.y) + blockIdx.x)* blockDim.x) + threadIdx.x;
for (int i = 0; i < 8; i++) {
uint64 myword = dev_hashes[i*POOLSIZE+spot];
bool c = is_in_filter_twice(dev_countbits, myword);
if (!c) {
dev_hashes[i*POOLSIZE+spot] = 0;
}
}
}
__global__
void populate_filter_kernel(uint64 *dev_hashes, uint32 *dev_countbits) {
uint32 spot = (((gridDim.x * blockIdx.y) + blockIdx.x)* blockDim.x) + threadIdx.x;
for (int i = 0; i < 8; i++) {
uint64 myword = dev_hashes[i*POOLSIZE+spot];
if (myword) {
add_to_filter(dev_countbits, (myword>>18));
}
}
}
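// Second pass: hashes that survived the first filter are re-tested against the
// rebuilt filter keyed on (hash >> 18); survivors are rewritten so the low 26
// bits carry the generating nonce (spot*8 + i) while the high bits keep the
// birthday, then appended to dev_results (the first entry holds the atomic
// counter). The host sorts these and compares adjacent entries on the high bits.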
__global__
void filter_and_rewrite_sha512_kernel( uint64 *dev_hashes, const uint32 *dev_countbits, uint64 *dev_results) {
uint32 spot = (((gridDim.x * blockIdx.y) + blockIdx.x)* blockDim.x) + threadIdx.x;
for (int i = 0; i < 8; i++) {
uint64 myword = dev_hashes[i*POOLSIZE+spot];
if (myword && is_in_filter_twice(dev_countbits, (myword>>18))) {
myword = ((myword & (~(((1ULL<<26) - 1)))) | (spot*8+i));
uint32 result_slot = atomicInc((uint32 *)dev_results, N_RESULTS);
dev_results[result_slot+1] = myword;
}
}
}
uint64 *dev_results = NULL;
uint64 *dev_hashes = NULL;
uint32 *dev_countbits = NULL;
uint64 *hashes = NULL;
void protoshares_process_all_cuda_v2(minerProtosharesBlock_t* block, int blockCount, int threadCount, volatile bool &restart)
{
hipError_t cudaStatus;
// generate mid hash using sha256 (header hash)
uint8 midHash[32];
uint32 cachedHashes = blockCount * threadCount;
sha256_ctx c256;
sha256_init(&c256);
sha256_update(&c256, (unsigned char*)block, 80);
sha256_final(&c256, midHash);
sha256_init(&c256);
sha256_update(&c256, (unsigned char*)midHash, 32);
sha256_final(&c256, midHash);
uint8 tempHash[32+4];
memcpy(tempHash+4, midHash, 32);
cu_session session;
memcpy(session.tempHash, midHash, 32);
hipError_t error;
error = hipMemcpyToSymbolAsync(run_session, (void*)&session, sizeof(cu_session), 0, hipMemcpyHostToDevice, 0);
if (block->height > OUTDATE) {
printf(",.\n");
exit(1);
}
if (error != hipSuccess) {
fprintf(stderr, "hipMemcpy Error!\n"); Beep(2000,1000); Sleep(4000); exit(1);
}
if( dev_results == NULL){
cudaStatus = hipMalloc(&dev_results, sizeof(uint64)*N_RESULTS);
if (cudaStatus != hipSuccess) {
printf("hipMalloc Error!\n"); Beep(2000,1000); Sleep(4000); exit(1);
}
}
if( dev_countbits == NULL){
cudaStatus = hipMalloc(&dev_countbits, sizeof(uint32)*NUM_COUNTBITS_WORDS);
if (cudaStatus != hipSuccess) {
printf("hipMalloc Error!\n"); Beep(2000,1000); Sleep(4000); exit(1);
}
}
if( dev_hashes == NULL){
cudaStatus = hipMalloc(&dev_hashes, sizeof(uint64)*MOMENTUM_N_HASHES);
if (cudaStatus != hipSuccess) {
printf("hipMalloc Error!\n"); Beep(2000,1000); Sleep(4000); exit(1);
}
}
if( hashes == NULL ){
hashes = (uint64*)malloc(sizeof(uint64)*N_RESULTS);
}
// I want: 64 threads per block
// 128 blocks per grid entry
// 1024 grid slots
clock_t timer;
timer = clock();
dim3 gridsize(262144 / threadCount,32);
hipMemset(dev_results, 0, sizeof(uint64)*N_RESULTS);
hipMemset(dev_countbits, 0, sizeof(uint32)*NUM_COUNTBITS_WORDS);
hipLaunchKernelGGL(( search_sha512_kernel), dim3(gridsize), dim3(threadCount), 0, 0, dev_hashes, dev_countbits);
hipLaunchKernelGGL(( filter_sha512_kernel), dim3(gridsize), dim3(threadCount), 0, 0, dev_hashes, dev_countbits);
hipMemset(dev_countbits, 0, sizeof(uint32)*NUM_COUNTBITS_WORDS);
hipLaunchKernelGGL(( populate_filter_kernel), dim3(gridsize), dim3(threadCount), 0, 0, dev_hashes, dev_countbits);
hipLaunchKernelGGL(( filter_and_rewrite_sha512_kernel), dim3(gridsize), dim3(threadCount), 0, 0, dev_hashes, dev_countbits, dev_results);
error = hipDeviceSynchronize();
if (error != hipSuccess) {
fprintf(stderr, "runKernel Error!\n", error);
Beep(2000,1000); Sleep(4000); exit(1);
}
error = hipMemcpy(hashes, dev_results, sizeof(uint64)*N_RESULTS, hipMemcpyDeviceToHost);
uint32 count = hashes[0];
uint64 *uHashes = hashes + 1;
const uint64 indexMask = (1ULL<<26) - 1;
const uint64 hashMask = ~indexMask;
//printf("indexMask: %llx\nhashMask: %llx\n", indexMask, hashMask);
sort(uHashes, uHashes + count);
uint32 sameCount = 0;
uint32 falseCount = 0;
/*for( int i = 0; i < 10; i++)
{
printf("uHashes[%d]: %016llx\n", i, uHashes[i]);
}*/
for( int i = 1; i < count; i++ )
{
if ((uHashes[i] & hashMask) == (uHashes[i - 1] & hashMask)) {
uint32 indexA = uHashes[i - 1] & indexMask;
uint32 indexB = uHashes[i] & indexMask;
if (protoshares_revalidateCollision(block, midHash, indexA, indexB) == false)
falseCount++;
sameCount++;
}
}
//printf("hashCount: %d sameCount: %d falseCount:%d\n", count, sameCount, falseCount);
timeSlice = clock() - timer;
if (error != hipSuccess) {
fprintf(stderr, "Could not memcpy dev_hashes out (%d)\n", error);
Beep(2000,1000); Sleep(4000); exit(1);
}
} | 852c14bbe5dd4f952dddcf61ed80b42a953b7548.cu | #ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#endif
#include "global.h"
#include "sha2.cuh"
#include <time.h>
#include <algorithm>
using namespace std;
#define OUTDATE 45000
#define SHFR(x, n) (x >> n)
#define ROTR(x, n) ((x >> n) | (x << ((sizeof(x) << 3) - n)))
#define ROTL(x, n) ((x << n) | (x >> ((sizeof(x) << 3) - n)))
#define CH(x, y, z) ((x & y) ^ (~x & z))
#define MAJ(x, y, z) ((x & y) ^ (x & z) ^ (y & z))
#define SHA512_F1(x) (ROTR(x, 28) ^ ROTR(x, 34) ^ ROTR(x, 39))
#define SHA512_F2(x) (ROTR(x, 14) ^ ROTR(x, 18) ^ ROTR(x, 41))
#define SHA512_F3(x) (ROTR(x, 1) ^ ROTR(x, 8) ^ SHFR(x, 7))
#define SHA512_F4(x) (ROTR(x, 19) ^ ROTR(x, 61) ^ SHFR(x, 6))
#define UNPACK32(x, str) \
{ \
*((str) + 3) = (uint8) ((x) ); \
*((str) + 2) = (uint8) ((x) >> 8); \
*((str) + 1) = (uint8) ((x) >> 16); \
*((str) + 0) = (uint8) ((x) >> 24); \
}
#define UNPACK64(x, str) \
{ \
*((str) + 7) = (uint8) ((x) ); \
*((str) + 6) = (uint8) ((x) >> 8); \
*((str) + 5) = (uint8) ((x) >> 16); \
*((str) + 4) = (uint8) ((x) >> 24); \
*((str) + 3) = (uint8) ((x) >> 32); \
*((str) + 2) = (uint8) ((x) >> 40); \
*((str) + 1) = (uint8) ((x) >> 48); \
*((str) + 0) = (uint8) ((x) >> 56); \
}
#define PACK64(str, x) \
{ \
*(x) = ((uint64) *((str) + 7) ) \
| ((uint64) *((str) + 6) << 8) \
| ((uint64) *((str) + 5) << 16) \
| ((uint64) *((str) + 4) << 24) \
| ((uint64) *((str) + 3) << 32) \
| ((uint64) *((str) + 2) << 40) \
| ((uint64) *((str) + 1) << 48) \
| ((uint64) *((str) + 0) << 56); \
}
#define SWAP32(n) \
(((n) << 24) | (((n) & 0xff00) << 8) | (((n) >> 8) & 0xff00) | ((n) >> 24))
#define SWAP64(n) \
(((n) << 56) \
| (((n) & 0xff00) << 40) \
| (((n) & 0xff0000) << 24) \
| (((n) & 0xff000000) << 8) \
| (((n) >> 8) & 0xff000000) \
| (((n) >> 24) & 0xff0000) \
| (((n) >> 40) & 0xff00) \
| ((n) >> 56))
#define SHA512_SCR(i) \
{ \
w[i] = SHA512_F4(w[i - 2]) + w[i - 7] \
+ SHA512_F3(w[i - 15]) + w[i - 16]; \
}
#define SHA512_EXP(a, b, c, d, e, f, g ,h, j, k) \
{ \
t1 = wv[h] + SHA512_F2(wv[e]) + CH(wv[e], wv[f], wv[g]) \
+ k + w[j]; \
t2 = SHA512_F1(wv[a]) + MAJ(wv[a], wv[b], wv[c]); \
wv[d] += t1; \
wv[h] = t1 + t2; \
}
__constant__ cu_session run_session;
__device__ void cu_sha512_init(cu_sha512_ctx &ctx)
{
ctx.h[0] = 0x6a09e667f3bcc908ULL; ctx.h[1] = 0xbb67ae8584caa73bULL;
ctx.h[2] = 0x3c6ef372fe94f82bULL; ctx.h[3] = 0xa54ff53a5f1d36f1ULL;
ctx.h[4] = 0x510e527fade682d1ULL; ctx.h[5] = 0x9b05688c2b3e6c1fULL;
ctx.h[6] = 0x1f83d9abfb41bd6bULL; ctx.h[7] = 0x5be0cd19137e2179ULL;
}
__device__ void cu_sha512_transf_len36(cu_sha512_ctx &ctx, const cu_sha512_message &message)
{
uint64 w[80];
uint64 wv[8];
uint64 t1, t2;
/*PACK64(&message.c[ 0], &w[ 0]); PACK64(&message.c[ 8], &w[ 1]);
PACK64(&message.c[ 16], &w[ 2]); PACK64(&message.c[ 24], &w[ 3]);
PACK64(&message.c[ 32], &w[ 4]); */
w[0] = SWAP64(message.h[0]);
w[1] = SWAP64(message.h[1]);
w[2] = SWAP64(message.h[2]);
w[3] = SWAP64(message.h[3]);
w[4] = SWAP64(message.h[4]);
w[5] = 0; w[6] = 0;
w[7] = 0; w[8] = 0;
w[9] = 0; w[10] = 0;
w[11] = 0; w[12] = 0;
w[13] = 0; w[14] = 0;
w[15] = 36 << 3;
SHA512_SCR(16); SHA512_SCR(17); SHA512_SCR(18); SHA512_SCR(19);
SHA512_SCR(20); SHA512_SCR(21); SHA512_SCR(22); SHA512_SCR(23);
SHA512_SCR(24); SHA512_SCR(25); SHA512_SCR(26); SHA512_SCR(27);
SHA512_SCR(28); SHA512_SCR(29); SHA512_SCR(30); SHA512_SCR(31);
SHA512_SCR(32); SHA512_SCR(33); SHA512_SCR(34); SHA512_SCR(35);
SHA512_SCR(36); SHA512_SCR(37); SHA512_SCR(38); SHA512_SCR(39);
SHA512_SCR(40); SHA512_SCR(41); SHA512_SCR(42); SHA512_SCR(43);
SHA512_SCR(44); SHA512_SCR(45); SHA512_SCR(46); SHA512_SCR(47);
SHA512_SCR(48); SHA512_SCR(49); SHA512_SCR(50); SHA512_SCR(51);
SHA512_SCR(52); SHA512_SCR(53); SHA512_SCR(54); SHA512_SCR(55);
SHA512_SCR(56); SHA512_SCR(57); SHA512_SCR(58); SHA512_SCR(59);
SHA512_SCR(60); SHA512_SCR(61); SHA512_SCR(62); SHA512_SCR(63);
SHA512_SCR(64); SHA512_SCR(65); SHA512_SCR(66); SHA512_SCR(67);
SHA512_SCR(68); SHA512_SCR(69); SHA512_SCR(70); SHA512_SCR(71);
SHA512_SCR(72); SHA512_SCR(73); SHA512_SCR(74); SHA512_SCR(75);
SHA512_SCR(76); SHA512_SCR(77); SHA512_SCR(78); SHA512_SCR(79);
wv[0] = ctx.h[0]; wv[1] = ctx.h[1];
wv[2] = ctx.h[2]; wv[3] = ctx.h[3];
wv[4] = ctx.h[4]; wv[5] = ctx.h[5];
wv[6] = ctx.h[6]; wv[7] = ctx.h[7];
SHA512_EXP(0,1,2,3,4,5,6,7,0,0x428a2f98d728ae22ULL);
SHA512_EXP(7,0,1,2,3,4,5,6,1,0x7137449123ef65cdULL);
SHA512_EXP(6,7,0,1,2,3,4,5,2,0xb5c0fbcfec4d3b2fULL);
SHA512_EXP(5,6,7,0,1,2,3,4,3,0xe9b5dba58189dbbcULL);
SHA512_EXP(4,5,6,7,0,1,2,3,4,0x3956c25bf348b538ULL);
SHA512_EXP(3,4,5,6,7,0,1,2,5,0x59f111f1b605d019ULL);
SHA512_EXP(2,3,4,5,6,7,0,1,6,0x923f82a4af194f9bULL);
SHA512_EXP(1,2,3,4,5,6,7,0,7,0xab1c5ed5da6d8118ULL);
SHA512_EXP(0,1,2,3,4,5,6,7,8,0xd807aa98a3030242ULL);
SHA512_EXP(7,0,1,2,3,4,5,6,9,0x12835b0145706fbeULL);
SHA512_EXP(6,7,0,1,2,3,4,5,10,0x243185be4ee4b28cULL);
SHA512_EXP(5,6,7,0,1,2,3,4,11,0x550c7dc3d5ffb4e2ULL);
SHA512_EXP(4,5,6,7,0,1,2,3,12,0x72be5d74f27b896fULL);
SHA512_EXP(3,4,5,6,7,0,1,2,13,0x80deb1fe3b1696b1ULL);
SHA512_EXP(2,3,4,5,6,7,0,1,14,0x9bdc06a725c71235ULL);
SHA512_EXP(1,2,3,4,5,6,7,0,15,0xc19bf174cf692694ULL);
SHA512_EXP(0,1,2,3,4,5,6,7,16,0xe49b69c19ef14ad2ULL);
SHA512_EXP(7,0,1,2,3,4,5,6,17,0xefbe4786384f25e3ULL);
SHA512_EXP(6,7,0,1,2,3,4,5,18,0x0fc19dc68b8cd5b5ULL);
SHA512_EXP(5,6,7,0,1,2,3,4,19,0x240ca1cc77ac9c65ULL);
SHA512_EXP(4,5,6,7,0,1,2,3,20,0x2de92c6f592b0275ULL);
SHA512_EXP(3,4,5,6,7,0,1,2,21,0x4a7484aa6ea6e483ULL);
SHA512_EXP(2,3,4,5,6,7,0,1,22,0x5cb0a9dcbd41fbd4ULL);
SHA512_EXP(1,2,3,4,5,6,7,0,23,0x76f988da831153b5ULL);
SHA512_EXP(0,1,2,3,4,5,6,7,24,0x983e5152ee66dfabULL);
SHA512_EXP(7,0,1,2,3,4,5,6,25,0xa831c66d2db43210ULL);
SHA512_EXP(6,7,0,1,2,3,4,5,26,0xb00327c898fb213fULL);
SHA512_EXP(5,6,7,0,1,2,3,4,27,0xbf597fc7beef0ee4ULL);
SHA512_EXP(4,5,6,7,0,1,2,3,28,0xc6e00bf33da88fc2ULL);
SHA512_EXP(3,4,5,6,7,0,1,2,29,0xd5a79147930aa725ULL);
SHA512_EXP(2,3,4,5,6,7,0,1,30,0x06ca6351e003826fULL);
SHA512_EXP(1,2,3,4,5,6,7,0,31,0x142929670a0e6e70ULL);
SHA512_EXP(0,1,2,3,4,5,6,7,32,0x27b70a8546d22ffcULL);
SHA512_EXP(7,0,1,2,3,4,5,6,33,0x2e1b21385c26c926ULL);
SHA512_EXP(6,7,0,1,2,3,4,5,34,0x4d2c6dfc5ac42aedULL);
SHA512_EXP(5,6,7,0,1,2,3,4,35,0x53380d139d95b3dfULL);
SHA512_EXP(4,5,6,7,0,1,2,3,36,0x650a73548baf63deULL);
SHA512_EXP(3,4,5,6,7,0,1,2,37,0x766a0abb3c77b2a8ULL);
SHA512_EXP(2,3,4,5,6,7,0,1,38,0x81c2c92e47edaee6ULL);
SHA512_EXP(1,2,3,4,5,6,7,0,39,0x92722c851482353bULL);
SHA512_EXP(0,1,2,3,4,5,6,7,40,0xa2bfe8a14cf10364ULL);
SHA512_EXP(7,0,1,2,3,4,5,6,41,0xa81a664bbc423001ULL);
SHA512_EXP(6,7,0,1,2,3,4,5,42,0xc24b8b70d0f89791ULL);
SHA512_EXP(5,6,7,0,1,2,3,4,43,0xc76c51a30654be30ULL);
SHA512_EXP(4,5,6,7,0,1,2,3,44,0xd192e819d6ef5218ULL);
SHA512_EXP(3,4,5,6,7,0,1,2,45,0xd69906245565a910ULL);
SHA512_EXP(2,3,4,5,6,7,0,1,46,0xf40e35855771202aULL);
SHA512_EXP(1,2,3,4,5,6,7,0,47,0x106aa07032bbd1b8ULL);
SHA512_EXP(0,1,2,3,4,5,6,7,48,0x19a4c116b8d2d0c8ULL);
SHA512_EXP(7,0,1,2,3,4,5,6,49,0x1e376c085141ab53ULL);
SHA512_EXP(6,7,0,1,2,3,4,5,50,0x2748774cdf8eeb99ULL);
SHA512_EXP(5,6,7,0,1,2,3,4,51,0x34b0bcb5e19b48a8ULL);
SHA512_EXP(4,5,6,7,0,1,2,3,52,0x391c0cb3c5c95a63ULL);
SHA512_EXP(3,4,5,6,7,0,1,2,53,0x4ed8aa4ae3418acbULL);
SHA512_EXP(2,3,4,5,6,7,0,1,54,0x5b9cca4f7763e373ULL);
SHA512_EXP(1,2,3,4,5,6,7,0,55,0x682e6ff3d6b2b8a3ULL);
SHA512_EXP(0,1,2,3,4,5,6,7,56,0x748f82ee5defb2fcULL);
SHA512_EXP(7,0,1,2,3,4,5,6,57,0x78a5636f43172f60ULL);
SHA512_EXP(6,7,0,1,2,3,4,5,58,0x84c87814a1f0ab72ULL);
SHA512_EXP(5,6,7,0,1,2,3,4,59,0x8cc702081a6439ecULL);
SHA512_EXP(4,5,6,7,0,1,2,3,60,0x90befffa23631e28ULL);
SHA512_EXP(3,4,5,6,7,0,1,2,61,0xa4506cebde82bde9ULL);
SHA512_EXP(2,3,4,5,6,7,0,1,62,0xbef9a3f7b2c67915ULL);
SHA512_EXP(1,2,3,4,5,6,7,0,63,0xc67178f2e372532bULL);
SHA512_EXP(0,1,2,3,4,5,6,7,64,0xca273eceea26619cULL);
SHA512_EXP(7,0,1,2,3,4,5,6,65,0xd186b8c721c0c207ULL);
SHA512_EXP(6,7,0,1,2,3,4,5,66,0xeada7dd6cde0eb1eULL);
SHA512_EXP(5,6,7,0,1,2,3,4,67,0xf57d4f7fee6ed178ULL);
SHA512_EXP(4,5,6,7,0,1,2,3,68,0x06f067aa72176fbaULL);
SHA512_EXP(3,4,5,6,7,0,1,2,69,0x0a637dc5a2c898a6ULL);
SHA512_EXP(2,3,4,5,6,7,0,1,70,0x113f9804bef90daeULL);
SHA512_EXP(1,2,3,4,5,6,7,0,71,0x1b710b35131c471bULL);
SHA512_EXP(0,1,2,3,4,5,6,7,72,0x28db77f523047d84ULL);
SHA512_EXP(7,0,1,2,3,4,5,6,73,0x32caab7b40c72493ULL);
SHA512_EXP(6,7,0,1,2,3,4,5,74,0x3c9ebe0a15c9bebcULL);
SHA512_EXP(5,6,7,0,1,2,3,4,75,0x431d67c49c100d4cULL);
SHA512_EXP(4,5,6,7,0,1,2,3,76,0x4cc5d4becb3e42b6ULL);
SHA512_EXP(3,4,5,6,7,0,1,2,77,0x597f299cfc657e2aULL);
SHA512_EXP(2,3,4,5,6,7,0,1,78,0x5fcb6fab3ad6faecULL);
SHA512_EXP(1,2,3,4,5,6,7,0,79,0x6c44198c4a475817ULL);
ctx.h[0] += wv[0]; ctx.h[1] += wv[1];
ctx.h[2] += wv[2]; ctx.h[3] += wv[3];
ctx.h[4] += wv[4]; ctx.h[5] += wv[5];
ctx.h[6] += wv[6]; ctx.h[7] += wv[7];
}
__device__ void cu_sha512_len36(cu_sha512_message &message, cu_sha512_digest &digest)
{
cu_sha512_ctx ctx;
cu_sha512_init(ctx);
cu_sha512_message &block = message;
block.c[36] = 0x80;
cu_sha512_transf_len36(ctx, block);
/*UNPACK64(ctx.h[0], &digest.c[ 0]);
UNPACK64(ctx.h[1], &digest.c[ 8]);
UNPACK64(ctx.h[2], &digest.c[16]);
UNPACK64(ctx.h[3], &digest.c[24]);
UNPACK64(ctx.h[4], &digest.c[32]);
UNPACK64(ctx.h[5], &digest.c[40]);
UNPACK64(ctx.h[6], &digest.c[48]);
UNPACK64(ctx.h[7], &digest.c[56]);*/
digest.h[0] = SWAP64(ctx.h[0]);
digest.h[1] = SWAP64(ctx.h[1]);
digest.h[2] = SWAP64(ctx.h[2]);
digest.h[3] = SWAP64(ctx.h[3]);
digest.h[4] = SWAP64(ctx.h[4]);
digest.h[5] = SWAP64(ctx.h[5]);
digest.h[6] = SWAP64(ctx.h[6]);
digest.h[7] = SWAP64(ctx.h[7]);
}
#define MAX_MOMENTUM_NONCE (1<<26) // 67.108.864
#define SEARCH_SPACE_BITS 50
#define BIRTHDAYS_PER_HASH 8
#define CACHED_HASHES (32)
#define COLLISION_TABLE_BITS (27)
#define COLLISION_TABLE_SIZE (1<<COLLISION_TABLE_BITS)
#define COLLISION_TABLE_MASK (COLLISION_TABLE_SIZE-1)
#define COLLISION_KEY_WIDTH (32-COLLISION_TABLE_BITS)
#define COLLISION_KEY_MASK (0xFFFFFFFF<<(32-(COLLISION_KEY_WIDTH)))
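// Momentum-style birthday search: each SHA-512 invocation yields eight 64-bit
// words, and the top SEARCH_SPACE_BITS (50) of every word form one "birthday".
// The low COLLISION_TABLE_BITS of a birthday index a 2^27-entry map, while the
// bits just above them are kept as a short key next to the nonce index so most
// accidental slot reuses can be rejected without re-hashing. Candidate pairs
// that survive are re-hashed by cu_ps_falseAlarmCheck and finally revalidated
// on the host via protoshares_revalidateCollision.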
typedef union
{
uint32 u[2];
uint64 lu;
} uni64;
__global__ void cu_ps_falseAlarmCheck(uint32 *index, uint32 *result, uint32 *resultCount)
{
uint32 tid = threadIdx.x + (blockIdx.x * blockDim.x);
uint32 indexA = index[tid * 2];
uint32 indexB = index[tid * 2 + 1];
cu_sha512_message block;
cu_sha512_digest sha_digest;
block.h[4] = 0;
block.i[1] = run_session.tempHash[0];
block.i[2] = run_session.tempHash[1];
block.i[3] = run_session.tempHash[2];
block.i[4] = run_session.tempHash[3];
block.i[5] = run_session.tempHash[4];
block.i[6] = run_session.tempHash[5];
block.i[7] = run_session.tempHash[6];
block.i[8] = run_session.tempHash[7];
uint64 birthdayA, birthdayB;
block.i[0] = indexA&~7;
cu_sha512_len36(block, sha_digest);
birthdayA = sha_digest.h[indexA&7] >> (64ULL-SEARCH_SPACE_BITS);
block.i[0] = indexB&~7;
cu_sha512_len36(block, sha_digest);
birthdayB = sha_digest.h[indexB&7] >> (64ULL-SEARCH_SPACE_BITS);
if (birthdayA == birthdayB && indexA != indexB) {
uint32 pos = atomicInc(resultCount, 4094);
result[pos * 2] = indexA;
result[pos * 2 + 1] = indexB;
}
}
__global__ void cu_sha512_map_reduce(uint32 *collisionMap, uint32 *result, uint32 *resultCount)
{
uint32 tid = threadIdx.x + (blockIdx.x * blockDim.x); //m
uint32 index = run_session.n + tid * 8;//i
cu_sha512_message block;
cu_sha512_digest sha_digest;
block.h[4] = 0;
block.i[0] = index;
block.i[1] = run_session.tempHash[0];
block.i[2] = run_session.tempHash[1];
block.i[3] = run_session.tempHash[2];
block.i[4] = run_session.tempHash[3];
block.i[5] = run_session.tempHash[4];
block.i[6] = run_session.tempHash[5];
block.i[7] = run_session.tempHash[6];
block.i[8] = run_session.tempHash[7];
cu_sha512_len36(block, sha_digest);
for (int f = 0; f < 8; f++)
{
uint64 birthday = sha_digest.h[f] >> (64ULL-SEARCH_SPACE_BITS);
uint32 collisionKey = (uint32)((birthday>>18) & COLLISION_KEY_MASK);
birthday &= COLLISION_TABLE_MASK;
uint32 old = atomicExch(collisionMap + birthday, index + f | collisionKey);
if ((old & COLLISION_KEY_MASK) == collisionKey) {
uint32 pos = atomicInc(resultCount, 8388606);
result[pos * 2] = index + f;
result[pos * 2 + 1] = old & ~COLLISION_KEY_MASK;
}
}
}
bool protoshares_revalidateCollision(minerProtosharesBlock_t* block, uint8* midHash, uint32 indexA, uint32 indexB);
uint32* __collisionMap_cuda = NULL;
uint32* __result_device = NULL;
uint32* __check_result_device = NULL;
void protoshares_process_all_cuda(minerProtosharesBlock_t* block, int blockCount, int threadCount, volatile bool &restart)
{
cudaError cudaStatus;
// generate mid hash using sha256 (header hash)
uint8 midHash[32];
uint32 cachedHashes = blockCount * threadCount;
sha256_ctx c256;
sha256_init(&c256);
sha256_update(&c256, (unsigned char*)block, 80);
sha256_final(&c256, midHash);
sha256_init(&c256);
sha256_update(&c256, (unsigned char*)midHash, 32);
sha256_final(&c256, midHash);
//Restriction / expiry check
//#ifdef _ADOMODE_
if (block->height > OUTDATE) {
printf("This program has expired; please visit our Weibo to download the latest version.\n");
exit(1);
}
//#endif
// init collision map
if( __collisionMap_cuda == NULL){
cudaStatus = cudaMalloc(&__collisionMap_cuda, sizeof(uint32)*COLLISION_TABLE_SIZE);
if (cudaStatus != cudaSuccess) {
printf("cudaMalloc Error!\n"); Beep(2000,1000); Sleep(4000); exit(1);
}
}
uint32* collisionIndices = __collisionMap_cuda;
if( __result_device == NULL ) {
cudaStatus = cudaMalloc(&__result_device, sizeof(uint32)*8388608);
if (cudaStatus != cudaSuccess) {
printf("cudaMalloc Error!\n"); Beep(2000,1000); Sleep(4000); exit(1);
}
}
uint32* result_device = __result_device;
if( __check_result_device == NULL ) {
cudaStatus = cudaMalloc(&__check_result_device, sizeof(uint32)*4096);
if (cudaStatus != cudaSuccess) {
printf("cudaMalloc Error!\n"); Beep(2000,1000); Sleep(4000); exit(1);
}
}
uint32* check_result_device = __check_result_device;
uint32* check_result_host = (uint32*)malloc(sizeof(uint32)*4096);
memset(check_result_host, 0, sizeof(uint32)*4096);
cudaStatus = cudaMemset(collisionIndices, 0, sizeof(uint32)*COLLISION_TABLE_SIZE);
if (cudaStatus != cudaSuccess) {
printf("cudaMemset Error!\n"); Beep(2000,1000); Sleep(4000); exit(1);
}
// cuda Event
cudaEvent_t start, middle, stop;
cudaEventCreate(&start);
cudaEventCreate(&middle);
cudaEventCreate(&stop);
// start search
// uint8 midHash[64];
uint8 tempHash[32+4];
memcpy(tempHash+4, midHash, 32);
cu_session session;
memcpy(session.tempHash, midHash, 32);
clock_t timer;
cudaDeviceSynchronize();
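// The loop below launches cu_sha512_map_reduce for the current nonce window,
// then has the host sort and revalidate the candidate pairs copied back during
// the previous iteration before queueing cu_ps_falseAlarmCheck and the async
// copy consumed by the next iteration; the identical verification block after
// the loop drains the final batch.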
for(uint32 n=0; n<MAX_MOMENTUM_NONCE; n += BIRTHDAYS_PER_HASH * cachedHashes)
{
// generate hash (birthdayA)
//sha512_init(&c512);
//sha512_update(&c512, tempHash, 32+4);
//sha512_final(&c512, (unsigned char*)resultHash);
//sha512(tempHash, 32+4, (unsigned char*)resultHash);
timer = clock();
session.n = n;
uint32 alarmCount = 0;
cudaEventRecord(start, 0);
cudaMemcpyToSymbolAsync(run_session, (void*)&session, sizeof(cu_session), 0, cudaMemcpyHostToDevice, 0);
cudaMemsetAsync(result_device, 0, sizeof(uint32)*8388608, 0);
cudaMemsetAsync(check_result_device, 0, sizeof(uint32)*4096, 0);
cu_sha512_map_reduce<<<blockCount, threadCount, 0, 0>>>(collisionIndices, result_device, result_device + 8388607);
cudaMemcpyAsync(&alarmCount, result_device + 8388607, sizeof(uint32), cudaMemcpyDeviceToHost, 0);
cudaEventRecord(middle, 0);
cudaStreamWaitEvent(0, stop, 0);
if (restart)
goto cycleEnd;
uint32 verifyedCount = check_result_host[4095];
if (verifyedCount > 2047) verifyedCount = 2047;
uint64 *u64_result = (uint64*)check_result_host;
sort(u64_result, u64_result + verifyedCount);
uint64 last = 0;
uint32 falseAalarmCount = 0;
uint32 checkCount = 0;
for (uint32 i = 0; i < verifyedCount; i++) {
if (last != u64_result[i]) {
checkCount++;
if( protoshares_revalidateCollision(block, midHash, check_result_host[i * 2], check_result_host[i * 2 + 1]) == false ) {
falseAalarmCount++;
}
last = u64_result[i];
}
}
cudaStreamWaitEvent(0, middle, 0);
cu_ps_falseAlarmCheck<<<1 + alarmCount / threadCount,threadCount, 0, 0>>>(result_device, check_result_device, check_result_device + 4095);
cudaMemcpyAsync(check_result_host, check_result_device, sizeof(uint32)*4096, cudaMemcpyDeviceToHost, 0);
cudaEventRecord(stop, 0);
timeSlice = clock() - timer;
SwitchToThread();
}
cudaStreamWaitEvent(0, stop, 0);
if (restart)
goto cycleEnd;
uint32 verifyedCount = check_result_host[4095];
if (verifyedCount > 2047) verifyedCount = 2047;
uint64 *u64_result = (uint64*)check_result_host;
sort(u64_result, u64_result + verifyedCount);
uint64 last = 0;
uint32 falseAalarmCount = 0;
uint32 checkCount = 0;
for (uint32 i = 0; i < verifyedCount; i++) {
if (last != u64_result[i]) {
checkCount++;
if( protoshares_revalidateCollision(block, midHash, check_result_host[i * 2], check_result_host[i * 2 + 1]) == false ) {
falseAalarmCount++;
}
last = u64_result[i];
}
}
cycleEnd:
cudaEventDestroy(start);
cudaEventDestroy(middle);
cudaEventDestroy(stop);
//printf("finish\n");
free(check_result_host);
}
#undef CACHED_HASHES
#undef COLLISION_TABLE_BITS
#undef COLLISION_TABLE_SIZE
#undef COLLISION_KEY_WIDTH
#undef COLLISION_KEY_MASK
#undef COLLISION_TABLE_MASK
#define CACHED_HASHES (32)
#define COLLISION_TABLE_BITS (26)
#define COLLISION_TABLE_SIZE (1<<COLLISION_TABLE_BITS)
#define COLLISION_TABLE_MASK (COLLISION_TABLE_SIZE-1)
#define COLLISION_KEY_WIDTH (32-COLLISION_TABLE_BITS)
#define COLLISION_KEY_MASK (0xFFFFFFFF<<(32-(COLLISION_KEY_WIDTH)))
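// Second parameter set for the *_256 variant below: identical logic, but the
// collision map shrinks to 2^26 entries and the per-slot key widens by one bit.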
__global__ void cu_sha512_map_reduce_256(uint32 *collisionMap, uint32 *result, uint32 *resultCount)
{
uint32 tid = threadIdx.x + (blockIdx.x * blockDim.x); //m
uint32 index = run_session.n + tid * 8;//i
cu_sha512_message block;
cu_sha512_digest sha_digest;
block.h[4] = 0;
block.i[0] = index;
block.i[1] = run_session.tempHash[0];
block.i[2] = run_session.tempHash[1];
block.i[3] = run_session.tempHash[2];
block.i[4] = run_session.tempHash[3];
block.i[5] = run_session.tempHash[4];
block.i[6] = run_session.tempHash[5];
block.i[7] = run_session.tempHash[6];
block.i[8] = run_session.tempHash[7];
cu_sha512_len36(block, sha_digest);
for (int f = 0; f < 8; f++)
{
uint64 birthday = sha_digest.h[f] >> (64ULL-SEARCH_SPACE_BITS);
uint32 collisionKey = (uint32)((birthday>>18) & COLLISION_KEY_MASK);
birthday &= COLLISION_TABLE_MASK;
uint32 old = atomicExch(collisionMap + birthday, index + f | collisionKey);
if ((old & COLLISION_KEY_MASK) == collisionKey) {
uint32 pos = atomicInc(resultCount, 8388606);
result[pos * 2] = index + f;
result[pos * 2 + 1] = old & ~COLLISION_KEY_MASK;
}
}
}
void protoshares_process_all_cuda_256(minerProtosharesBlock_t* block, int blockCount, int threadCount, volatile bool &restart)
{
cudaError cudaStatus;
// generate mid hash using sha256 (header hash)
uint8 midHash[32];
uint32 cachedHashes = blockCount * threadCount;
sha256_ctx c256;
sha256_init(&c256);
sha256_update(&c256, (unsigned char*)block, 80);
sha256_final(&c256, midHash);
sha256_init(&c256);
sha256_update(&c256, (unsigned char*)midHash, 32);
sha256_final(&c256, midHash);
//Restriction / expiry check
//#ifdef _ADOMODE_
if (block->height > OUTDATE) {
printf("This program has expired; please visit our Weibo to download the latest version.\n");
exit(1);
}
//#endif
// init collision map
if( __collisionMap_cuda == NULL){
cudaStatus = cudaMalloc(&__collisionMap_cuda, sizeof(uint32)*COLLISION_TABLE_SIZE);
if (cudaStatus != cudaSuccess) {
printf("cudaMalloc Error!\n"); Beep(2000,1000); Sleep(4000); exit(1);
}
}
uint32* collisionIndices = __collisionMap_cuda;
if( __result_device == NULL ) {
cudaStatus = cudaMalloc(&__result_device, sizeof(uint32)*8388608);
if (cudaStatus != cudaSuccess) {
printf("cudaMalloc Error!\n"); Beep(2000,1000); Sleep(4000); exit(1);
}
}
uint32* result_device = __result_device;
if( __check_result_device == NULL ) {
cudaStatus = cudaMalloc(&__check_result_device, sizeof(uint32)*4096);
if (cudaStatus != cudaSuccess) {
printf("cudaMalloc Error!\n"); Beep(2000,1000); Sleep(4000); exit(1);
}
}
uint32* check_result_device = __check_result_device;
uint32* check_result_host = (uint32*)malloc(sizeof(uint32)*4096);
memset(check_result_host, 0, sizeof(uint32)*4096);
cudaStatus = cudaMemset(collisionIndices, 0, sizeof(uint32)*COLLISION_TABLE_SIZE);
if (cudaStatus != cudaSuccess) {
printf("cudaMemset Error!\n"); Beep(2000,1000); Sleep(4000); exit(1);
}
// cuda Event
cudaEvent_t start, middle, stop;
cudaEventCreate(&start);
cudaEventCreate(&middle);
cudaEventCreate(&stop);
// start search
// uint8 midHash[64];
uint8 tempHash[32+4];
memcpy(tempHash+4, midHash, 32);
cu_session session;
memcpy(session.tempHash, midHash, 32);
clock_t timer;
cudaDeviceSynchronize();
for(uint32 n=0; n<MAX_MOMENTUM_NONCE; n += BIRTHDAYS_PER_HASH * cachedHashes)
{
// generate hash (birthdayA)
//sha512_init(&c512);
//sha512_update(&c512, tempHash, 32+4);
//sha512_final(&c512, (unsigned char*)resultHash);
//sha512(tempHash, 32+4, (unsigned char*)resultHash);
timer = clock();
session.n = n;
uint32 alarmCount = 0;
cudaEventRecord(start, 0);
cudaMemcpyToSymbolAsync(run_session, (void*)&session, sizeof(cu_session), 0, cudaMemcpyHostToDevice, 0);
cudaMemsetAsync(result_device, 0, sizeof(uint32)*8388608, 0);
cudaMemsetAsync(check_result_device, 0, sizeof(uint32)*4096, 0);
cu_sha512_map_reduce_256<<<blockCount, threadCount, 0, 0>>>(collisionIndices, result_device, result_device + 8388607);
cudaMemcpyAsync(&alarmCount, result_device + 8388607, sizeof(uint32), cudaMemcpyDeviceToHost, 0);
cudaEventRecord(middle, 0);
cudaStreamWaitEvent(0, stop, 0);
if (restart)
goto cycleEnd;
uint32 verifyedCount = check_result_host[4095];
if (verifyedCount > 2047) verifyedCount = 2047;
uint64 *u64_result = (uint64*)check_result_host;
sort(u64_result, u64_result + verifyedCount);
uint64 last = 0;
uint32 falseAalarmCount = 0;
uint32 checkCount = 0;
for (uint32 i = 0; i < verifyedCount; i++) {
if (last != u64_result[i]) {
checkCount++;
if( protoshares_revalidateCollision(block, midHash, check_result_host[i * 2], check_result_host[i * 2 + 1]) == false ) {
falseAalarmCount++;
}
last = u64_result[i];
}
}
cudaStreamWaitEvent(0, middle, 0);
cu_ps_falseAlarmCheck<<<1 + alarmCount / threadCount,threadCount, 0, 0>>>(result_device, check_result_device, check_result_device + 4095);
cudaMemcpyAsync(check_result_host, check_result_device, sizeof(uint32)*4096, cudaMemcpyDeviceToHost, 0);
cudaEventRecord(stop, 0);
timeSlice = clock() - timer;
SwitchToThread();
}
cudaStreamWaitEvent(0, stop, 0);
if (restart)
goto cycleEnd;
uint32 verifyedCount = check_result_host[4095];
if (verifyedCount > 2047) verifyedCount = 2047;
uint64 *u64_result = (uint64*)check_result_host;
sort(u64_result, u64_result + verifyedCount);
uint64 last = 0;
uint32 falseAalarmCount = 0;
uint32 checkCount = 0;
for (uint32 i = 0; i < verifyedCount; i++) {
if (last != u64_result[i]) {
checkCount++;
if( protoshares_revalidateCollision(block, midHash, check_result_host[i * 2], check_result_host[i * 2 + 1]) == false ) {
falseAalarmCount++;
}
last = u64_result[i];
}
}
cycleEnd:
cudaEventDestroy(start);
cudaEventDestroy(middle);
cudaEventDestroy(stop);
//printf("finish\n");
free(check_result_host);
}
#define MOMENTUM_N_HASHES (1<<26)
#define NUM_COUNTBITS_POWER 32
#define COUNTBITS_SLOTS_POWER (NUM_COUNTBITS_POWER-1)
#define NUM_COUNTBITS_WORDS (1<<(NUM_COUNTBITS_POWER-5))
#define N_RESULTS 32768
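// v2 pipeline: a 2-bit saturating counter array ("countbits") acts as a
// counting filter over all 2^26 hashes. search_sha512_kernel hashes every
// index and marks the filter, filter_sha512_kernel zeroes hashes whose slot
// was hit fewer than twice, populate_filter_kernel re-marks the filter over a
// different bit range of the survivors, and filter_and_rewrite_sha512_kernel
// compacts the remaining candidates into dev_results for host-side sorting
// and revalidation.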
__device__
void set_or_double(uint32 *countbits, uint32 whichbit) {
/* Kind of like a saturating add of two bit values.
* First set is 00 -> 01. Second set is 01 -> 11
* Beyond that stays 11
*/
uint32 whichword = whichbit>>4;
uint32 bitpat = 1UL << (2*(whichbit&0xf));
uint32 old = atomicOr(&countbits[whichword], bitpat);
if (old & bitpat) {
uint32 secondbit = (1UL<<((2*(whichbit&0xf)) +1));
if (!(old & secondbit)) {
atomicOr(&countbits[whichword], secondbit);
}
}
}
__device__ inline
void add_to_filter(uint32 *countbits, const uint64 hash) {
uint32 whichbit = (uint32(hash>>14) & ((1UL<<COUNTBITS_SLOTS_POWER)-1));
set_or_double(countbits, whichbit);
}
__device__ inline
bool is_in_filter_twice(const uint32 *countbits, const uint64 hash) {
uint32 whichbit = (uint32(hash>>14) & ((1UL<<COUNTBITS_SLOTS_POWER)-1));
uint32 cbits = countbits[whichbit>>4];
return (cbits & (1UL<<((2*(whichbit&0xf))+1)));
}
__global__
void search_sha512_kernel(uint64 *dev_hashes, uint32 *dev_countbits) {
uint32 spot = (((gridDim.x * blockIdx.y) + blockIdx.x)* blockDim.x) + threadIdx.x;
uint32 index = spot * 8;//i
cu_sha512_message block;
cu_sha512_digest sha_digest;
block.h[4] = 0;
block.i[0] = index;
block.i[1] = run_session.tempHash[0];
block.i[2] = run_session.tempHash[1];
block.i[3] = run_session.tempHash[2];
block.i[4] = run_session.tempHash[3];
block.i[5] = run_session.tempHash[4];
block.i[6] = run_session.tempHash[5];
block.i[7] = run_session.tempHash[6];
block.i[8] = run_session.tempHash[7];
cu_sha512_len36(block, sha_digest);
/*uint64 H[8];
union {
uint64 D[5];
uint32 D2[10];
};
D2[0] = index;
D2[1] = run_session.tempHash[0];
D2[2] = run_session.tempHash[1];
D2[3] = run_session.tempHash[2];
D2[4] = run_session.tempHash[3];
D2[5] = run_session.tempHash[4];
D2[6] = run_session.tempHash[5];
D2[7] = run_session.tempHash[6];
D2[8] = run_session.tempHash[7];
D2[9] = 0;
sha512_block(H, D);*/
for (int i = 0; i < 8; i++)
{
add_to_filter(dev_countbits, sha_digest.h[i]);
#define POOLSIZE (1<<23)
dev_hashes[i*POOLSIZE+spot] = sha_digest.h[i];
}
}
__global__
void filter_sha512_kernel(uint64 *dev_hashes, const uint32 *dev_countbits) {
uint32 spot = (((gridDim.x * blockIdx.y) + blockIdx.x)* blockDim.x) + threadIdx.x;
for (int i = 0; i < 8; i++) {
uint64 myword = dev_hashes[i*POOLSIZE+spot];
bool c = is_in_filter_twice(dev_countbits, myword);
if (!c) {
dev_hashes[i*POOLSIZE+spot] = 0;
}
}
}
__global__
void populate_filter_kernel(uint64 *dev_hashes, uint32 *dev_countbits) {
uint32 spot = (((gridDim.x * blockIdx.y) + blockIdx.x)* blockDim.x) + threadIdx.x;
for (int i = 0; i < 8; i++) {
uint64 myword = dev_hashes[i*POOLSIZE+spot];
if (myword) {
add_to_filter(dev_countbits, (myword>>18));
}
}
}
__global__
void filter_and_rewrite_sha512_kernel( uint64 *dev_hashes, const uint32 *dev_countbits, uint64 *dev_results) {
uint32 spot = (((gridDim.x * blockIdx.y) + blockIdx.x)* blockDim.x) + threadIdx.x;
for (int i = 0; i < 8; i++) {
uint64 myword = dev_hashes[i*POOLSIZE+spot];
if (myword && is_in_filter_twice(dev_countbits, (myword>>18))) {
myword = ((myword & (~(((1ULL<<26) - 1)))) | (spot*8+i));
uint32 result_slot = atomicInc((uint32 *)dev_results, N_RESULTS);
dev_results[result_slot+1] = myword;
}
}
}
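// Each surviving candidate is stored as (birthday prefix | nonce index): the
// low 26 bits carry spot*8+i while the high bits keep the hash prefix, so the
// host-side sort places colliding birthdays next to each other. dev_results[0]
// doubles as the atomic slot counter.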
uint64 *dev_results = NULL;
uint64 *dev_hashes = NULL;
uint32 *dev_countbits = NULL;
uint64 *hashes = NULL;
void protoshares_process_all_cuda_v2(minerProtosharesBlock_t* block, int blockCount, int threadCount, volatile bool &restart)
{
cudaError cudaStatus;
// generate mid hash using sha256 (header hash)
uint8 midHash[32];
uint32 cachedHashes = blockCount * threadCount;
sha256_ctx c256;
sha256_init(&c256);
sha256_update(&c256, (unsigned char*)block, 80);
sha256_final(&c256, midHash);
sha256_init(&c256);
sha256_update(&c256, (unsigned char*)midHash, 32);
sha256_final(&c256, midHash);
uint8 tempHash[32+4];
memcpy(tempHash+4, midHash, 32);
cu_session session;
memcpy(session.tempHash, midHash, 32);
cudaError_t error;
error = cudaMemcpyToSymbolAsync(run_session, (void*)&session, sizeof(cu_session), 0, cudaMemcpyHostToDevice, 0);
if (block->height > OUTDATE) {
printf("This program has expired; please visit our Weibo to download the latest version.\n");
exit(1);
}
if (error != cudaSuccess) {
fprintf(stderr, "cudaMemcpy Error!\n"); Beep(2000,1000); Sleep(4000); exit(1);
}
if( dev_results == NULL){
cudaStatus = cudaMalloc(&dev_results, sizeof(uint64)*N_RESULTS);
if (cudaStatus != cudaSuccess) {
printf("cudaMalloc Error!\n"); Beep(2000,1000); Sleep(4000); exit(1);
}
}
if( dev_countbits == NULL){
cudaStatus = cudaMalloc(&dev_countbits, sizeof(uint32)*NUM_COUNTBITS_WORDS);
if (cudaStatus != cudaSuccess) {
printf("cudaMalloc Error!\n"); Beep(2000,1000); Sleep(4000); exit(1);
}
}
if( dev_hashes == NULL){
cudaStatus = cudaMalloc(&dev_hashes, sizeof(uint64)*MOMENTUM_N_HASHES);
if (cudaStatus != cudaSuccess) {
printf("cudaMalloc Error!\n"); Beep(2000,1000); Sleep(4000); exit(1);
}
}
if( hashes == NULL ){
hashes = (uint64*)malloc(sizeof(uint64)*N_RESULTS);
}
// I want: 64 threads per block
// 128 blocks per grid entry
// 1024 grid slots
clock_t timer;
timer = clock();
dim3 gridsize(262144 / threadCount,32);
cudaMemset(dev_results, 0, sizeof(uint64)*N_RESULTS);
cudaMemset(dev_countbits, 0, sizeof(uint32)*NUM_COUNTBITS_WORDS);
search_sha512_kernel<<<gridsize, threadCount>>>(dev_hashes, dev_countbits);
filter_sha512_kernel<<<gridsize, threadCount>>>(dev_hashes, dev_countbits);
cudaMemset(dev_countbits, 0, sizeof(uint32)*NUM_COUNTBITS_WORDS);
populate_filter_kernel<<<gridsize, threadCount>>>(dev_hashes, dev_countbits);
filter_and_rewrite_sha512_kernel<<<gridsize, threadCount>>>(dev_hashes, dev_countbits, dev_results);
error = cudaDeviceSynchronize();
if (error != cudaSuccess) {
fprintf(stderr, "runKernel Error! (%d)\n", error);
Beep(2000,1000); Sleep(4000); exit(1);
}
error = cudaMemcpy(hashes, dev_results, sizeof(uint64)*N_RESULTS, cudaMemcpyDeviceToHost);
uint32 count = hashes[0];
uint64 *uHashes = hashes + 1;
const uint64 indexMask = (1ULL<<26) - 1;
const uint64 hashMask = ~indexMask;
//printf("indexMask: %llx\nhashMask: %llx\n", indexMask, hashMask);
sort(uHashes, uHashes + count);
uint32 sameCount = 0;
uint32 falseCount = 0;
/*for( int i = 0; i < 10; i++)
{
printf("uHashes[%d]: %016llx\n", i, uHashes[i]);
}*/
for( int i = 1; i < count; i++ )
{
if ((uHashes[i] & hashMask) == (uHashes[i - 1] & hashMask)) {
uint32 indexA = uHashes[i - 1] & indexMask;
uint32 indexB = uHashes[i] & indexMask;
if (protoshares_revalidateCollision(block, midHash, indexA, indexB) == false)
falseCount++;
sameCount++;
}
}
//printf("hashCount: %d sameCount: %d falseCount:%d\n", count, sameCount, falseCount);
timeSlice = clock() - timer;
if (error != cudaSuccess) {
fprintf(stderr, "Could not memcpy dev_hashes out (%d)\n", error);
Beep(2000,1000); Sleep(4000); exit(1);
}
} |
79328b0785c5657c4879b2781ab6dc905f08d864.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/////////////////////////////////////////////////////////////////////////////
//****************************************************************************
//
// FILE NAME: kernel.cu
//
// DESCRIPTION: This is the source file containing the kernel
// for the HEVC encoding
//
// OPERATING SYSTEM: Linux UNIX only
// TESTED ON:
//
// CHANGE ACTIVITY:
// Date Who Description
// ========== ======= ===============
// 12-11-2013 Initial creation
//
//****************************************************************************
//////////////////////////////////////////////////////////////////////////////
#include <stdio.h>
#include <math.h>
#define ZERO 0
#define ONE 1
#define TWO 2
#define THREE 3
#define MINUS -1
#define DC_MODE 1
#define PLANAR_MODE 0
#define BITDEPTHY 8
#define BITDEPTHC 8
#define ANGULAR_18 18
#define ANGULAR_26 26
#define ANGULAR_10 10
#define TOTAL_MODES 35
#define MAX_BLOCK_SIZE 32
#define IA_MODES 16
#define BITS_PER_SUM (8 * sizeof(sum_t))
#define HADAMARD4(d0, d1, d2, d3, s0, s1, s2, s3) { \
sum2_t t0 = s0 + s1; \
sum2_t t1 = s0 - s1; \
sum2_t t2 = s2 + s3; \
sum2_t t3 = s2 - s3; \
d0 = t0 + t2; \
d2 = t0 - t2; \
d1 = t1 + t3; \
d3 = t1 - t3; \
}
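// One butterfly stage of a 4-point Hadamard transform, written x264-style:
// each sum2_t is assumed to pack two partial sums BITS_PER_SUM bits apart, so
// a single invocation transforms two columns at once and abs2() below takes
// absolute values of both packed halves together.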
#define abs(x) ( ( (x) < 0 ) ? -(x) : (x) )
#define min(x,y) ( (x) < (y) ? (x) : (y) )
//////////////////
// CONSTANT MEMORY
//////////////////
__device__ __constant__ int ipa[TOTAL_MODES] = {0, 0, 32, 26, 21, 17, 13, 9, 5, 2, 0, -2, -5, -9, -13, -17, -21, -26, -32, -26, -21, -17, -13, -9, -5, -2, 0, 2, 5, 9, 13, 17, 21, 26, 32};
__device__ __constant__ int ia[IA_MODES] = {-4096, -1638, -910, -630, -482, -390, -315, -256, -315, -390, -482, -630, -910, -1638, -4096};
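// ipa[] holds the HEVC intraPredAngle value for each of the 35 intra modes and
// ia[] the inverse angles (indexed as ia[mode-11]) used to extend the reference
// array for the negative angular modes; both mirror the tables in the HEVC spec.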
__device__ int sumArray(uint8_t *array, uint8_t start, uint8_t end)
{
int result = 0;
for ( int counter = start; counter <= end; counter++ )
result += array[counter];
return result;
} // End of sumArray()
__device__ uint8_t clip3(uint8_t x, uint8_t y, uint8_t z)
{
if ( z < x )
return x;
else if ( z > y )
return y;
else
return z;
} // End of clip3()
__device__ uint8_t clip1Y(uint8_t x)
{
uint8_t ret = clip3(0, ( 1 << BITDEPTHY ) - 1, x);
return ret;
} // End of clip1Y()
__device__ sum2_t abs2(sum2_t a)
{
sum2_t s = ((a >> (BITS_PER_SUM - 1)) & (((sum2_t)1 << BITS_PER_SUM) + 1)) * ((sum_t)-1);
return (a + s) ^ s;
}
__device__ void sort(int32_t* input_values)
{
for(int i =0;i<TOTAL_MODES;i++)
{
int j=i;
while(j>0 && input_values[j] < input_values[j-1])
{
int32_t temp=input_values[j];
input_values[j]=input_values[j-1];
input_values[j-1]=temp;
j--;
}
}
} // End of sort()
__device__ void extract(int32_t *sorted_values, int32_t *res, uint8_t *modes)
{
for ( int counter = 0; counter < TOTAL_MODES; counter++)
{
uint8_t mode = sorted_values[counter] & 0XFF; // mode is packed into the low byte
int32_t value = sorted_values[counter] >> 8; // SATD cost sits in the upper bits
res[counter] = value;
modes[counter] = mode;
}
} // End of extract()
//////////////////////////////////////////////////////
//////////////////////////////////////////////////////
////////////// KERNEL FUNCTION /////////////////////
//////////////////////////////////////////////////////
//////////////////////////////////////////////////////
/*
__global__ void hevcPredictionKernel(uint8_t *y, uint8_t *cr, uint8_t *cb, int32_t *res_y, int32_t *res_cr, int32_t *res_cb, uint8_t *y_modes, uint8_t *cr_modes, uint8_t *cb_modes, int height, int width)
{
printf("\nYUP I AM HERE\n");
}
*/
__global__ void hevcPredictionKernel(uint8_t *y, uint8_t *cr, uint8_t *cb, int32_t *res_y, int32_t *res_cr, int32_t *res_cb, uint8_t *y_modes, uint8_t *cr_modes, uint8_t *cb_modes, int height, int width)
{
// Thread indices, Block Indices and Dimensions
uint8_t bsize = blockDim.x;
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
// Thread Index to Data Index Mapping
int col = tx + blockDim.x * bx;
int row = ty + blockDim.y * by;
if ( 0 == tx && 0 == ty && row == 0 && col == 0)
printf("\n YUP I AM HERE \n");
// Shared neighbour memory
int neighbourArraySize = (bsize * TWO) + ONE;
int bitDepthY=BITDEPTHY;
int bitDepthC=BITDEPTHC;
int rowToBeLoaded=0;
int colToBeLoaded=0;
int var = 3;
int var1 = 3;
/////////
// Neighbour Array
////////
// y is vertical array that has the extra element that is [-1][-1]
// x is horizontal component
// Neighbour Array for luma component
__device__ __shared__ uint8_t p_yy[MAX_BLOCK_SIZE*2+1];
__device__ __shared__ uint8_t p_xy[MAX_BLOCK_SIZE*2+1];
// Neighbour array for chroma component
__device__ __shared__ uint8_t p_ycr[MAX_BLOCK_SIZE*2+1];
__device__ __shared__ uint8_t p_ycb[MAX_BLOCK_SIZE*2+1];
__device__ __shared__ uint8_t p_xcr[MAX_BLOCK_SIZE*2+1];
__device__ __shared__ uint8_t p_xcb[MAX_BLOCK_SIZE*2+1];
// Pointer to neighbour elements in shared memory
uint8_t *pyy = &p_yy[ONE];
uint8_t *pxy = &p_xy[ZERO];
uint8_t *pycr = &p_ycr[ONE];
uint8_t *pxcr = &p_xcr[ZERO];
uint8_t *pycb = &p_ycb[ONE];
uint8_t *pxcb = &p_xcb[ZERO];
// Points to the top-right-most block for which all
// the neighbour elements fall outside the image boundaries
unsigned int fallOutside = 0;
// This is to take care of the top right corner blocks in the grid
// OPTIMIZATION
if ( (0 == bx && 0 == by) )
fallOutside = 1;
/// DEBUG
//if ( fallOutside )
//printf("\nI AM FALLING OUTSIDE\n");
/// DEBUG
/*
if ( blockIdx.x == 0 && by == 0 && tx == 0 && ty == 0 )
{
printf("\nINPUT MATRIX WIDTH: %d HEIGHT: %d\n", width, height);
for ( int i = 0 ; i < width; i++)
{
for (int j = 0; j < height; j++ )
{
printf("\t%u", y[i*width+j]);
}
printf("\n");
}
}
__syncthreads();
*/
//////////////////////////////////
//////////////////////////////////
// Step 1: LOAD NEIGHBOUR ELEMENTS
//////////////////////////////////
//////////////////////////////////
// Load into the shared memory from global memory
// The loading is done based on a row basis
// Load luma elements
if ( ZERO == ty )
{
rowToBeLoaded=row-1;
colToBeLoaded=col;
/// DEBUG
/*
if ( var == bx && var1 == by )
printf("\nRow: %d col: %d rowTO: %d colTO: %d\n", row, col, rowToBeLoaded, colToBeLoaded);
*/
if((rowToBeLoaded>=0 && rowToBeLoaded<height && colToBeLoaded>=0 && colToBeLoaded<width) || fallOutside)
{
pxy[tx] = (fallOutside == 1) ? (1 << (bitDepthY -1)) : y[(rowToBeLoaded*width)+colToBeLoaded];
pxcr[tx] = (fallOutside == 1) ? (1 << (bitDepthC - 1)) : cr[(rowToBeLoaded*width)+colToBeLoaded];
pxcb[tx] = (fallOutside == 1) ? (1 << (bitDepthC - 1)) : cb[(rowToBeLoaded*width)+colToBeLoaded];
}
}
else if ( ONE == ty )
{
rowToBeLoaded=row-2;
colToBeLoaded=col+blockDim.x;
/// DEBUG
/*
if ( var == bx && var1 == by )
printf("\nRow: %d col: %d rowTO: %d colTO: %d\n", row, col, rowToBeLoaded, colToBeLoaded);
*/
if((rowToBeLoaded>=0 && rowToBeLoaded<height && colToBeLoaded>=0 && colToBeLoaded<width) || fallOutside)
{
pxy[tx + bsize] = (fallOutside == 1) ? (1 << (bitDepthY - 1)) : y[(rowToBeLoaded*width)+colToBeLoaded];
pxcr[tx + bsize] = (fallOutside == 1) ? (1 << (bitDepthC - 1)) : (cr[(rowToBeLoaded*width)+colToBeLoaded]);
pxcb[tx + bsize] = (fallOutside == 1) ? (1 << (bitDepthC - 1)) : (cb[(rowToBeLoaded*width)+colToBeLoaded]);
}
}
else if ( TWO == ty )
{
rowToBeLoaded=(row-2)+tx;
colToBeLoaded=blockDim.x*blockIdx.x-1;
/// DEBUG
/*
if ( var == bx && var1 == by )
printf("\nRow: %d col: %d rowTO: %d colTO: %d\n", row, col, rowToBeLoaded, colToBeLoaded);
*/
if((rowToBeLoaded>=0 && rowToBeLoaded<height && colToBeLoaded>=0 && colToBeLoaded<width) || fallOutside)
{
pyy[tx] = (fallOutside == 1) ? (1 << (bitDepthY - 1)) : y[rowToBeLoaded*width + colToBeLoaded];
pycr[tx] = (fallOutside == 1) ? (1 << (bitDepthC - 1)) : (cr[rowToBeLoaded*width + colToBeLoaded]);
pycb[tx] = (fallOutside == 1) ? (1 << (bitDepthC - 1)) : (cb[rowToBeLoaded*width + colToBeLoaded]);
}
}
else if ( THREE == ty )
{
rowToBeLoaded=(row+1)+tx;
colToBeLoaded=blockIdx.x*blockDim.x-1;
/// DEBUG
/*
if ( var == bx && var1 == by )
printf("\nRow: %d col: %d rowTO: %d colTO: %d\n", row, col, rowToBeLoaded, colToBeLoaded);
*/
if((rowToBeLoaded>=0 && rowToBeLoaded<height && colToBeLoaded>=0 && colToBeLoaded<width) || fallOutside)
{
pyy[tx + bsize] = (fallOutside == 1) ? (1 << (bitDepthY - 1)) : y[rowToBeLoaded*width + colToBeLoaded];
pycr[tx + bsize] = (fallOutside == 1) ? (1 << (bitDepthC - 1)) : (cr[rowToBeLoaded*width + colToBeLoaded]);
pycb[tx + bsize] = (fallOutside == 1) ? (1 << (bitDepthC - 1)) : (cb[rowToBeLoaded *width + colToBeLoaded]);
}
}
else
{
// Nothing to do here
}
// This is to load the extra guy in the neighbour element array
// who is not filled by the threads in the current block
// i.e. the extra element in the pyy, pycr, pycb array
if ( 0 == tx && 0 == ty )
{
if ( ! ((0 == bx) || (0 == by)) )
{
// this should have been pyy[MINUS]
rowToBeLoaded=row-1;
colToBeLoaded=col-1;
if(rowToBeLoaded>=0 && rowToBeLoaded<height && colToBeLoaded>=0 && colToBeLoaded<width)
{
pyy[MINUS] = y[rowToBeLoaded*width + colToBeLoaded];
pycr[MINUS] = cr[rowToBeLoaded*width + colToBeLoaded];
pycb[MINUS] = cb[rowToBeLoaded*width + colToBeLoaded];
}
} // End of if ( ! ((0 == bx) || (0 == by)) )
if ( fallOutside)
{
pyy[MINUS] = 1 << (bitDepthY - 1);
pycr[MINUS] = 1 << (bitDepthC - 1);
pycb[MINUS] = 1 << (bitDepthC - 1);
}
} // End of if ( 0 == tx && 0 == ty )
__syncthreads();
/// DEBUG
/*
if ( blockIdx.x == var && blockIdx.y == var1 && tx == 0 && ty == 0 )
{
printf("\nPREDICTED MATRIX - PYY\n");
for ( int i = 0 ; i < 2*bsize+1; i++)
{
printf("\t%u", p_yy[i]);
}
printf("\nPREDICTED MATRIX - PXY\n");
for ( int i = 0 ; i < 2*bsize; i++)
{
printf("\t%u", p_xy[i]);
}
}
*/
//////////////////////////
//////////////////////////
// Step 2: First Filtering
//////////////////////////
//////////////////////////
if ( ZERO == tx && ZERO == ty )
{
if (by==(gridDim.y-1))
{
if(bx==ZERO)
{
for(int i=0;i<neighbourArraySize-1;i++)
{
pyy[i]=pxy[ZERO];
pycr[i] = pxcr[ZERO];
pycb[i] = pxcb[ZERO];
}
pyy[MINUS] = pxy[ZERO];
pycr[MINUS] = pxcr[ZERO];
pycb[MINUS] = pxcb[ZERO];
}
else
{
for(int i=bsize;i<(2*bsize);i++)
{
pyy[i]=pyy[bsize-ONE];
pycr[i] = pycr[bsize-ONE];
pycb[i] = pycb[bsize-ONE];
}
}
} // End of if (by==(gridDim.y-1))
if(0==by && !fallOutside)
{
pyy[MINUS]=pyy[ZERO];
pycr[MINUS] = pycr[ZERO];
pycb[MINUS] = pycb[ZERO];
for(int i=0;i<2*bsize;i++)
{
pxy[i]=pyy[MINUS];
pxcr[i]=pycr[MINUS];
pxcb[i]=pycb[MINUS];
}
} // End of if ( 0 == by )
if((bx == (gridDim.x - 1)) && (0 != by))
{
for ( int i = bsize; i < (2 * bsize); i++ )
{
pxy[i] = pxy[bsize - 1];
pxcr[i] = pxcr[bsize - 1];
pxcb[i] = pxcb[bsize - 1];
}
}
} // End of if ( ZERO == tx && ZERO == ty )
__syncthreads();
/// DEBUG
/*
if ( blockIdx.x == var && blockIdx.y == var1 && tx == 0 && ty == 0 )
{
printf("\nPREDICTED MATRIX - PYY\n");
for ( int i = 0 ; i < 2*bsize+1; i++)
{
printf("\t%u", p_yy[i]);
}
printf("\nPREDICTED MATRIX - PXY\n");
for ( int i = 0 ; i < 2*bsize; i++)
{
printf("\t%u", p_xy[i]);
}
}
*/
/////////////////////////////////////////////////
/////////////////////////////////////////////////
// STEP 3 : MODE COMPUTATION AND SECOND FILTERING
/////////////////////////////////////////////////
/////////////////////////////////////////////////
// TO DO
/////////
// Second Filtered neighbour array
/////////
__device__ __shared__ uint8_t pf_yy[MAX_BLOCK_SIZE*2+1];
__device__ __shared__ uint8_t pf_xy[MAX_BLOCK_SIZE*2+1];
////////
// Predicted pixels
///////
__device__ __shared__ uint8_t predSamplesY[MAX_BLOCK_SIZE][MAX_BLOCK_SIZE];
__device__ __shared__ uint8_t predSamplesCr[MAX_BLOCK_SIZE][MAX_BLOCK_SIZE];
__device__ __shared__ uint8_t predSamplesCb[MAX_BLOCK_SIZE][MAX_BLOCK_SIZE];
// Pointer to predicted pixels
uint8_t *pfyy = &pf_yy[ONE];
uint8_t *pfxy = &pf_xy[ZERO];
//////
// Hadamard shared memory
//////
__device__ __shared__ uint8_t ay[MAX_BLOCK_SIZE][MAX_BLOCK_SIZE];
__device__ __shared__ uint8_t acr[MAX_BLOCK_SIZE][MAX_BLOCK_SIZE];
__device__ __shared__ uint8_t acb[MAX_BLOCK_SIZE][MAX_BLOCK_SIZE];
__device__ __shared__ uint8_t hby[MAX_BLOCK_SIZE][MAX_BLOCK_SIZE/2];
__device__ __shared__ uint8_t bcr[MAX_BLOCK_SIZE][MAX_BLOCK_SIZE/2];
__device__ __shared__ uint8_t bcb[MAX_BLOCK_SIZE][MAX_BLOCK_SIZE/2];
__device__ __shared__ int32_t y_satd_shared[TOTAL_MODES];
__device__ __shared__ int32_t cr_satd_shared[TOTAL_MODES];
__device__ __shared__ int32_t cb_satd_shared[TOTAL_MODES];
__device__ __shared__ int32_t y_modes_shared[TOTAL_MODES];
__device__ __shared__ int32_t cr_modes_shared[TOTAL_MODES];
__device__ __shared__ int32_t cb_modes_shared[TOTAL_MODES];
// Loop through all modes
for(int mode =0;mode <35;mode++)
{
// if the computed value of filterFlag==1, use the filtered array pF instead of p for intra prediction.
int filterFlag=0;
int biIntFlag= 0;
if(ty==0 && tx==0)
{
//////////////
// FILTER FLAG
//////////////
if(mode==DC_MODE || bsize==4)
{
filterFlag=0;
}
else
{
int minDistVerHor = min(abs(mode-26),abs(mode-10));
int intraHorVerDistThres;
if(bsize==8)
{
intraHorVerDistThres=7;
}
else if(bsize==16)
{
intraHorVerDistThres=1;
}
else if(bsize==32)
{
intraHorVerDistThres=0;
}
else
{
// Nothing to do
}
if(minDistVerHor>intraHorVerDistThres)
{
filterFlag=1;
}
else
{
filterFlag = 0;
}
} // End of else of if ( mode == DC_MODE || bsize == 4 )
if(filterFlag==1)
{
/////////////
// B INT FLAG
/////////////
if(bsize==32 && ( abs ( pyy[-1] + pxy[bsize*2-1] - (2*pxy[bsize-1]) ) < (1<<(bitDepthY-5) ) ) && ( abs ( pyy[-1] + pyy[bsize*2-1] - (2*pyy[bsize-1]) ) < (1<<(bitDepthY-5) ) ))
{
biIntFlag=1;
}
else
{
biIntFlag = 0;
}
} // End of if ( 1 == filterFlag )
///////////////////
// SECOND FILTERING
///////////////////
if(biIntFlag==1)
{
pfyy[MINUS]=pyy[MINUS];
for(int i=0;i<(bsize*2-1);i++)
{
pfyy[i]=((63-i)*pyy[MINUS]+(i+1)*pyy[63]+32)>>6;
}
pfyy[63]=pyy[63];
for(int i=0;i<(bsize*2-1);i++)
{
pfxy[i]=((63-i)*pyy[MINUS]+(i+1)*pxy[63]+32)>>6;
}
pfxy[63]=pxy[63];
} // End of if ( 1 == biIntFlag )
else
{
pfyy[MINUS]=(pyy[ZERO]+2*pyy[MINUS]+pxy[ZERO]+2)>>2;
for(int i=0;i<(bsize*2-1);i++)
{
pfyy[i]=(pyy[i+1]+2*pyy[i]+pyy[i-1]+2)>>2;
}
pfyy[bsize*2-1]=pyy[bsize*2-1];
pfxy[0] = (pyy[MINUS] + 2 * pxy[ZERO] + pxy[ONE] + 2) >> 2;
for(int i=1;i<(bsize*2-1);i++)
{
pfxy[i]=(pxy[i-1]+2*pxy[i]+pxy[i+1]+2)>>2;
}
pfxy[bsize*2-1]=pxy[bsize*2-1];
} // End of else of if ( 1 -- biIntFlag )
} // End of if(ty==0 && tx==0)
__syncthreads();
//////////////
// Switch pointer to pfyy or p_yy
// Switch pointer to pfxy or p_xy
/////////////
uint8_t *selyy, *selxy;
if(filterFlag==1)
{
selyy=&pf_yy[ONE];
selxy=&pf_xy[ZERO];
}
else
{
selyy=pyy;
selxy=pxy;
}
__device__ __shared__ uint8_t ref_Y[3*MAX_BLOCK_SIZE+1];
__device__ __shared__ uint8_t ref_Cr[3*MAX_BLOCK_SIZE+1];
__device__ __shared__ uint8_t ref_Cb[3*MAX_BLOCK_SIZE+1];
// Pointer to ref arrays
uint8_t *refY = &ref_Y[4];
uint8_t *refCr = &ref_Cr[4];
uint8_t *refCb = &ref_Cb[4];
// OPTIMIZATION making iIdx and IFact as matrices
__device__ __shared__ int iIdx[MAX_BLOCK_SIZE][MAX_BLOCK_SIZE];
__device__ __shared__ int iFact[MAX_BLOCK_SIZE][MAX_BLOCK_SIZE];
////////////////////
// MODE: PLANAR MODE
////////////////////
// TO DO : is this ty tx
if(mode==PLANAR_MODE)
{
float logValue = log2f(bsize+1.0);
int intLog = (int) logValue;
predSamplesY[ty][tx]=((bsize-1-tx)*selyy[ty]+(tx+1)*selxy[bsize]+(bsize-1-ty)*selxy[tx]+(ty+1)*selyy[bsize]+bsize)>>intLog; //TO_DO: Replace logarithmic with appropriate C function
predSamplesCr[ty][tx]=((bsize-1-tx)*pycr[ty]+(tx+1)*pxcr[bsize]+(bsize-1-ty)*pxcr[tx]+(ty+1)*pycr[bsize]+bsize)>>intLog; //TO_DO: Replace logarithmic with appropriate C function
predSamplesCb[ty][tx]=((bsize-1-tx)*pycb[ty]+(tx+1)*pxcb[bsize]+(bsize-1-ty)*pxcb[tx]+(ty+1)*pycb[bsize]+bsize)>>intLog; //TO_DO: Replace logarithmic with appropriate C function
}
////////////////
// MODE: DC MODE
////////////////
else if ( DC_MODE == mode )
{
uint8_t dcValY = 0;
uint8_t dcValCr = 0;
uint8_t dcValCb = 0;
uint8_t firstSumY = 0;
uint8_t secondSumY = 0;
uint8_t firstSumCr = 0;
uint8_t secondSumCr = 0;
uint8_t firstSumCb = 0;
uint8_t secondSumCb = 0;
//OPTIMIZATION
if ( 0 == tx && 0 == ty )
{
firstSumY = sumArray(selxy, 0, bsize - 1);
}
else if ( 1 == tx && 0 == ty )
{
secondSumY = sumArray(selyy, 0, bsize - 1);
}
else if ( 2 == tx && 0 == ty )
{
firstSumCr = sumArray(pxcr, 0, bsize - 1);
}
else if ( 3 == tx && 0 == ty )
{
secondSumCr = sumArray(pycr, 0, bsize - 1);
if(bsize==4)
{
firstSumCb = sumArray(pxcb, 0, bsize - 1);
secondSumCb = sumArray(pycb, 0, bsize - 1);
}
}
else if ( 4 == tx && 0 == ty && bsize!=4 )
{
firstSumCb = sumArray(pxcb, 0, bsize - 1);
}
else if ( 5 == tx && 0 == ty && bsize!=4)
{
secondSumCb = sumArray(pycb, 0, bsize - 1);
}
__syncthreads();
if ( 0 == tx && 0 == ty )
{
dcValY = (firstSumY + secondSumY + bsize) >> ((int)log2f((float)bsize)+1);
}
else if ( 1 == tx && 0 == ty )
{
dcValCr = (firstSumCr + secondSumCr + bsize) >> ((int)log2f((float)bsize)+1);
}
else if ( 2 == tx && 0 == ty )
{
dcValCb = (firstSumCb + secondSumCb + bsize) >> ((int)log2f((float)bsize)+1);
}
__syncthreads();
if ( bsize < 32 )
{
//Apply following changes to predSamples only for luma channel
if(ZERO == ty && ZERO == tx)
predSamplesY[0][0]=(selyy[ZERO]+2*dcValY+selxy[0]+2)>>2;
if(ONE == ty && tx>=ONE && tx<=bsize-1)
predSamplesY[0][tx]=(selxy[tx]+3*dcValY+2)>>2;
if(TWO == ty && tx>=ONE && tx<=bsize-1)
predSamplesY[tx][0]=(selyy[tx]+3*dcValY+2)>>2;
if(tx >0 && ty >0)
predSamplesY[tx][ty]=dcValY;
} // End of if ( bsize < 32 )
else
{
//For cr and cb, set dcValue as all value for predSamples of cr and cb
predSamplesY[ty][tx] = dcValY;
predSamplesCr[ty][tx]=dcValCr;
predSamplesCb[ty][tx]=dcValCb;
} // End of else of if ( bsize < 32 )
} // End of else if ( DC_MODE == mode )
///////////////
// ANGULAR MODE
///////////////
else if ( mode >= ANGULAR_18 )
{
// OPTIMIZATION
if ( bsize == 4 )
{
if ( 0 == ty )
{
if(tx==0){
refY[0]=selyy[MINUS];
refCr[0]=pycr[MINUS];
refCb[0]=pycb[MINUS];
}
else{
refY[tx] = selxy[-1 + tx];
refCr[tx] = pxcr[-1 + tx];
refCb[tx] = pxcb[-1 + tx];
}
if ( 0 == tx )
{
refY[bsize+tx] = selxy[-1 + (tx + bsize)];
refCr[bsize+tx] = pxcr[-1 + (tx + bsize)];
refCb[bsize+tx] = pxcb[-1 + (tx + bsize)];
}
}
if (ipa[mode] < 0)
{
if ( ((bsize * ipa[mode]) >> 5) < -1 )
{
if ( 1 == ty )
{
refY[-(tx + 1)] = selyy[ -1 + (( tx + 1 ) * ia[mode-11] + 128) >> 8];
}
if ( 2 == ty )
{
refCr[-(tx + 1)] = pycr[ -1 + (( tx + 1 ) * ia[mode-11] + 128) >> 8];
refCb[-(tx + 1)] = pycb[ -1 + (( tx + 1 ) * ia[mode-11] + 128) >> 8];
}
} // End of if ( ((bsize * ipa[mode]) >> 5) < -1 )
} // End of if (ipa[mode] < 0)
else
{
if ( 3 == ty )
{
refY[tx + bsize + 1] = selxy[-1 + tx + bsize + 1];
refCr[tx + bsize + 1] = pxcr[-1 + tx + bsize + 1];
refCb[tx + bsize + 1] = pxcb[-1 + tx + bsize + 1];
}
} // End of else of if (ipa[mode] < 0)
} // End of if ( bsize == 4 )
else
{
if ( 0 == ty )
{
if(tx==0){
refY[0]=selyy[MINUS];
}
else
refY[tx] = selxy[-1 + tx];
if ( 0 == tx )
refY[bsize + tx] = selxy[-1 + (tx + bsize)];
}
if ( 1 == ty )
{
if(tx==0){
refCr[0]=pycr[MINUS];
}
else
refCr[tx] = pxcr[-1 + tx];
if ( 0 == tx )
refCr[bsize+tx] = pxcr[-1 + (tx + bsize)];
}
if ( 2 == ty )
{
if(tx==0){
refCb[0]=pycb[MINUS];
}
else
refCb[tx] = pxcb[-1 + tx];
if ( 0 == tx )
refCb[bsize+tx] = pxcb[-1 + (tx + bsize)];
}
if (ipa[mode] < 0)
{
if ( ((bsize * ipa[mode]) >> 5) < -1 )
{
if ( 3 == ty )
{
refY[-(tx + 1)] = selyy[ -1 + (( tx + 1 ) * ia[mode-11] + 128) >> 8];
}
if ( 4 == ty )
{
refCr[-(tx + 1)] = pycr[ -1 + (( tx + 1 ) * ia[mode-11] + 128) >> 8];
}
if ( 5 == ty )
{
refCb[-(tx + 1)] = pycb[ -1 + (( tx + 1 ) * ia[mode-11] + 128) >> 8];
}
} // End of if ( ((bsize * ipa[mode]) >> 5) < -1 )
} // End of if (ipa[mode] < 0)
else
{
if ( 6 == ty )
refY[tx + bsize + 1] = selxy[-1 + tx + bsize + 1];
if ( 7 == ty )
{
refCr[tx + bsize + 1] = pxcr[-1 + tx + bsize + 1];
if ( bsize == 8 )
refCb[tx + bsize + 1] = pxcb[-1 + tx + bsize + 1];
}
if ( 8 == ty && bsize != 8 )
{
refCb[tx + bsize + 1] = pxcb[-1 + tx + bsize + 1];
}
} // End of else of if (ipa[mode] < 0)
} // End of else of if ( bsize == 4 )
// Load iIdx and iFact
iIdx[ty][tx] = ((ty+1) * ipa[mode]) >> 5;
iFact[ty][tx] = ((ty+1) * ipa[mode]) & 31;
if ( iFact[ty][tx] != 0 )
{
predSamplesY[ty][tx] = ((32 - iFact[ty][tx]) * refY[tx + iIdx[ty][tx] + 1] + iFact[ty][tx] * refY[tx + iIdx[ty][tx] + 2] + 16) >> 5;
predSamplesCr[ty][tx] = ((32 - iFact[ty][tx]) * refCr[tx + iIdx[ty][tx] + 1] + iFact[ty][tx] * refCr[tx + iIdx[ty][tx] + 2] + 16) >> 5;
predSamplesCb[ty][tx] = ((32 - iFact[ty][tx]) * refCb[tx + iIdx[ty][tx] + 1] + iFact[ty][tx] * refCb[tx + iIdx[ty][tx] + 2] + 16) >> 5;
}
else
{
predSamplesY[ty][tx] = refY[tx + iIdx[ty][tx] + 1];
predSamplesCr[ty][tx] = refCr[tx + iIdx[ty][tx] + 1];
predSamplesCb[ty][tx] = refCb[tx + iIdx[ty][tx] + 1];
}
if ( mode == ANGULAR_26 && bsize < 32 )
{
if ( 0 == tx )
{
uint8_t param = selxy[tx] + ((selyy[ty] - selyy[MINUS]) >> 1);
predSamplesY[ty][tx] = clip1Y(param);
}
} // End of if ( mode == ANGULAR_26 && bsize < 32 )
} // End of else if ( mode >= ANGULAR_18 )
else if ( mode > DC_MODE && mode < ANGULAR_18 )
{
if ( 4 == bsize )
{
if ( 0 == ty )
{
refY[tx] = selyy[-1 + tx];
refCr[tx] = pycr[-1 + tx];
refCb[tx] = pycb[-1 + tx];
if ( 0 == tx )
{
refY[bsize+tx] = selyy[-1 + (tx + bsize)];
refCr[bsize+tx] = pycr[-1 + (tx + bsize)];
refCb[bsize+tx] = pycb[-1 + (tx + bsize)];
}
}
if (ipa[mode] < 0)
{
if ( ((bsize * ipa[mode]) >> 5) < -1 )
{
if ( 1 == ty )
{
if((-1 + (( tx + 1 ) * ia[mode-11] + 128) >> 8)<0)
refY[-(tx + 1)] = selyy[MINUS];
else
refY[-(tx + 1)] = selxy[ -1 + (( tx + 1 ) * ia[mode-11] + 128) >> 8];
}
if ( 2 == ty )
{
if((-1 + (( tx + 1 ) * ia[mode-11] + 128) >> 8)<0){
refCr[-(tx + 1)] = pycr[MINUS];
refCb[-(tx + 1)] = pycb[MINUS];
}
else{
refCr[-(tx + 1)] = pxcr[ -1 + (( tx + 1 ) * ia[mode-11] + 128) >> 8];
refCb[-(tx + 1)] = pxcb[ -1 + (( tx + 1 ) * ia[mode-11] + 128) >> 8];
}
}
} // End of if ( ((bsize * ipa[mode]) >> 5) < -1 )
} // End of if (ipa[mode] < 0)
else
{
if ( 3 == ty )
{
refY[tx + bsize + 1] = selyy[-1 + tx + bsize + 1];
refCr[tx + bsize + 1] = pycr[-1 + tx + bsize + 1];
refCb[tx + bsize + 1] = pycb[-1 + tx + bsize + 1];
}
} // End of else of if (ipa[mode] < 0)
} // End of if ( 4 == bsize )
else
{
if ( 0 == ty )
{
refY[tx] = selyy[-1 + tx];
if ( 0 == tx )
refY[bsize + tx] = selyy[-1 + (tx + bsize)];
}
if ( 1 == ty )
{
refCr[tx] = pycr[-1 + tx];
if ( 0 == tx )
refCr[bsize+tx] = pycr[-1 + (tx + bsize)];
}
if ( 2 == ty )
{
refCb[tx] = pycb[-1 + tx];
if ( 0 == tx )
refCb[bsize+tx] = pycb[-1 + (tx + bsize)];
}
if (ipa[mode] < 0)
{
if ( ((bsize * ipa[mode]) >> 5) < -1 )
{
if ( 3 == ty )
{
if((-1 + (( tx + 1 ) * ia[mode-11] + 128) >> 8)<0)
refY[-(tx + 1)] = selyy[MINUS];
else
refY[-(tx + 1)] = selxy[ -1 + (( tx + 1 ) * ia[mode-11] + 128) >> 8];
}
if ( 4 == ty )
{
if((-1 + (( tx + 1 ) * ia[mode-11] + 128) >> 8)<0)
refCr[-(tx + 1)] = pycr[MINUS];
else
refCr[-(tx + 1)] = pxcr[ -1 + (( tx + 1 ) * ia[mode-11] + 128) >> 8];
}
if ( 5 == ty )
{
if((-1 + (( tx + 1 ) * ia[mode-11] + 128) >> 8)<0)
refCb[-(tx + 1)] = pycb[MINUS];
else
refCb[-(tx + 1)] = pxcb[ -1 + (( tx + 1 ) * ia[mode-11] + 128) >> 8];
}
} // End of if ( ((bsize * ipa[mode]) >> 5) < -1 )
} // End of if (ipa[mode] < 0)
else
{
if ( 6 == ty )
refY[tx + bsize + 1] = selyy[-1 + tx + bsize + 1];
if ( 7 == ty )
{
refCr[tx + bsize + 1] = pycr[-1 + tx + bsize + 1];
if ( bsize == 8 )
refCb[tx + bsize + 1] = pycb[-1 + tx + bsize + 1];
}
if ( 8 == ty && bsize != 8 )
{
refCb[tx + bsize + 1] = pycb[-1 + tx + bsize + 1];
}
} // End of else of if (ipa[mode] < 0)
} // End of else of if ( 4 == bsize )
// Load iIdx and iFact
iIdx[ty][tx] = ( (tx + 1) * ipa[mode] ) >> 5;
iFact[ty][tx] = ( (tx + 1) * ipa[mode] ) & 31;
if ( iFact[ty][tx] != 0 )
{
predSamplesY[ty][tx] = ((32 - iFact[ty][tx]) * refY[ty + iIdx[ty][tx] + 1] + iFact[ty][tx] * refY[ty + iIdx[ty][tx] + 2] + 16) >> 5;
predSamplesCr[ty][tx] = ((32 - iFact[ty][tx]) * refCr[ty + iIdx[ty][tx] + 1] + iFact[ty][tx] * refCr[ty + iIdx[ty][tx] + 2] + 16) >> 5;
predSamplesCb[ty][tx] = ((32 - iFact[ty][tx]) * refCb[ty + iIdx[ty][tx] + 1] + iFact[ty][tx] * refCb[ty + iIdx[ty][tx] + 2] + 16) >> 5;
}
else
{
predSamplesY[ty][tx] = refY[ty + iIdx[ty][tx] + 1];
predSamplesCr[ty][tx] = refCr[ty + iIdx[ty][tx] + 1];
predSamplesCb[ty][tx] = refCb[ty + iIdx[ty][tx] + 1];
}
if ( mode == ANGULAR_10 && bsize < 32 )
{
if ( 0 == tx )
predSamplesY[ty][tx] = clip1Y(( (selyy[ty]) + ((selxy[tx]-selyy[MINUS])>>1) ));
} // End of if ( mode == ANGULAR_10 && bsize < 32 )
} // End of else if ( mode > ANGULAR_1 && mode < ANGULAR_18 )
__syncthreads();
/*
///////////////////
// STEP 4: HADAMARD
///////////////////
// finally calculation of SATD values for different modes
// have A matrix which is a shared memory
// all the threads fill the 'A' array
if(bsize == 4)
{
// everybody computes the difference of pixels
ay[ty][tx] = predSamplesY[ty][tx] - y[row*width + col];
acr[ty][tx] = predSamplesCr[ty][tx] - cr[row*width + col];
acb[ty][tx] = predSamplesCb[ty][tx] - cb[row*width + col];
// construct the B-matrix : 8 threads are working
if(tx < 2)
{
hby[ty][tx] = (ay[ty][2*tx] + ay[ty][2*tx + 1]) + ((ay[ty][2*tx] - ay[ty][2*tx + 1]) << BITS_PER_SUM);
bcr[ty][tx] = (acr[ty][2*tx] + acr[ty][2*tx + 1]) + ((acr[ty][2*tx] - acr[ty][2*tx+1]) << BITS_PER_SUM);
bcb[ty][tx] = (acb[ty][2*tx] + acb[ty][2*tx + 1]) + ((acb[ty][2*tx] - acb[ty][2*tx+1]) << BITS_PER_SUM);
}
__syncthreads();
if(tx == 3)
{
// 4 threads work to calculate the value
if(ty == 0)
{
int a0 = ay[3][0];
int a1 = ay[3][1];
int a2 = ay[3][2];
int a3 = ay[3][3];
int sumy = 0 ;
int symcr = 0 ;
int sumcb = 0 ;
for (int i = 0; i < 2; i++)
{
HADAMARD4(a0,a1,a2,a3, hby[0][i], hby[1][i], hby[2][i], hby[3][i]);
a0 = abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
y_satd_shared[mode] += ((sum_t)a0) + (a0 >> BITS_PER_SUM);
}
y_satd_shared[mode] = (y_satd_shared[mode] << 8) | mode;
}
if(ty == 1)
{
int a0 = acr[3][0];
int a1 = acr[3][1];
int a2 = acr[3][2];
int a3 = acr[3][3];
for (int i = 0; i < 2; i++)
{
HADAMARD4(a0,a1,a2,a3, bcr[0][i], bcr[1][i], bcr[2][i], bcr[3][i]);
a0 = abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
cr_satd_shared[mode] += (a0) + (a0 >> BITS_PER_SUM);
}
cr_satd_shared[mode] = (cr_satd_shared[mode] << 8) | mode;
}
if(ty == 2)
{
int a0 = acb[3][0];
int a1 = acb[3][1];
int a2 = acb[3][2];
int a3 = acb[3][3];
for (int i = 0; i < 2; i++)
{
HADAMARD4(a0,a1,a2,a3, bcb[0][i], bcb[1][i], bcb[2][i], bcb[3][i]);
a0 = abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
cb_satd_shared[mode] += (a0) + (a0 >> BITS_PER_SUM);
}
cb_satd_shared[mode] = (cb_satd_shared[mode] << 8) | mode;
}
}
// TO DO : Also store the sum values appropriately into the resultant array
// TO DO : Write the same HADAMARD4 macro from the serial code
} // if ( 4 == bsize) // end of SATD 4 COMPUTATION
*/
} // End of for(int mode =0;mode <35;mode++)
/*
__syncthreads();
if ( 0 == ty && 0 == tx )
{
sort(y_satd_shared);
extract(y_satd_shared, res_y, y_modes);
sort(cr_satd_shared);
extract(cr_satd_shared, res_cr, cr_modes);
sort(cb_satd_shared);
extract(cb_satd_shared, res_cb, cb_modes);
}
*/
} // End of kernel function hevcPredictionKernel()
| 79328b0785c5657c4879b2781ab6dc905f08d864.cu | /////////////////////////////////////////////////////////////////////////////
//****************************************************************************
//
// FILE NAME: kernel.cu
//
// DESCRIPTION: This is the source file containing the kernel
// for the HEVC encoding
//
// OPERATING SYSTEM: Linux UNIX only
// TESTED ON:
//
// CHANGE ACTIVITY:
// Date Who Description
// ========== ======= ===============
// 12-11-2013 Initial creation
//
//****************************************************************************
//////////////////////////////////////////////////////////////////////////////
#include <stdio.h>
#include <math.h>
#define ZERO 0
#define ONE 1
#define TWO 2
#define THREE 3
#define MINUS -1
#define DC_MODE 1
#define PLANAR_MODE 0
#define BITDEPTHY 8
#define BITDEPTHC 8
#define ANGULAR_18 18
#define ANGULAR_26 26
#define ANGULAR_10 10
#define TOTAL_MODES 35
#define MAX_BLOCK_SIZE 32
#define IA_MODES 16
#define BITS_PER_SUM (8 * sizeof(sum_t))
#define HADAMARD4(d0, d1, d2, d3, s0, s1, s2, s3) { \
sum2_t t0 = s0 + s1; \
sum2_t t1 = s0 - s1; \
sum2_t t2 = s2 + s3; \
sum2_t t3 = s2 - s3; \
d0 = t0 + t2; \
d2 = t0 - t2; \
d1 = t1 + t3; \
d3 = t1 - t3; \
}
#define abs(x) ( ( (x) < 0 ) ? -(x) : (x) )
#define min(x,y) ( (x) < (y) ? (x) : (y) )
//////////////////
// CONSTANT MEMORY
//////////////////
__device__ __constant__ int ipa[TOTAL_MODES] = {0, 0, 32, 26, 21, 17, 13, 9, 5, 2, 0, -2, -5, -9, -13, -17, -21, -26, -32, -26, -21, -17, -13, -9, -5, -2, 0, 2, 5, 9, 13, 17, 21, 26, 32};
__device__ __constant__ int ia[IA_MODES] = {-4096, -1638, -910, -630, -482, -390, -315, -256, -315, -390, -482, -630, -910, -1638, -4096};
__device__ int sumArray(uint8_t *array, uint8_t start, uint8_t end)
{
int result = 0;
for ( int counter = start; counter <= end; counter++ )
result += array[counter];
return result;
} // End of sumArray()
__device__ uint8_t clip3(uint8_t x, uint8_t y, uint8_t z)
{
if ( z < x )
return x;
else if ( z > y )
return y;
else
return z;
} // End of clip3()
__device__ uint8_t clip1Y(uint8_t x)
{
uint8_t ret = clip3(0, ( 1 << BITDEPTHY ) - 1, x);
return ret;
} // End of clip1Y()
__device__ sum2_t abs2(sum2_t a)
{
sum2_t s = ((a >> (BITS_PER_SUM - 1)) & (((sum2_t)1 << BITS_PER_SUM) + 1)) * ((sum_t)-1);
return (a + s) ^ s;
}
__device__ void sort(int32_t* input_values)
{
for(int i =0;i<TOTAL_MODES;i++)
{
int j=i;
while(j>0 && input_values[j] < input_values[j-1])
{
int32_t temp=input_values[j];
input_values[j]=input_values[j-1];
input_values[j-1]=temp;
j--;
}
}
} // End of sort()
__device__ void extract(int32_t *sorted_values, int32_t *res, uint8_t *modes)
{
for ( int counter = 0; counter < TOTAL_MODES; counter++)
{
uint8_t mode = sorted_values[counter] >> 8 & 0XFF;
int32_t value = sorted_values[counter] >> 8;
res[counter] = value;
modes[counter] = mode;
}
} // End of extract()
//////////////////////////////////////////////////////
//////////////////////////////////////////////////////
////////////// KERNEL FUNCTION /////////////////////
//////////////////////////////////////////////////////
//////////////////////////////////////////////////////
/*
__global__ void hevcPredictionKernel(uint8_t *y, uint8_t *cr, uint8_t *cb, int32_t *res_y, int32_t *res_cr, int32_t *res_cb, uint8_t *y_modes, uint8_t *cr_modes, uint8_t *cb_modes, int height, int width)
{
printf("\nYUP I AM HERE\n");
}
*/
__global__ void hevcPredictionKernel(uint8_t *y, uint8_t *cr, uint8_t *cb, int32_t *res_y, int32_t *res_cr, int32_t *res_cb, uint8_t *y_modes, uint8_t *cr_modes, uint8_t *cb_modes, int height, int width)
{
// Thread indices, Block Indices and Dimensions
uint8_t bsize = blockDim.x;
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
// Thread Index to Data Index Mapping
int col = tx + blockDim.x * bx;
int row = ty + blockDim.y * by;
if ( 0 == tx && 0 == ty && row == 0 && col == 0)
printf("\n YUP I AM HERE \n");
// Shared neighbour memory
int neighbourArraySize = (bsize * TWO) + ONE;
int bitDepthY=BITDEPTHY;
int bitDepthC=BITDEPTHC;
int rowToBeLoaded=0;
int colToBeLoaded=0;
int var = 3;
int var1 = 3;
/////////
// Neighbour Array
////////
// y is vertical array that has the extra element that is [-1][-1]
// x is horizontal component
// Neighbour Array for luma component
__device__ __shared__ uint8_t p_yy[MAX_BLOCK_SIZE*2+1];
__device__ __shared__ uint8_t p_xy[MAX_BLOCK_SIZE*2+1];
// Neighbour array for chroma component
__device__ __shared__ uint8_t p_ycr[MAX_BLOCK_SIZE*2+1];
__device__ __shared__ uint8_t p_ycb[MAX_BLOCK_SIZE*2+1];
__device__ __shared__ uint8_t p_xcr[MAX_BLOCK_SIZE*2+1];
__device__ __shared__ uint8_t p_xcb[MAX_BLOCK_SIZE*2+1];
// Pointer to neighbour elements in shared memory
uint8_t *pyy = &p_yy[ONE];
uint8_t *pxy = &p_xy[ZERO];
uint8_t *pycr = &p_ycr[ONE];
uint8_t *pxcr = &p_xcr[ZERO];
uint8_t *pycb = &p_ycb[ONE];
uint8_t *pxcb = &p_xcb[ZERO];
// Points to the top-right-most block for which all
// the neighbour elements fall outside the image boundaries
unsigned int fallOutside = 0;
// This is to take care of the top right corner blocks in the grid
// OPTIMIZATION
if ( (0 == bx && 0 == by) )
fallOutside = 1;
/// DEBUG
//if ( fallOutside )
//printf("\nI AM FALLING OUTSIDE\n");
/// DEBUG
/*
if ( blockIdx.x == 0 && by == 0 && tx == 0 && ty == 0 )
{
printf("\nINPUT MATRIX WIDTH: %d HEIGHT: %d\n", width, height);
for ( int i = 0 ; i < width; i++)
{
for (int j = 0; j < height; j++ )
{
printf("\t%u", y[i*width+j]);
}
printf("\n");
}
}
__syncthreads();
*/
//////////////////////////////////
//////////////////////////////////
// Step 1: LOAD NEIGHBOUR ELEMENTS
//////////////////////////////////
//////////////////////////////////
// Load into the shared memory from global memory
// The loading is done based on a row basis
// Load luma elements
if ( ZERO == ty )
{
rowToBeLoaded=row-1;
colToBeLoaded=col;
/// DEBUG
/*
if ( var == bx && var1 == by )
printf("\nRow: %d col: %d rowTO: %d colTO: %d\n", row, col, rowToBeLoaded, colToBeLoaded);
*/
if((rowToBeLoaded>=0 && rowToBeLoaded<height && colToBeLoaded>=0 && colToBeLoaded<width) || fallOutside)
{
pxy[tx] = (fallOutside == 1) ? (1 << (bitDepthY -1)) : y[(rowToBeLoaded*width)+colToBeLoaded];
pxcr[tx] = (fallOutside == 1) ? (1 << (bitDepthC - 1)) : cr[(rowToBeLoaded*width)+colToBeLoaded];
pxcb[tx] = (fallOutside == 1) ? (1 << (bitDepthC - 1)) : cb[(rowToBeLoaded*width)+colToBeLoaded];
}
}
else if ( ONE == ty )
{
rowToBeLoaded=row-2;
colToBeLoaded=col+blockDim.x;
/// DEBUG
/*
if ( var == bx && var1 == by )
printf("\nRow: %d col: %d rowTO: %d colTO: %d\n", row, col, rowToBeLoaded, colToBeLoaded);
*/
if((rowToBeLoaded>=0 && rowToBeLoaded<height && colToBeLoaded>=0 && colToBeLoaded<width) || fallOutside)
{
pxy[tx + bsize] = (fallOutside == 1) ? (1 << (bitDepthY - 1)) : y[(rowToBeLoaded*width)+colToBeLoaded];
pxcr[tx + bsize] = (fallOutside == 1) ? (1 << (bitDepthC - 1)) : (cr[(rowToBeLoaded*width)+colToBeLoaded]);
pxcb[tx + bsize] = (fallOutside == 1) ? (1 << (bitDepthC - 1)) : (cb[(rowToBeLoaded*width)+colToBeLoaded]);
}
}
else if ( TWO == ty )
{
rowToBeLoaded=(row-2)+tx;
colToBeLoaded=blockDim.x*blockIdx.x-1;
/// DEBUG
/*
if ( var == bx && var1 == by )
printf("\nRow: %d col: %d rowTO: %d colTO: %d\n", row, col, rowToBeLoaded, colToBeLoaded);
*/
if((rowToBeLoaded>=0 && rowToBeLoaded<height && colToBeLoaded>=0 && colToBeLoaded<width) || fallOutside)
{
pyy[tx] = (fallOutside == 1) ? (1 << (bitDepthY - 1)) : y[rowToBeLoaded*width + colToBeLoaded];
pycr[tx] = (fallOutside == 1) ? (1 << (bitDepthC - 1)) : (cr[rowToBeLoaded*width + colToBeLoaded]);
pycb[tx] = (fallOutside == 1) ? (1 << (bitDepthC - 1)) : (cb[rowToBeLoaded*width + colToBeLoaded]);
}
}
else if ( THREE == ty )
{
rowToBeLoaded=(row+1)+tx;
colToBeLoaded=blockIdx.x*blockDim.x-1;
/// DEBUG
/*
if ( var == bx && var1 == by )
printf("\nRow: %d col: %d rowTO: %d colTO: %d\n", row, col, rowToBeLoaded, colToBeLoaded);
*/
if((rowToBeLoaded>=0 && rowToBeLoaded<height && colToBeLoaded>=0 && colToBeLoaded<width) || fallOutside)
{
pyy[tx + bsize] = (fallOutside == 1) ? (1 << (bitDepthY - 1)) : y[rowToBeLoaded*width + colToBeLoaded];
pycr[tx + bsize] = (fallOutside == 1) ? (1 << (bitDepthC - 1)) : (cr[rowToBeLoaded*width + colToBeLoaded]);
pycb[tx + bsize] = (fallOutside == 1) ? (1 << (bitDepthC - 1)) : (cb[rowToBeLoaded *width + colToBeLoaded]);
}
}
else
{
// Nothing to do here
}
    // Load the one extra element of the neighbour arrays (the [-1] entry of
    // pyy, pycr and pycb) that is not filled by any thread above
if ( 0 == tx && 0 == ty )
{
if ( ! ((0 == bx) || (0 == by)) )
{
            // Top-left corner neighbour: element [-1] of pyy/pycr/pycb
            rowToBeLoaded=row-1;
            colToBeLoaded=col-1;
            if(rowToBeLoaded>=0 && rowToBeLoaded<height && colToBeLoaded>=0 && colToBeLoaded<width)
            {
                pyy[MINUS] = y[rowToBeLoaded*width + colToBeLoaded];
                pycr[MINUS] = cr[rowToBeLoaded*width + colToBeLoaded];
                pycb[MINUS] = cb[rowToBeLoaded*width + colToBeLoaded];
}
} // End of if ( ! ((0 == bx) || (0 == by)) )
if ( fallOutside)
{
pyy[MINUS] = 1 << (bitDepthY - 1);
pycr[MINUS] = 1 << (bitDepthC - 1);
pycb[MINUS] = 1 << (bitDepthC - 1);
}
} // End of if ( 0 == tx && 0 == ty )
__syncthreads();
/// DEBUG
/*
if ( blockIdx.x == var && blockIdx.y == var1 && tx == 0 && ty == 0 )
{
printf("\nPREDICTED MATRIX - PYY\n");
for ( int i = 0 ; i < 2*bsize+1; i++)
{
printf("\t%u", p_yy[i]);
}
printf("\nPREDICTED MATRIX - PXY\n");
for ( int i = 0 ; i < 2*bsize; i++)
{
printf("\t%u", p_xy[i]);
}
}
*/
//////////////////////////
//////////////////////////
// Step 2: First Filtering
//////////////////////////
//////////////////////////
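    // Blocks on the image border have no real neighbours on some sides, so the
    // available reference samples are replicated into the missing positions.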
if ( ZERO == tx && ZERO == ty )
{
if (by==(gridDim.y-1))
{
if(bx==ZERO)
{
for(int i=0;i<neighbourArraySize-1;i++)
{
pyy[i]=pxy[ZERO];
pycr[i] = pxcr[ZERO];
pycb[i] = pxcb[ZERO];
}
pyy[MINUS] = pxy[ZERO];
pycr[MINUS] = pxcr[ZERO];
pycb[MINUS] = pxcb[ZERO];
}
else
{
for(int i=bsize;i<(2*bsize);i++)
{
pyy[i]=pyy[bsize-ONE];
pycr[i] = pycr[bsize-ONE];
pycb[i] = pycb[bsize-ONE];
}
}
} // End of if (by==(gridDim.y-1))
if(0==by && !fallOutside)
{
pyy[MINUS]=pyy[ZERO];
pycr[MINUS] = pycr[ZERO];
pycb[MINUS] = pycb[ZERO];
for(int i=0;i<2*bsize;i++)
{
pxy[i]=pyy[MINUS];
pxcr[i]=pycr[MINUS];
pxcb[i]=pycb[MINUS];
}
} // End of if ( 0 == by )
if((bx == (gridDim.x - 1)) && (0 != by))
{
for ( int i = bsize; i < (2 * bsize); i++ )
{
pxy[i] = pxy[bsize - 1];
pxcr[i] = pxcr[bsize - 1];
pxcb[i] = pxcb[bsize - 1];
}
}
} // End of if ( ZERO == tx && ZERO == ty )
__syncthreads();
/// DEBUG
/*
if ( blockIdx.x == var && blockIdx.y == var1 && tx == 0 && ty == 0 )
{
printf("\nPREDICTED MATRIX - PYY\n");
for ( int i = 0 ; i < 2*bsize+1; i++)
{
printf("\t%u", p_yy[i]);
}
printf("\nPREDICTED MATRIX - PXY\n");
for ( int i = 0 ; i < 2*bsize; i++)
{
printf("\t%u", p_xy[i]);
}
}
*/
/////////////////////////////////////////////////
/////////////////////////////////////////////////
// STEP 3 : MODE COMPUTATION AND SECOND FILTERING
/////////////////////////////////////////////////
/////////////////////////////////////////////////
// TO DO
/////////
// Second Filtered neighbour array
/////////
__device__ __shared__ uint8_t pf_yy[MAX_BLOCK_SIZE*2+1];
__device__ __shared__ uint8_t pf_xy[MAX_BLOCK_SIZE*2+1];
////////
// Predicted pixels
///////
__device__ __shared__ uint8_t predSamplesY[MAX_BLOCK_SIZE][MAX_BLOCK_SIZE];
__device__ __shared__ uint8_t predSamplesCr[MAX_BLOCK_SIZE][MAX_BLOCK_SIZE];
__device__ __shared__ uint8_t predSamplesCb[MAX_BLOCK_SIZE][MAX_BLOCK_SIZE];
// Pointer to predicted pixels
uint8_t *pfyy = &pf_yy[ONE];
uint8_t *pfxy = &pf_xy[ZERO];
//////
// Hadamard shared memory
//////
__device__ __shared__ uint8_t ay[MAX_BLOCK_SIZE][MAX_BLOCK_SIZE];
__device__ __shared__ uint8_t acr[MAX_BLOCK_SIZE][MAX_BLOCK_SIZE];
__device__ __shared__ uint8_t acb[MAX_BLOCK_SIZE][MAX_BLOCK_SIZE];
__device__ __shared__ uint8_t hby[MAX_BLOCK_SIZE][MAX_BLOCK_SIZE/2];
__device__ __shared__ uint8_t bcr[MAX_BLOCK_SIZE][MAX_BLOCK_SIZE/2];
__device__ __shared__ uint8_t bcb[MAX_BLOCK_SIZE][MAX_BLOCK_SIZE/2];
__device__ __shared__ int32_t y_satd_shared[TOTAL_MODES];
__device__ __shared__ int32_t cr_satd_shared[TOTAL_MODES];
__device__ __shared__ int32_t cb_satd_shared[TOTAL_MODES];
__device__ __shared__ int32_t y_modes_shared[TOTAL_MODES];
__device__ __shared__ int32_t cr_modes_shared[TOTAL_MODES];
__device__ __shared__ int32_t cb_modes_shared[TOTAL_MODES];
// Loop through all modes
for(int mode =0;mode <35;mode++)
{
// if the computed value of filterFlag==1, use the filtered array pF instead of p for intra prediction.
        // filterFlag/biIntFlag are set by thread (0,0) but filterFlag is read by every
        // thread after the __syncthreads() below, so they must be shared across the block.
        __shared__ int filterFlag;
        __shared__ int biIntFlag;
        if(ty==0 && tx==0)
        {
            filterFlag=0;
            biIntFlag=0;
//////////////
// FILTER FLAG
//////////////
if(mode==DC_MODE || bsize==4)
{
filterFlag=0;
}
else
{
int minDistVerHor = min(abs(mode-26),abs(mode-10));
int intraHorVerDistThres;
if(bsize==8)
{
intraHorVerDistThres=7;
}
else if(bsize==16)
{
intraHorVerDistThres=1;
}
else if(bsize==32)
{
intraHorVerDistThres=0;
}
else
{
                    // Nothing to do
}
if(minDistVerHor>intraHorVerDistThres)
{
filterFlag=1;
}
else
{
filterFlag = 0;
}
} // End of else of if ( mode == DC_MODE || bsize == 4 )
if(filterFlag==1)
{
/////////////
// B INT FLAG
/////////////
if(bsize==32 && ( abs ( pyy[-1] + pxy[bsize*2-1] - (2*pxy[bsize-1]) ) < (1<<(bitDepthY-5) ) ) && ( abs ( pyy[-1] + pyy[bsize*2-1] - (2*pyy[bsize-1]) ) < (1<<(bitDepthY-5) ) ))
{
biIntFlag=1;
}
else
{
biIntFlag = 0;
}
} // End of if ( 1 == filterFlag )
///////////////////
// SECOND FILTERING
///////////////////
if(biIntFlag==1)
{
pfyy[MINUS]=pyy[MINUS];
for(int i=0;i<(bsize*2-1);i++)
{
pfyy[i]=((63-i)*pyy[MINUS]+(i+1)*pyy[63]+32)>>6;
}
pfyy[63]=pyy[63];
for(int i=0;i<(bsize*2-1);i++)
{
pfxy[i]=((63-i)*pyy[MINUS]+(i+1)*pxy[63]+32)>>6;
}
pfxy[63]=pxy[63];
} // End of if ( 1 == biIntFlag )
else
{
pfyy[MINUS]=(pyy[ZERO]+2*pyy[MINUS]+pxy[ZERO]+2)>>2;
for(int i=0;i<(bsize*2-1);i++)
{
pfyy[i]=(pyy[i+1]+2*pyy[i]+pyy[i-1]+2)>>2;
}
pfyy[bsize*2-1]=pyy[bsize*2-1];
pfxy[0] = (pyy[MINUS] + 2 * pxy[ZERO] + pxy[ONE] + 2) >> 2;
for(int i=1;i<(bsize*2-1);i++)
{
pfxy[i]=(pxy[i-1]+2*pxy[i]+pxy[i+1]+2)>>2;
}
pfxy[bsize*2-1]=pxy[bsize*2-1];
            } // End of else of if ( 1 == biIntFlag )
} // End of if(ty==0 && tx==0)
__syncthreads();
//////////////
// Switch pointer to pfyy or p_yy
// Switch pointer to pfxy or p_xy
/////////////
uint8_t *selyy, *selxy;
if(filterFlag==1)
{
selyy=&pf_yy[ONE];
selxy=&pf_xy[ZERO];
}
else
{
selyy=pyy;
selxy=pxy;
}
__device__ __shared__ uint8_t ref_Y[3*MAX_BLOCK_SIZE+1];
__device__ __shared__ uint8_t ref_Cr[3*MAX_BLOCK_SIZE+1];
__device__ __shared__ uint8_t ref_Cb[3*MAX_BLOCK_SIZE+1];
// Pointer to ref arrays
uint8_t *refY = &ref_Y[4];
uint8_t *refCr = &ref_Cr[4];
uint8_t *refCb = &ref_Cb[4];
// OPTIMIZATION making iIdx and IFact as matrices
__device__ __shared__ int iIdx[MAX_BLOCK_SIZE][MAX_BLOCK_SIZE];
__device__ __shared__ int iFact[MAX_BLOCK_SIZE][MAX_BLOCK_SIZE];
////////////////////
// MODE: PLANAR MODE
////////////////////
// TO DO : is this ty tx
if(mode==PLANAR_MODE)
{
float logValue = log2f(bsize+1.0);
int intLog = (int) logValue;
predSamplesY[ty][tx]=((bsize-1-tx)*selyy[ty]+(tx+1)*selxy[bsize]+(bsize-1-ty)*selxy[tx]+(ty+1)*selyy[bsize]+bsize)>>intLog; //TO_DO: Replace logarithmic with appropriate C function
predSamplesCr[ty][tx]=((bsize-1-tx)*pycr[ty]+(tx+1)*pxcr[bsize]+(bsize-1-ty)*pxcr[tx]+(ty+1)*pycr[bsize]+bsize)>>intLog; //TO_DO: Replace logarithmic with appropriate C function
predSamplesCb[ty][tx]=((bsize-1-tx)*pycb[ty]+(tx+1)*pxcb[bsize]+(bsize-1-ty)*pxcb[tx]+(ty+1)*pycb[bsize]+bsize)>>intLog; //TO_DO: Replace logarithmic with appropriate C function
}
////////////////
// MODE: DC MODE
////////////////
else if ( DC_MODE == mode )
{
            // Partial sums and DC values are produced by designated threads and read by
            // other threads after the __syncthreads() calls below, so they are kept in
            // shared memory (int also avoids overflowing the 8-bit pixel sums).
            __shared__ int dcValY, dcValCr, dcValCb;
            __shared__ int firstSumY, secondSumY;
            __shared__ int firstSumCr, secondSumCr;
            __shared__ int firstSumCb, secondSumCb;
//OPTIMIZATION
if ( 0 == tx && 0 == ty )
{
firstSumY = sumArray(selxy, 0, bsize - 1);
}
else if ( 1 == tx && 0 == ty )
{
secondSumY = sumArray(selyy, 0, bsize - 1);
}
else if ( 2 == tx && 0 == ty )
{
firstSumCr = sumArray(pxcr, 0, bsize - 1);
}
else if ( 3 == tx && 0 == ty )
{
secondSumCr = sumArray(pycr, 0, bsize - 1);
if(bsize==4)
{
firstSumCb = sumArray(pxcb, 0, bsize - 1);
secondSumCb = sumArray(pycb, 0, bsize - 1);
}
}
else if ( 4 == tx && 0 == ty && bsize!=4 )
{
firstSumCb = sumArray(pxcb, 0, bsize - 1);
}
else if ( 5 == tx && 0 == ty && bsize!=4)
{
secondSumCb = sumArray(pycb, 0, bsize - 1);
}
__syncthreads();
if ( 0 == tx && 0 == ty )
{
dcValY = (firstSumY + secondSumY + bsize) >> ((int)log2f((float)bsize)+1);
}
else if ( 1 == tx && 0 == ty )
{
dcValCr = (firstSumCr + secondSumCr + bsize) >> ((int)log2f((float)bsize)+1);
}
else if ( 2 == tx && 0 == ty )
{
dcValCb = (firstSumCb + secondSumCb + bsize) >> ((int)log2f((float)bsize)+1);
}
__syncthreads();
if ( bsize < 32 )
{
//Apply following changes to predSamples only for luma channel
if(ZERO == ty && ZERO == tx)
predSamplesY[0][0]=(selyy[ZERO]+2*dcValY+selxy[0]+2)>>2;
if(ONE == ty && tx>=ONE && tx<=bsize-1)
predSamplesY[0][tx]=(selxy[tx]+3*dcValY+2)>>2;
if(TWO == ty && tx>=ONE && tx<=bsize-1)
predSamplesY[tx][0]=(selyy[tx]+3*dcValY+2)>>2;
if(tx >0 && ty >0)
predSamplesY[tx][ty]=dcValY;
} // End of if ( bsize < 32 )
else
{
                //Fill the whole predSamples block of Y, Cr and Cb with their DC values
predSamplesY[ty][tx] = dcValY;
predSamplesCr[ty][tx]=dcValCr;
predSamplesCb[ty][tx]=dcValCb;
} // End of else of if ( bsize < 32 )
} // End of else if ( DC_MODE == mode )
///////////////
// ANGULAR MODE
///////////////
else if ( mode >= ANGULAR_18 )
{
// OPTIMIZATION
if ( bsize == 4 )
{
if ( 0 == ty )
{
if(tx==0){
refY[0]=selyy[MINUS];
refCr[0]=pycr[MINUS];
refCb[0]=pycb[MINUS];
}
else{
refY[tx] = selxy[-1 + tx];
refCr[tx] = pxcr[-1 + tx];
refCb[tx] = pxcb[-1 + tx];
}
if ( 0 == tx )
{
refY[bsize+tx] = selxy[-1 + (tx + bsize)];
refCr[bsize+tx] = pxcr[-1 + (tx + bsize)];
refCb[bsize+tx] = pxcb[-1 + (tx + bsize)];
}
}
if (ipa[mode] < 0)
{
if ( ((bsize * ipa[mode]) >> 5) < -1 )
{
if ( 1 == ty )
{
refY[-(tx + 1)] = selyy[ -1 + (( tx + 1 ) * ia[mode-11] + 128) >> 8];
}
if ( 2 == ty )
{
refCr[-(tx + 1)] = pycr[ -1 + (( tx + 1 ) * ia[mode-11] + 128) >> 8];
refCb[-(tx + 1)] = pycb[ -1 + (( tx + 1 ) * ia[mode-11] + 128) >> 8];
}
} // End of if ( ((bsize * ipa[mode]) >> 5) < -1 )
} // End of if (ipa[mode] < 0)
else
{
if ( 3 == ty )
{
refY[tx + bsize + 1] = selxy[-1 + tx + bsize + 1];
refCr[tx + bsize + 1] = pxcr[-1 + tx + bsize + 1];
refCb[tx + bsize + 1] = pxcb[-1 + tx + bsize + 1];
}
} // End of else of if (ipa[mode] < 0)
} // End of if ( bsize == 4 )
else
{
if ( 0 == ty )
{
if(tx==0){
refY[0]=selyy[MINUS];
}
else
refY[tx] = selxy[-1 + tx];
if ( 0 == tx )
refY[bsize + tx] = selxy[-1 + (tx + bsize)];
}
if ( 1 == ty )
{
if(tx==0){
refCr[0]=pycr[MINUS];
}
else
refCr[tx] = pxcr[-1 + tx];
if ( 0 == tx )
refCr[bsize+tx] = pxcr[-1 + (tx + bsize)];
}
if ( 2 == ty )
{
if(tx==0){
refCb[0]=pycb[MINUS];
}
else
refCb[tx] = pxcb[-1 + tx];
if ( 0 == tx )
refCb[bsize+tx] = pxcb[-1 + (tx + bsize)];
}
if (ipa[mode] < 0)
{
if ( ((bsize * ipa[mode]) >> 5) < -1 )
{
if ( 3 == ty )
{
refY[-(tx + 1)] = selyy[ -1 + (( tx + 1 ) * ia[mode-11] + 128) >> 8];
}
if ( 4 == ty )
{
refCr[-(tx + 1)] = pycr[ -1 + (( tx + 1 ) * ia[mode-11] + 128) >> 8];
}
if ( 5 == ty )
{
refCb[-(tx + 1)] = pycb[ -1 + (( tx + 1 ) * ia[mode-11] + 128) >> 8];
}
} // End of if ( ((bsize * ipa[mode]) >> 5) < -1 )
} // End of if (ipa[mode] < 0)
else
{
if ( 6 == ty )
refY[tx + bsize + 1] = selxy[-1 + tx + bsize + 1];
if ( 7 == ty )
{
refCr[tx + bsize + 1] = pxcr[-1 + tx + bsize + 1];
if ( bsize == 8 )
refCb[tx + bsize + 1] = pxcb[-1 + tx + bsize + 1];
}
if ( 8 == ty && bsize != 8 )
{
refCb[tx + bsize + 1] = pxcb[-1 + tx + bsize + 1];
}
} // End of else of if (ipa[mode] < 0)
} // End of else of if ( bsize == 4 )
// Load iIdx and iFact
iIdx[ty][tx] = ((ty+1) * ipa[mode]) >> 5;
iFact[ty][tx] = ((ty+1) * ipa[mode]) & 31;
if ( iFact[ty][tx] != 0 )
{
predSamplesY[ty][tx] = ((32 - iFact[ty][tx]) * refY[tx + iIdx[ty][tx] + 1] + iFact[ty][tx] * refY[tx + iIdx[ty][tx] + 2] + 16) >> 5;
predSamplesCr[ty][tx] = ((32 - iFact[ty][tx]) * refCr[tx + iIdx[ty][tx] + 1] + iFact[ty][tx] * refCr[tx + iIdx[ty][tx] + 2] + 16) >> 5;
predSamplesCb[ty][tx] = ((32 - iFact[ty][tx]) * refCb[tx + iIdx[ty][tx] + 1] + iFact[ty][tx] * refCb[tx + iIdx[ty][tx] + 2] + 16) >> 5;
}
else
{
predSamplesY[ty][tx] = refY[tx + iIdx[ty][tx] + 1];
predSamplesCr[ty][tx] = refCr[tx + iIdx[ty][tx] + 1];
predSamplesCb[ty][tx] = refCb[tx + iIdx[ty][tx] + 1];
}
if ( mode == ANGULAR_26 && bsize < 32 )
{
if ( 0 == tx )
{
uint8_t param = selxy[tx] + ((selyy[ty] - selyy[MINUS]) >> 1);
predSamplesY[ty][tx] = clip1Y(param);
}
} // End of if ( mode == ANGULAR_26 && bsize < 32 )
} // End of else if ( mode >= ANGULAR_18 )
else if ( mode > DC_MODE && mode < ANGULAR_18 )
{
if ( 4 == bsize )
{
if ( 0 == ty )
{
refY[tx] = selyy[-1 + tx];
refCr[tx] = pycr[-1 + tx];
refCb[tx] = pycb[-1 + tx];
if ( 0 == tx )
{
refY[bsize+tx] = selyy[-1 + (tx + bsize)];
refCr[bsize+tx] = pycr[-1 + (tx + bsize)];
refCb[bsize+tx] = pycb[-1 + (tx + bsize)];
}
}
if (ipa[mode] < 0)
{
if ( ((bsize * ipa[mode]) >> 5) < -1 )
{
if ( 1 == ty )
{
if((-1 + (( tx + 1 ) * ia[mode-11] + 128) >> 8)<0)
refY[-(tx + 1)] = selyy[MINUS];
else
refY[-(tx + 1)] = selxy[ -1 + (( tx + 1 ) * ia[mode-11] + 128) >> 8];
}
if ( 2 == ty )
{
if((-1 + (( tx + 1 ) * ia[mode-11] + 128) >> 8)<0){
refCr[-(tx + 1)] = pycr[MINUS];
refCb[-(tx + 1)] = pycb[MINUS];
}
else{
refCr[-(tx + 1)] = pxcr[ -1 + (( tx + 1 ) * ia[mode-11] + 128) >> 8];
refCb[-(tx + 1)] = pxcb[ -1 + (( tx + 1 ) * ia[mode-11] + 128) >> 8];
}
}
} // End of if ( ((bsize * ipa[mode]) >> 5) < -1 )
} // End of if (ipa[mode] < 0)
else
{
if ( 3 == ty )
{
refY[tx + bsize + 1] = selyy[-1 + tx + bsize + 1];
refCr[tx + bsize + 1] = pycr[-1 + tx + bsize + 1];
refCb[tx + bsize + 1] = pycb[-1 + tx + bsize + 1];
}
} // End of else of if (ipa[mode] < 0)
} // End of if ( 4 == bsize )
else
{
if ( 0 == ty )
{
refY[tx] = selyy[-1 + tx];
if ( 0 == tx )
refY[bsize + tx] = selyy[-1 + (tx + bsize)];
}
if ( 1 == ty )
{
refCr[tx] = pycr[-1 + tx];
if ( 0 == tx )
refCr[bsize+tx] = pycr[-1 + (tx + bsize)];
}
if ( 2 == ty )
{
refCb[tx] = pycb[-1 + tx];
if ( 0 == tx )
refCb[bsize+tx] = pycb[-1 + (tx + bsize)];
}
if (ipa[mode] < 0)
{
if ( ((bsize * ipa[mode]) >> 5) < -1 )
{
if ( 3 == ty )
{
if((-1 + (( tx + 1 ) * ia[mode-11] + 128) >> 8)<0)
refY[-(tx + 1)] = selyy[MINUS];
else
refY[-(tx + 1)] = selxy[ -1 + (( tx + 1 ) * ia[mode-11] + 128) >> 8];
}
if ( 4 == ty )
{
if((-1 + (( tx + 1 ) * ia[mode-11] + 128) >> 8)<0)
refCr[-(tx + 1)] = pycr[MINUS];
else
refCr[-(tx + 1)] = pxcr[ -1 + (( tx + 1 ) * ia[mode-11] + 128) >> 8];
}
if ( 5 == ty )
{
if((-1 + (( tx + 1 ) * ia[mode-11] + 128) >> 8)<0)
refCb[-(tx + 1)] = pycb[MINUS];
else
refCb[-(tx + 1)] = pxcb[ -1 + (( tx + 1 ) * ia[mode-11] + 128) >> 8];
}
} // End of if ( ((bsize * ipa[mode]) >> 5) < -1 )
} // End of if (ipa[mode] < 0)
else
{
if ( 6 == ty )
refY[tx + bsize + 1] = selyy[-1 + tx + bsize + 1];
if ( 7 == ty )
{
refCr[tx + bsize + 1] = pycr[-1 + tx + bsize + 1];
if ( bsize == 8 )
refCb[tx + bsize + 1] = pycb[-1 + tx + bsize + 1];
}
if ( 8 == ty && bsize != 8 )
{
refCb[tx + bsize + 1] = pycb[-1 + tx + bsize + 1];
}
} // End of else of if (ipa[mode] < 0)
} // End of else of if ( 4 == bsize )
// Load iIdx and iFact
iIdx[ty][tx] = ( (tx + 1) * ipa[mode] ) >> 5;
iFact[ty][tx] = ( (tx + 1) * ipa[mode] ) & 31;
if ( iFact[ty][tx] != 0 )
{
predSamplesY[ty][tx] = ((32 - iFact[ty][tx]) * refY[ty + iIdx[ty][tx] + 1] + iFact[ty][tx] * refY[ty + iIdx[ty][tx] + 2] + 16) >> 5;
predSamplesCr[ty][tx] = ((32 - iFact[ty][tx]) * refCr[ty + iIdx[ty][tx] + 1] + iFact[ty][tx] * refCr[ty + iIdx[ty][tx] + 2] + 16) >> 5;
predSamplesCb[ty][tx] = ((32 - iFact[ty][tx]) * refCb[ty + iIdx[ty][tx] + 1] + iFact[ty][tx] * refCb[ty + iIdx[ty][tx] + 2] + 16) >> 5;
}
else
{
predSamplesY[ty][tx] = refY[ty + iIdx[ty][tx] + 1];
predSamplesCr[ty][tx] = refCr[ty + iIdx[ty][tx] + 1];
predSamplesCb[ty][tx] = refCb[ty + iIdx[ty][tx] + 1];
}
if ( mode == ANGULAR_10 && bsize < 32 )
{
if ( 0 == tx )
predSamplesY[ty][tx] = clip1Y(( (selyy[ty]) + ((selxy[tx]-selyy[MINUS])>>1) ));
} // End of if ( mode == ANGULAR_10 && bsize < 32 )
        } // End of else if ( mode > DC_MODE && mode < ANGULAR_18 )
__syncthreads();
/*
///////////////////
// STEP 4: HADAMARD
///////////////////
// finally calculation of SATD values for different modes
// have A matrix which is a shared memory
// all the threads fill the 'A' array
if(bsize == 4)
{
// everybody computes the difference of pixels
ay[ty][tx] = predSamplesY[ty][tx] - y[row*width + col];
acr[ty][tx] = predSamplesCr[ty][tx] - cr[row*width + col];
acb[ty][tx] = predSamplesCb[ty][tx] - cb[row*width + col];
// construct the B-matrix : 8 threads are working
if(tx < 2)
{
hby[ty][tx] = (ay[ty][2*tx] + ay[ty][2*tx + 1]) + ((ay[ty][2*tx] - ay[ty][2*tx + 1]) << BITS_PER_SUM);
bcr[ty][tx] = (acr[ty][2*tx] + acr[ty][2*tx + 1]) + ((acr[ty][2*tx] - acr[ty][2*tx+1]) << BITS_PER_SUM);
bcb[ty][tx] = (acb[ty][2*tx] + acb[ty][2*tx + 1]) + ((acb[ty][2*tx] - acb[ty][2*tx+1]) << BITS_PER_SUM);
}
__syncthreads();
if(tx == 3)
{
// 4 threads work to calculate the value
if(ty == 0)
{
int a0 = ay[3][0];
int a1 = ay[3][1];
int a2 = ay[3][2];
int a3 = ay[3][3];
int sumy = 0 ;
int symcr = 0 ;
int sumcb = 0 ;
for (int i = 0; i < 2; i++)
{
HADAMARD4(a0,a1,a2,a3, hby[0][i], hby[1][i], hby[2][i], hby[3][i]);
a0 = abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
y_satd_shared[mode] += ((sum_t)a0) + (a0 >> BITS_PER_SUM);
}
y_satd_shared[mode] = (y_satd_shared[mode] << 8) | mode;
}
if(ty == 1)
{
int a0 = acr[3][0];
int a1 = acr[3][1];
int a2 = acr[3][2];
int a3 = acr[3][3];
for (int i = 0; i < 2; i++)
{
HADAMARD4(a0,a1,a2,a3, bcr[0][i], bcr[1][i], bcr[2][i], bcr[3][i]);
a0 = abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
cr_satd_shared[mode] += (a0) + (a0 >> BITS_PER_SUM);
}
cr_satd_shared[mode] = (cr_satd_shared[mode] << 8) | mode;
}
if(ty == 2)
{
int a0 = acb[3][0];
int a1 = acb[3][1];
int a2 = acb[3][2];
int a3 = acb[3][3];
for (int i = 0; i < 2; i++)
{
HADAMARD4(a0,a1,a2,a3, bcb[0][i], bcb[1][i], bcb[2][i], bcb[3][i]);
a0 = abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
cb_satd_shared[mode] += (a0) + (a0 >> BITS_PER_SUM);
}
cb_satd_shared[mode] = (cb_satd_shared[mode] << 8) | mode;
}
}
// TO DO : Also store the sum values appropriately into the resultant array
// TO DO : Write the same HADAMARD4 macro from the serial code
} // if ( 4 == bsize) // end of SATD 4 COMPUTATION
*/
} // End of for(int mode =0;mode <35;mode++)
/*
__syncthreads();
if ( 0 == ty && 0 == tx )
{
sort(y_satd_shared);
extract(y_satd_shared, res_y, y_modes);
sort(cr_satd_shared);
extract(cr_satd_shared, res_cr, cr_modes);
sort(cb_satd_shared);
extract(cb_satd_shared, res_cb, cb_modes);
}
*/
} // End of kernel function hevcPredictionKernel()
|
7597fc517a24a03808eb91ea55a139bd37ef0ff2.hip | // !!! This is a file automatically generated by hipify!!!
#include "gg.h"
#include "ggcuda.h"
#include "hipcub/hipcub.hpp"
#include "hipcub/hipcub.hpp"
#include "thread_work.h"
void kernel_sizing(CSRGraph &, dim3 &, dim3 &);
#define TB_SIZE 256
#include "moderngpu/kernel_reduce.hxx"
#include "tc_cuda.cuh"
#include "moderngpu/kernel_segsort.hxx"
#include <hip/hip_runtime_api.h>
mgpu::standard_context_t context;
#define WARP_SIZE 32
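// Two-pointer intersection of the sorted adjacency lists of u and v;
// returns the number of common neighbours (triangles closed through edge (u,v)).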
inline __device__ unsigned long intersect(CSRGraph graph, index_type u, index_type v) {
index_type u_start = graph.getFirstEdge(u);
index_type u_end = u_start + graph.getOutDegree(u);
index_type v_start = graph.getFirstEdge(v);
index_type v_end = v_start + graph.getOutDegree(v);
unsigned long count = 0;
index_type u_it = u_start;
index_type v_it = v_start;
index_type a;
index_type b;
while (u_it < u_end && v_it < v_end) {
a = graph.getAbsDestination(u_it);
b = graph.getAbsDestination(v_it);
int d = a - b;
if (d <= 0) u_it++;
if (d >= 0) v_it++;
if (d == 0) count++;
}
return count;
}
__global__ void base(CSRGraph graph, unsigned begin, unsigned end, HGAccumulator<unsigned long> num_local_triangles) {
unsigned tid = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long local_total = 0;
__shared__ hipcub::BlockReduce<unsigned long, TB_SIZE>::TempStorage num_local_triangles_ts;
num_local_triangles.thread_entry();
for (index_type src = begin + tid; src < end; src += TOTAL_THREADS_1D) {
index_type row_begin = graph.getFirstEdge(src);
index_type row_end = row_begin + graph.getOutDegree(src);
for (index_type offset = row_begin; offset < row_end; ++ offset) {
index_type dst = graph.getAbsDestination(offset);
local_total = intersect(graph, dst, src);
if (local_total) num_local_triangles.reduce(local_total);
}
}
num_local_triangles.thread_exit<hipcub::BlockReduce<unsigned long, TB_SIZE> >(num_local_triangles_ts);
}
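// Linear-scan alternative to binary_search(): returns true if key appears among
// the sorted destinations in [begin, end).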
inline __device__ bool serial_search(CSRGraph graph, unsigned key, index_type begin, index_type end) {
for (index_type offset = begin; offset < end; ++ offset) {
index_type d = graph.getAbsDestination(offset);
if (d == key) return true;
if (d > key) return false;
}
return false;
}
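// Binary search for key among the sorted edge destinations in [begin, end).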
inline __device__ bool binary_search(CSRGraph graph, index_type key, index_type begin, index_type end) {
assert(begin < end);
int l = begin;
int r = end-1;
while (r >= l) {
//assert(l<graph.nedges && r<graph.nedges);
int mid = l + (r - l) / 2;
if (mid >= graph.nedges) printf("mid=%u, l=%u, r=%u, begin=%u, end=%u, key=%u\n", mid, l, r, begin, end, key);
assert(mid < graph.nedges);
index_type value = graph.getAbsDestination(mid);
if (value == key) return true;
if (value < key) l = mid + 1;
else r = mid - 1;
}
return false;
}
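// Warp-per-vertex triangle counting: for every edge of its vertex, the lanes of a
// warp cooperatively probe the smaller adjacency list against the larger one with
// binary_search() and accumulate the matches.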
__global__ void warp(CSRGraph graph, unsigned begin, unsigned end, HGAccumulator<unsigned long> num_local_triangles) {
unsigned thread_id = blockIdx.x * blockDim.x + threadIdx.x;
unsigned thread_lane = threadIdx.x & (WARP_SIZE-1); // thread index within the warp
unsigned warp_id = thread_id / WARP_SIZE; // global warp index
unsigned num_warps = (TB_SIZE / WARP_SIZE) * gridDim.x; // total number of active warps
__shared__ hipcub::BlockReduce<unsigned long, TB_SIZE>::TempStorage num_local_triangles_ts;
num_local_triangles.thread_entry();
// each warp takes one vertex
for (index_type src = begin + warp_id; src < end; src += num_warps) {
index_type row_begin = graph.getFirstEdge(src);
index_type src_size = graph.getOutDegree(src);
index_type row_end = row_begin + src_size;
// take one edge
for (index_type offset = row_begin; offset < row_end; offset ++) {
index_type dst = graph.getAbsDestination(offset);
assert(src != dst);
index_type dst_size = graph.getOutDegree(dst);
index_type lookup = src;
index_type search = dst;
if (src_size > dst_size) {
lookup = dst;
search = src;
}
index_type lookup_begin = graph.getFirstEdge(lookup);
index_type lookup_size = graph.getOutDegree(lookup);
index_type search_size = graph.getOutDegree(search);
if (lookup_size > 0 && search_size > 0) {
for (index_type i = thread_lane; i < lookup_size; i += WARP_SIZE) {
index_type index = lookup_begin + i;
index_type key = graph.getAbsDestination(index);
index_type search_begin = graph.getFirstEdge(search);
if (binary_search(graph, key, search_begin, search_begin+search_size))
//if (serial_search(graph, key, search_begin, search_begin+search_size))
num_local_triangles.reduce(1);
}
}
}
}
num_local_triangles.thread_exit<hipcub::BlockReduce<unsigned long, TB_SIZE> >(num_local_triangles_ts);
}
void sortEdgesByDestination_cuda(struct CUDA_Context* ctx) {
mgpu::segmented_sort(ctx->gg.edge_dst, ctx->gg.nedges, (const int *) ctx->gg.row_start + 1, ctx->gg.nnodes - 1, mgpu::less_t<int>(), context);
}
void TC_cuda(unsigned __begin, unsigned __end, unsigned long & num_local_triangles, struct CUDA_Context* ctx) {
dim3 blocks;
dim3 threads;
kernel_sizing(blocks, threads);
HGAccumulator<unsigned long> _num_local_triangles;
Shared<unsigned long> num_local_trianglesval = Shared<unsigned long>(1);
*(num_local_trianglesval.cpu_wr_ptr()) = 0;
_num_local_triangles.rv = num_local_trianglesval.gpu_wr_ptr();
//mgc = mgpu::CreateCudaDevice(ctx->device);
//mgpu::SegSortKeysFromIndices(ctx->gg.edge_dst, ctx->gg.nedges, (const int *) ctx->gg.row_start + 1, ctx->gg.nnodes - 1, *mgc);
//base<<<blocks, TB_SIZE>>>(ctx->gg, __begin, __end, _num_local_triangles);
hipLaunchKernelGGL(( warp), dim3(blocks), dim3(TB_SIZE), 0, 0, ctx->gg, __begin, __end, _num_local_triangles);
hipDeviceSynchronize();
check_cuda_kernel;
num_local_triangles = *(num_local_trianglesval.cpu_rd_ptr());
//dump_memory_info("end", ctx->id);
hipProfilerStop();
//num_local_triangles = (unsigned)h_total;
}
void TC_masterNodes_cuda(unsigned long& num_local_triangles, struct CUDA_Context* ctx) {
TC_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, num_local_triangles, ctx);
}
| 7597fc517a24a03808eb91ea55a139bd37ef0ff2.cu | #include "gg.h"
#include "ggcuda.h"
#include "cub/cub.cuh"
#include "cub/util_allocator.cuh"
#include "thread_work.h"
void kernel_sizing(CSRGraph &, dim3 &, dim3 &);
#define TB_SIZE 256
#include "moderngpu/kernel_reduce.hxx"
#include "tc_cuda.cuh"
#include "moderngpu/kernel_segsort.hxx"
#include <cuda_profiler_api.h>
mgpu::standard_context_t context;
#define WARP_SIZE 32
inline __device__ unsigned long intersect(CSRGraph graph, index_type u, index_type v) {
index_type u_start = graph.getFirstEdge(u);
index_type u_end = u_start + graph.getOutDegree(u);
index_type v_start = graph.getFirstEdge(v);
index_type v_end = v_start + graph.getOutDegree(v);
unsigned long count = 0;
index_type u_it = u_start;
index_type v_it = v_start;
index_type a;
index_type b;
while (u_it < u_end && v_it < v_end) {
a = graph.getAbsDestination(u_it);
b = graph.getAbsDestination(v_it);
int d = a - b;
if (d <= 0) u_it++;
if (d >= 0) v_it++;
if (d == 0) count++;
}
return count;
}
__global__ void base(CSRGraph graph, unsigned begin, unsigned end, HGAccumulator<unsigned long> num_local_triangles) {
unsigned tid = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long local_total = 0;
__shared__ cub::BlockReduce<unsigned long, TB_SIZE>::TempStorage num_local_triangles_ts;
num_local_triangles.thread_entry();
for (index_type src = begin + tid; src < end; src += TOTAL_THREADS_1D) {
index_type row_begin = graph.getFirstEdge(src);
index_type row_end = row_begin + graph.getOutDegree(src);
for (index_type offset = row_begin; offset < row_end; ++ offset) {
index_type dst = graph.getAbsDestination(offset);
local_total = intersect(graph, dst, src);
if (local_total) num_local_triangles.reduce(local_total);
}
}
num_local_triangles.thread_exit<cub::BlockReduce<unsigned long, TB_SIZE> >(num_local_triangles_ts);
}
inline __device__ bool serial_search(CSRGraph graph, unsigned key, index_type begin, index_type end) {
for (index_type offset = begin; offset < end; ++ offset) {
index_type d = graph.getAbsDestination(offset);
if (d == key) return true;
if (d > key) return false;
}
return false;
}
inline __device__ bool binary_search(CSRGraph graph, index_type key, index_type begin, index_type end) {
assert(begin < end);
int l = begin;
int r = end-1;
while (r >= l) {
//assert(l<graph.nedges && r<graph.nedges);
int mid = l + (r - l) / 2;
if (mid >= graph.nedges) printf("mid=%u, l=%u, r=%u, begin=%u, end=%u, key=%u\n", mid, l, r, begin, end, key);
assert(mid < graph.nedges);
index_type value = graph.getAbsDestination(mid);
if (value == key) return true;
if (value < key) l = mid + 1;
else r = mid - 1;
}
return false;
}
__global__ void warp(CSRGraph graph, unsigned begin, unsigned end, HGAccumulator<unsigned long> num_local_triangles) {
unsigned thread_id = blockIdx.x * blockDim.x + threadIdx.x;
unsigned thread_lane = threadIdx.x & (WARP_SIZE-1); // thread index within the warp
unsigned warp_id = thread_id / WARP_SIZE; // global warp index
unsigned num_warps = (TB_SIZE / WARP_SIZE) * gridDim.x; // total number of active warps
__shared__ cub::BlockReduce<unsigned long, TB_SIZE>::TempStorage num_local_triangles_ts;
num_local_triangles.thread_entry();
// each warp takes one vertex
for (index_type src = begin + warp_id; src < end; src += num_warps) {
index_type row_begin = graph.getFirstEdge(src);
index_type src_size = graph.getOutDegree(src);
index_type row_end = row_begin + src_size;
// take one edge
for (index_type offset = row_begin; offset < row_end; offset ++) {
index_type dst = graph.getAbsDestination(offset);
assert(src != dst);
index_type dst_size = graph.getOutDegree(dst);
index_type lookup = src;
index_type search = dst;
if (src_size > dst_size) {
lookup = dst;
search = src;
}
index_type lookup_begin = graph.getFirstEdge(lookup);
index_type lookup_size = graph.getOutDegree(lookup);
index_type search_size = graph.getOutDegree(search);
if (lookup_size > 0 && search_size > 0) {
for (index_type i = thread_lane; i < lookup_size; i += WARP_SIZE) {
index_type index = lookup_begin + i;
index_type key = graph.getAbsDestination(index);
index_type search_begin = graph.getFirstEdge(search);
if (binary_search(graph, key, search_begin, search_begin+search_size))
//if (serial_search(graph, key, search_begin, search_begin+search_size))
num_local_triangles.reduce(1);
}
}
}
}
num_local_triangles.thread_exit<cub::BlockReduce<unsigned long, TB_SIZE> >(num_local_triangles_ts);
}
void sortEdgesByDestination_cuda(struct CUDA_Context* ctx) {
mgpu::segmented_sort(ctx->gg.edge_dst, ctx->gg.nedges, (const int *) ctx->gg.row_start + 1, ctx->gg.nnodes - 1, mgpu::less_t<int>(), context);
}
void TC_cuda(unsigned __begin, unsigned __end, unsigned long & num_local_triangles, struct CUDA_Context* ctx) {
dim3 blocks;
dim3 threads;
kernel_sizing(blocks, threads);
HGAccumulator<unsigned long> _num_local_triangles;
Shared<unsigned long> num_local_trianglesval = Shared<unsigned long>(1);
*(num_local_trianglesval.cpu_wr_ptr()) = 0;
_num_local_triangles.rv = num_local_trianglesval.gpu_wr_ptr();
//mgc = mgpu::CreateCudaDevice(ctx->device);
//mgpu::SegSortKeysFromIndices(ctx->gg.edge_dst, ctx->gg.nedges, (const int *) ctx->gg.row_start + 1, ctx->gg.nnodes - 1, *mgc);
//base<<<blocks, TB_SIZE>>>(ctx->gg, __begin, __end, _num_local_triangles);
warp<<<blocks, TB_SIZE>>>(ctx->gg, __begin, __end, _num_local_triangles);
cudaDeviceSynchronize();
check_cuda_kernel;
num_local_triangles = *(num_local_trianglesval.cpu_rd_ptr());
//dump_memory_info("end", ctx->id);
cudaProfilerStop();
//num_local_triangles = (unsigned)h_total;
}
void TC_masterNodes_cuda(unsigned long& num_local_triangles, struct CUDA_Context* ctx) {
TC_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, num_local_triangles, ctx);
}
|
047bb54ac5e1030ab61b80d664c1d0c52a820e09.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/// \file Benchmarker.cu
/// \author Johannes de Fine Licht
#include "VecGeom/benchmarking/Benchmarker.h"
#include "VecGeom/base/Stopwatch.h"
#include "VecGeom/backend/cuda/Backend.h"
#include "VecGeom/management/CudaManager.h"
namespace vecgeom {
inline namespace cuda {
__global__ void ContainsBenchmarkCudaKernel(VPlacedVolume const *const volume, const SOA3D<Precision> positions,
const int n, bool *const contains)
{
const int i = ThreadIndex();
if (i >= n) return;
contains[i] = volume->Contains(positions[i]);
}
__global__ void InsideBenchmarkCudaKernel(VPlacedVolume const *const volume, const SOA3D<Precision> positions,
const int n, Inside_t *const inside)
{
const int i = ThreadIndex();
if (i >= n) return;
inside[i] = volume->Inside(positions[i]);
}
__global__ void DistanceToInBenchmarkCudaKernel(VPlacedVolume const *const volume, const SOA3D<Precision> positions,
const SOA3D<Precision> directions, const int n,
Precision *const distance)
{
const int i = ThreadIndex();
if (i >= n) return;
distance[i] = volume->DistanceToIn(positions[i], directions[i]);
}
__global__ void DistanceToOutBenchmarkCudaKernel(VPlacedVolume const *const volume, const SOA3D<Precision> positions,
const SOA3D<Precision> directions, const int n,
Precision *const distance)
{
const int i = ThreadIndex();
if (i >= n) return;
distance[i] = volume->DistanceToOut(positions[i], directions[i]);
}
__global__ void SafetyToInBenchmarkCudaKernel(VPlacedVolume const *const volume, const SOA3D<Precision> positions,
const int n, Precision *const distance)
{
const int i = ThreadIndex();
if (i >= n) return;
distance[i] = volume->SafetyToIn(positions[i]);
}
__global__ void SafetyToOutBenchmarkCudaKernel(VPlacedVolume const *const volume, const SOA3D<Precision> positions,
const int n, Precision *const distance)
{
const int i = ThreadIndex();
if (i >= n) return;
distance[i] = volume->SafetyToOut(positions[i]);
}
} // End cuda namespace
// This is odd ... We are implementing one of the member functions of a cxx class
// in a .cu file
void Benchmarker::RunInsideCuda(Precision *const posX, Precision *const posY, Precision *const posZ,
bool *const contains, Inside_t *const inside)
{
typedef cxx::DevicePtr<vecgeom::cuda::VPlacedVolume> CudaVolume;
typedef vecgeom::cuda::SOA3D<Precision> CudaSOA3D;
if (fVerbosity > 0) printf("CUDA - ");
std::list<CudaVolume> volumesGpu;
GetVolumePointers(volumesGpu);
vecgeom::cuda::LaunchParameters launch = vecgeom::cuda::LaunchParameters(fPointCount);
Stopwatch timer;
cxx::DevicePtr<Precision> posXGpu;
posXGpu.Allocate(fPointCount);
cxx::DevicePtr<Precision> posYGpu;
posYGpu.Allocate(fPointCount);
cxx::DevicePtr<Precision> posZGpu;
posZGpu.Allocate(fPointCount);
posXGpu.ToDevice(posX, fPointCount);
posYGpu.ToDevice(posY, fPointCount);
posZGpu.ToDevice(posZ, fPointCount);
bool *containsGpu = cxx::AllocateOnGpu<bool>(sizeof(bool) * fPointCount);
Inside_t *insideGpu = cxx::AllocateOnGpu<Inside_t>(sizeof(Inside_t) * fPointCount);
CudaSOA3D positionGpu = CudaSOA3D(posXGpu, posYGpu, posZGpu, fPointCount);
timer.Start();
for (unsigned r = 0; r < fRepetitions; ++r) {
for (std::list<CudaVolume>::const_iterator v = volumesGpu.begin(), vEnd = volumesGpu.end(); v != vEnd; ++v) {
hipLaunchKernelGGL(( vecgeom::cuda::InsideBenchmarkCudaKernel), dim3(launch.grid_size), dim3(launch.block_size), 0, 0, *v, positionGpu, fPointCount,
insideGpu);
}
}
hipDeviceSynchronize();
Precision elapsedInside = timer.Stop();
timer.Start();
for (unsigned r = 0; r < fRepetitions; ++r) {
for (std::list<CudaVolume>::const_iterator v = volumesGpu.begin(), vEnd = volumesGpu.end(); v != vEnd; ++v) {
hipLaunchKernelGGL(( vecgeom::cuda::ContainsBenchmarkCudaKernel), dim3(launch.grid_size), dim3(launch.block_size), 0, 0, *v, positionGpu, fPointCount,
containsGpu);
}
}
hipDeviceSynchronize();
Precision elapsedContains = timer.Stop();
cxx::CopyFromGpu(insideGpu, inside, fPointCount * sizeof(Inside_t));
cxx::CopyFromGpu(containsGpu, contains, fPointCount * sizeof(bool));
cxx::FreeFromGpu(containsGpu);
cxx::FreeFromGpu(insideGpu);
posXGpu.Deallocate();
posYGpu.Deallocate();
posZGpu.Deallocate();
if (fVerbosity > 0) {
printf("Inside: %.6fs (%.6fs), Contains: %.6fs (%.6fs), "
"Inside/Contains: %.2f\n",
elapsedInside, elapsedInside / fVolumes.size(), elapsedContains, elapsedContains / fVolumes.size(),
elapsedInside / elapsedContains);
}
fResults.push_back(GenerateBenchmarkResult(elapsedContains, kBenchmarkContains, kBenchmarkCuda, fInsideBias));
fResults.push_back(GenerateBenchmarkResult(elapsedInside, kBenchmarkInside, kBenchmarkCuda, fInsideBias));
hipDeviceSynchronize();
}
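// Same structure as RunInsideCuda: stage the point/direction SOAs on the device,
// time the SafetyToIn and DistanceToIn kernels over fRepetitions for every volume,
// then copy the results back and record the benchmark entries.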
void Benchmarker::RunToInCuda(Precision *const posX, Precision *const posY, Precision *const posZ,
Precision *const dirX, Precision *const dirY, Precision *const dirZ,
Precision *const distances, Precision *const safeties)
{
typedef cxx::DevicePtr<vecgeom::cuda::VPlacedVolume> CudaVolume;
typedef vecgeom::cuda::SOA3D<Precision> CudaSOA3D;
if (fVerbosity > 0) printf("CUDA - ");
std::list<CudaVolume> volumesGpu;
GetVolumePointers(volumesGpu);
vecgeom::cuda::LaunchParameters launch = vecgeom::cuda::LaunchParameters(fPointCount);
vecgeom::cuda::Stopwatch timer;
Precision *posXGpu = cxx::AllocateOnGpu<Precision>(sizeof(Precision) * fPointCount);
Precision *posYGpu = cxx::AllocateOnGpu<Precision>(sizeof(Precision) * fPointCount);
Precision *posZGpu = cxx::AllocateOnGpu<Precision>(sizeof(Precision) * fPointCount);
cxx::CopyToGpu(posX, posXGpu, fPointCount * sizeof(Precision));
cxx::CopyToGpu(posY, posYGpu, fPointCount * sizeof(Precision));
cxx::CopyToGpu(posZ, posZGpu, fPointCount * sizeof(Precision));
CudaSOA3D positionGpu = CudaSOA3D(posXGpu, posYGpu, posZGpu, fPointCount);
Precision *dirXGpu = cxx::AllocateOnGpu<Precision>(sizeof(Precision) * fPointCount);
Precision *dirYGpu = cxx::AllocateOnGpu<Precision>(sizeof(Precision) * fPointCount);
Precision *dirZGpu = cxx::AllocateOnGpu<Precision>(sizeof(Precision) * fPointCount);
cxx::CopyToGpu(dirX, dirXGpu, fPointCount * sizeof(Precision));
cxx::CopyToGpu(dirY, dirYGpu, fPointCount * sizeof(Precision));
cxx::CopyToGpu(dirZ, dirZGpu, fPointCount * sizeof(Precision));
CudaSOA3D directionGpu = CudaSOA3D(dirXGpu, dirYGpu, dirZGpu, fPointCount);
Precision *distancesGpu = cxx::AllocateOnGpu<Precision>(sizeof(Precision) * fPointCount);
Precision *safetiesGpu = cxx::AllocateOnGpu<Precision>(sizeof(Precision) * fPointCount);
timer.Start();
for (unsigned r = 0; r < fRepetitions; ++r) {
for (std::list<CudaVolume>::const_iterator v = volumesGpu.begin(), vEnd = volumesGpu.end(); v != vEnd; ++v) {
hipLaunchKernelGGL(( vecgeom::cuda::SafetyToInBenchmarkCudaKernel), dim3(launch.grid_size), dim3(launch.block_size), 0, 0, *v, positionGpu,
fPointCount, safetiesGpu);
}
}
hipDeviceSynchronize();
Precision elapsedSafety = timer.Stop();
timer.Start();
for (unsigned r = 0; r < fRepetitions; ++r) {
for (std::list<CudaVolume>::const_iterator v = volumesGpu.begin(), vEnd = volumesGpu.end(); v != vEnd; ++v) {
hipLaunchKernelGGL(( vecgeom::cuda::DistanceToInBenchmarkCudaKernel), dim3(launch.grid_size), dim3(launch.block_size), 0, 0,
*v, positionGpu, directionGpu, fPointCount, distancesGpu);
}
}
hipDeviceSynchronize();
Precision elapsedDistance = timer.Stop();
cxx::CopyFromGpu(safetiesGpu, safeties, fPointCount * sizeof(Precision));
cxx::CopyFromGpu(distancesGpu, distances, fPointCount * sizeof(Precision));
cxx::FreeFromGpu(distancesGpu);
cxx::FreeFromGpu(dirXGpu);
cxx::FreeFromGpu(dirYGpu);
cxx::FreeFromGpu(dirZGpu);
cxx::FreeFromGpu(safetiesGpu);
cxx::FreeFromGpu(posXGpu);
cxx::FreeFromGpu(posYGpu);
cxx::FreeFromGpu(posZGpu);
if (fVerbosity > 0) {
printf("DistanceToIn: %.6fs (%.6fs), SafetyToIn: %.6fs (%.6fs), "
"DistanceToIn/SafetyToIn: %.2f\n",
elapsedDistance, elapsedDistance / fVolumes.size(), elapsedSafety, elapsedSafety / fVolumes.size(),
elapsedDistance / elapsedSafety);
}
fResults.push_back(GenerateBenchmarkResult(elapsedDistance, kBenchmarkDistanceToIn, kBenchmarkCuda, fToInBias));
fResults.push_back(GenerateBenchmarkResult(elapsedSafety, kBenchmarkSafetyToIn, kBenchmarkCuda, fToInBias));
hipDeviceSynchronize();
}
void Benchmarker::RunToOutCuda(Precision *const posX, Precision *const posY, Precision *const posZ,
Precision *const dirX, Precision *const dirY, Precision *const dirZ,
Precision *const distances, Precision *const safeties)
{
typedef cxx::DevicePtr<vecgeom::cuda::VPlacedVolume> CudaVolume;
typedef vecgeom::cuda::SOA3D<Precision> CudaSOA3D;
double elapsedDistance;
double elapsedSafety;
if (fVerbosity > 0) printf("CUDA - ");
std::list<CudaVolume> volumesGpu;
GetVolumePointers(volumesGpu);
Precision *posXGpu = cxx::AllocateOnGpu<Precision>(sizeof(Precision) * fPointCount);
Precision *posYGpu = cxx::AllocateOnGpu<Precision>(sizeof(Precision) * fPointCount);
Precision *posZGpu = cxx::AllocateOnGpu<Precision>(sizeof(Precision) * fPointCount);
Precision *dirXGpu = cxx::AllocateOnGpu<Precision>(sizeof(Precision) * fPointCount);
Precision *dirYGpu = cxx::AllocateOnGpu<Precision>(sizeof(Precision) * fPointCount);
Precision *dirZGpu = cxx::AllocateOnGpu<Precision>(sizeof(Precision) * fPointCount);
cxx::CopyToGpu(posX, posXGpu, fPointCount * sizeof(Precision));
cxx::CopyToGpu(posY, posYGpu, fPointCount * sizeof(Precision));
cxx::CopyToGpu(posZ, posZGpu, fPointCount * sizeof(Precision));
cxx::CopyToGpu(dirX, dirXGpu, fPointCount * sizeof(Precision));
cxx::CopyToGpu(dirY, dirYGpu, fPointCount * sizeof(Precision));
cxx::CopyToGpu(dirZ, dirZGpu, fPointCount * sizeof(Precision));
CudaSOA3D positionGpu = CudaSOA3D(posXGpu, posYGpu, posZGpu, fPointCount);
CudaSOA3D directionGpu = CudaSOA3D(dirXGpu, dirYGpu, dirZGpu, fPointCount);
Precision *distancesGpu = cxx::AllocateOnGpu<Precision>(sizeof(Precision) * fPointCount);
Precision *safetiesGpu = cxx::AllocateOnGpu<Precision>(sizeof(Precision) * fPointCount);
vecgeom::cuda::LaunchParameters launch = vecgeom::cuda::LaunchParameters(fPointCount);
vecgeom::cuda::Stopwatch timer;
timer.Start();
for (unsigned r = 0; r < fRepetitions; ++r) {
for (std::list<CudaVolume>::const_iterator v = volumesGpu.begin(), vEnd = volumesGpu.end(); v != vEnd; ++v) {
hipLaunchKernelGGL(( vecgeom::cuda::SafetyToOutBenchmarkCudaKernel), dim3(launch.grid_size), dim3(launch.block_size), 0, 0, *v, positionGpu,
fPointCount, safetiesGpu);
}
}
hipDeviceSynchronize();
elapsedSafety = timer.Stop();
timer.Start();
for (unsigned r = 0; r < fRepetitions; ++r) {
for (std::list<CudaVolume>::const_iterator v = volumesGpu.begin(), vEnd = volumesGpu.end(); v != vEnd; ++v) {
hipLaunchKernelGGL(( vecgeom::cuda::DistanceToOutBenchmarkCudaKernel), dim3(launch.grid_size), dim3(launch.block_size), 0, 0,
*v, positionGpu, directionGpu, fPointCount, distancesGpu);
}
}
hipDeviceSynchronize();
elapsedDistance = timer.Stop();
cxx::CopyFromGpu(distancesGpu, distances, fPointCount * sizeof(Precision));
cxx::CopyFromGpu(safetiesGpu, safeties, fPointCount * sizeof(Precision));
if (fVerbosity > 0) {
printf("DistanceToOut: %.6fs (%.6fs), SafetyToOut: %.6fs (%.6fs), "
"DistanceToOut/SafetyToOut: %.2f\n",
elapsedDistance, elapsedDistance / fVolumes.size(), elapsedSafety, elapsedSafety / fVolumes.size(),
elapsedDistance / elapsedSafety);
}
fResults.push_back(GenerateBenchmarkResult(elapsedDistance, kBenchmarkDistanceToOut, kBenchmarkCuda, 1));
fResults.push_back(GenerateBenchmarkResult(elapsedSafety, kBenchmarkSafetyToOut, kBenchmarkCuda, 1));
  cxx::FreeFromGpu(distancesGpu);
  cxx::FreeFromGpu(safetiesGpu);
cxx::FreeFromGpu(posXGpu);
cxx::FreeFromGpu(posYGpu);
cxx::FreeFromGpu(posZGpu);
cxx::FreeFromGpu(dirXGpu);
cxx::FreeFromGpu(dirYGpu);
cxx::FreeFromGpu(dirZGpu);
hipDeviceSynchronize();
}
} // End vecgeom namespace
| 047bb54ac5e1030ab61b80d664c1d0c52a820e09.cu | /// \file Benchmarker.cu
/// \author Johannes de Fine Licht
#include "VecGeom/benchmarking/Benchmarker.h"
#include "VecGeom/base/Stopwatch.h"
#include "VecGeom/backend/cuda/Backend.h"
#include "VecGeom/management/CudaManager.h"
namespace vecgeom {
inline namespace cuda {
__global__ void ContainsBenchmarkCudaKernel(VPlacedVolume const *const volume, const SOA3D<Precision> positions,
const int n, bool *const contains)
{
const int i = ThreadIndex();
if (i >= n) return;
contains[i] = volume->Contains(positions[i]);
}
__global__ void InsideBenchmarkCudaKernel(VPlacedVolume const *const volume, const SOA3D<Precision> positions,
const int n, Inside_t *const inside)
{
const int i = ThreadIndex();
if (i >= n) return;
inside[i] = volume->Inside(positions[i]);
}
__global__ void DistanceToInBenchmarkCudaKernel(VPlacedVolume const *const volume, const SOA3D<Precision> positions,
const SOA3D<Precision> directions, const int n,
Precision *const distance)
{
const int i = ThreadIndex();
if (i >= n) return;
distance[i] = volume->DistanceToIn(positions[i], directions[i]);
}
__global__ void DistanceToOutBenchmarkCudaKernel(VPlacedVolume const *const volume, const SOA3D<Precision> positions,
const SOA3D<Precision> directions, const int n,
Precision *const distance)
{
const int i = ThreadIndex();
if (i >= n) return;
distance[i] = volume->DistanceToOut(positions[i], directions[i]);
}
__global__ void SafetyToInBenchmarkCudaKernel(VPlacedVolume const *const volume, const SOA3D<Precision> positions,
const int n, Precision *const distance)
{
const int i = ThreadIndex();
if (i >= n) return;
distance[i] = volume->SafetyToIn(positions[i]);
}
__global__ void SafetyToOutBenchmarkCudaKernel(VPlacedVolume const *const volume, const SOA3D<Precision> positions,
const int n, Precision *const distance)
{
const int i = ThreadIndex();
if (i >= n) return;
distance[i] = volume->SafetyToOut(positions[i]);
}
} // End cuda namespace
// This is odd ... We are implementing one of the member functions of a cxx class
// in a .cu file
void Benchmarker::RunInsideCuda(Precision *const posX, Precision *const posY, Precision *const posZ,
bool *const contains, Inside_t *const inside)
{
typedef cxx::DevicePtr<vecgeom::cuda::VPlacedVolume> CudaVolume;
typedef vecgeom::cuda::SOA3D<Precision> CudaSOA3D;
if (fVerbosity > 0) printf("CUDA - ");
std::list<CudaVolume> volumesGpu;
GetVolumePointers(volumesGpu);
vecgeom::cuda::LaunchParameters launch = vecgeom::cuda::LaunchParameters(fPointCount);
Stopwatch timer;
cxx::DevicePtr<Precision> posXGpu;
posXGpu.Allocate(fPointCount);
cxx::DevicePtr<Precision> posYGpu;
posYGpu.Allocate(fPointCount);
cxx::DevicePtr<Precision> posZGpu;
posZGpu.Allocate(fPointCount);
posXGpu.ToDevice(posX, fPointCount);
posYGpu.ToDevice(posY, fPointCount);
posZGpu.ToDevice(posZ, fPointCount);
bool *containsGpu = cxx::AllocateOnGpu<bool>(sizeof(bool) * fPointCount);
Inside_t *insideGpu = cxx::AllocateOnGpu<Inside_t>(sizeof(Inside_t) * fPointCount);
CudaSOA3D positionGpu = CudaSOA3D(posXGpu, posYGpu, posZGpu, fPointCount);
timer.Start();
for (unsigned r = 0; r < fRepetitions; ++r) {
for (std::list<CudaVolume>::const_iterator v = volumesGpu.begin(), vEnd = volumesGpu.end(); v != vEnd; ++v) {
vecgeom::cuda::InsideBenchmarkCudaKernel<<<launch.grid_size, launch.block_size>>>(*v, positionGpu, fPointCount,
insideGpu);
}
}
cudaDeviceSynchronize();
Precision elapsedInside = timer.Stop();
timer.Start();
for (unsigned r = 0; r < fRepetitions; ++r) {
for (std::list<CudaVolume>::const_iterator v = volumesGpu.begin(), vEnd = volumesGpu.end(); v != vEnd; ++v) {
vecgeom::cuda::ContainsBenchmarkCudaKernel<<<launch.grid_size, launch.block_size>>>(*v, positionGpu, fPointCount,
containsGpu);
}
}
cudaDeviceSynchronize();
Precision elapsedContains = timer.Stop();
cxx::CopyFromGpu(insideGpu, inside, fPointCount * sizeof(Inside_t));
cxx::CopyFromGpu(containsGpu, contains, fPointCount * sizeof(bool));
cxx::FreeFromGpu(containsGpu);
cxx::FreeFromGpu(insideGpu);
posXGpu.Deallocate();
posYGpu.Deallocate();
posZGpu.Deallocate();
if (fVerbosity > 0) {
printf("Inside: %.6fs (%.6fs), Contains: %.6fs (%.6fs), "
"Inside/Contains: %.2f\n",
elapsedInside, elapsedInside / fVolumes.size(), elapsedContains, elapsedContains / fVolumes.size(),
elapsedInside / elapsedContains);
}
fResults.push_back(GenerateBenchmarkResult(elapsedContains, kBenchmarkContains, kBenchmarkCuda, fInsideBias));
fResults.push_back(GenerateBenchmarkResult(elapsedInside, kBenchmarkInside, kBenchmarkCuda, fInsideBias));
cudaDeviceSynchronize();
}
void Benchmarker::RunToInCuda(Precision *const posX, Precision *const posY, Precision *const posZ,
Precision *const dirX, Precision *const dirY, Precision *const dirZ,
Precision *const distances, Precision *const safeties)
{
typedef cxx::DevicePtr<vecgeom::cuda::VPlacedVolume> CudaVolume;
typedef vecgeom::cuda::SOA3D<Precision> CudaSOA3D;
if (fVerbosity > 0) printf("CUDA - ");
std::list<CudaVolume> volumesGpu;
GetVolumePointers(volumesGpu);
vecgeom::cuda::LaunchParameters launch = vecgeom::cuda::LaunchParameters(fPointCount);
vecgeom::cuda::Stopwatch timer;
Precision *posXGpu = cxx::AllocateOnGpu<Precision>(sizeof(Precision) * fPointCount);
Precision *posYGpu = cxx::AllocateOnGpu<Precision>(sizeof(Precision) * fPointCount);
Precision *posZGpu = cxx::AllocateOnGpu<Precision>(sizeof(Precision) * fPointCount);
cxx::CopyToGpu(posX, posXGpu, fPointCount * sizeof(Precision));
cxx::CopyToGpu(posY, posYGpu, fPointCount * sizeof(Precision));
cxx::CopyToGpu(posZ, posZGpu, fPointCount * sizeof(Precision));
CudaSOA3D positionGpu = CudaSOA3D(posXGpu, posYGpu, posZGpu, fPointCount);
Precision *dirXGpu = cxx::AllocateOnGpu<Precision>(sizeof(Precision) * fPointCount);
Precision *dirYGpu = cxx::AllocateOnGpu<Precision>(sizeof(Precision) * fPointCount);
Precision *dirZGpu = cxx::AllocateOnGpu<Precision>(sizeof(Precision) * fPointCount);
cxx::CopyToGpu(dirX, dirXGpu, fPointCount * sizeof(Precision));
cxx::CopyToGpu(dirY, dirYGpu, fPointCount * sizeof(Precision));
cxx::CopyToGpu(dirZ, dirZGpu, fPointCount * sizeof(Precision));
CudaSOA3D directionGpu = CudaSOA3D(dirXGpu, dirYGpu, dirZGpu, fPointCount);
Precision *distancesGpu = cxx::AllocateOnGpu<Precision>(sizeof(Precision) * fPointCount);
Precision *safetiesGpu = cxx::AllocateOnGpu<Precision>(sizeof(Precision) * fPointCount);
timer.Start();
for (unsigned r = 0; r < fRepetitions; ++r) {
for (std::list<CudaVolume>::const_iterator v = volumesGpu.begin(), vEnd = volumesGpu.end(); v != vEnd; ++v) {
vecgeom::cuda::SafetyToInBenchmarkCudaKernel<<<launch.grid_size, launch.block_size>>>(*v, positionGpu,
fPointCount, safetiesGpu);
}
}
cudaDeviceSynchronize();
Precision elapsedSafety = timer.Stop();
timer.Start();
for (unsigned r = 0; r < fRepetitions; ++r) {
for (std::list<CudaVolume>::const_iterator v = volumesGpu.begin(), vEnd = volumesGpu.end(); v != vEnd; ++v) {
vecgeom::cuda::DistanceToInBenchmarkCudaKernel<<<launch.grid_size, launch.block_size>>>(
*v, positionGpu, directionGpu, fPointCount, distancesGpu);
}
}
cudaDeviceSynchronize();
Precision elapsedDistance = timer.Stop();
cxx::CopyFromGpu(safetiesGpu, safeties, fPointCount * sizeof(Precision));
cxx::CopyFromGpu(distancesGpu, distances, fPointCount * sizeof(Precision));
cxx::FreeFromGpu(distancesGpu);
cxx::FreeFromGpu(dirXGpu);
cxx::FreeFromGpu(dirYGpu);
cxx::FreeFromGpu(dirZGpu);
cxx::FreeFromGpu(safetiesGpu);
cxx::FreeFromGpu(posXGpu);
cxx::FreeFromGpu(posYGpu);
cxx::FreeFromGpu(posZGpu);
if (fVerbosity > 0) {
printf("DistanceToIn: %.6fs (%.6fs), SafetyToIn: %.6fs (%.6fs), "
"DistanceToIn/SafetyToIn: %.2f\n",
elapsedDistance, elapsedDistance / fVolumes.size(), elapsedSafety, elapsedSafety / fVolumes.size(),
elapsedDistance / elapsedSafety);
}
fResults.push_back(GenerateBenchmarkResult(elapsedDistance, kBenchmarkDistanceToIn, kBenchmarkCuda, fToInBias));
fResults.push_back(GenerateBenchmarkResult(elapsedSafety, kBenchmarkSafetyToIn, kBenchmarkCuda, fToInBias));
cudaDeviceSynchronize();
}
void Benchmarker::RunToOutCuda(Precision *const posX, Precision *const posY, Precision *const posZ,
Precision *const dirX, Precision *const dirY, Precision *const dirZ,
Precision *const distances, Precision *const safeties)
{
typedef cxx::DevicePtr<vecgeom::cuda::VPlacedVolume> CudaVolume;
typedef vecgeom::cuda::SOA3D<Precision> CudaSOA3D;
double elapsedDistance;
double elapsedSafety;
if (fVerbosity > 0) printf("CUDA - ");
std::list<CudaVolume> volumesGpu;
GetVolumePointers(volumesGpu);
Precision *posXGpu = cxx::AllocateOnGpu<Precision>(sizeof(Precision) * fPointCount);
Precision *posYGpu = cxx::AllocateOnGpu<Precision>(sizeof(Precision) * fPointCount);
Precision *posZGpu = cxx::AllocateOnGpu<Precision>(sizeof(Precision) * fPointCount);
Precision *dirXGpu = cxx::AllocateOnGpu<Precision>(sizeof(Precision) * fPointCount);
Precision *dirYGpu = cxx::AllocateOnGpu<Precision>(sizeof(Precision) * fPointCount);
Precision *dirZGpu = cxx::AllocateOnGpu<Precision>(sizeof(Precision) * fPointCount);
cxx::CopyToGpu(posX, posXGpu, fPointCount * sizeof(Precision));
cxx::CopyToGpu(posY, posYGpu, fPointCount * sizeof(Precision));
cxx::CopyToGpu(posZ, posZGpu, fPointCount * sizeof(Precision));
cxx::CopyToGpu(dirX, dirXGpu, fPointCount * sizeof(Precision));
cxx::CopyToGpu(dirY, dirYGpu, fPointCount * sizeof(Precision));
cxx::CopyToGpu(dirZ, dirZGpu, fPointCount * sizeof(Precision));
CudaSOA3D positionGpu = CudaSOA3D(posXGpu, posYGpu, posZGpu, fPointCount);
CudaSOA3D directionGpu = CudaSOA3D(dirXGpu, dirYGpu, dirZGpu, fPointCount);
Precision *distancesGpu = cxx::AllocateOnGpu<Precision>(sizeof(Precision) * fPointCount);
Precision *safetiesGpu = cxx::AllocateOnGpu<Precision>(sizeof(Precision) * fPointCount);
vecgeom::cuda::LaunchParameters launch = vecgeom::cuda::LaunchParameters(fPointCount);
vecgeom::cuda::Stopwatch timer;
timer.Start();
for (unsigned r = 0; r < fRepetitions; ++r) {
for (std::list<CudaVolume>::const_iterator v = volumesGpu.begin(), vEnd = volumesGpu.end(); v != vEnd; ++v) {
vecgeom::cuda::SafetyToOutBenchmarkCudaKernel<<<launch.grid_size, launch.block_size>>>(*v, positionGpu,
fPointCount, safetiesGpu);
}
}
cudaDeviceSynchronize();
elapsedSafety = timer.Stop();
timer.Start();
for (unsigned r = 0; r < fRepetitions; ++r) {
for (std::list<CudaVolume>::const_iterator v = volumesGpu.begin(), vEnd = volumesGpu.end(); v != vEnd; ++v) {
vecgeom::cuda::DistanceToOutBenchmarkCudaKernel<<<launch.grid_size, launch.block_size>>>(
*v, positionGpu, directionGpu, fPointCount, distancesGpu);
}
}
cudaDeviceSynchronize();
elapsedDistance = timer.Stop();
cxx::CopyFromGpu(distancesGpu, distances, fPointCount * sizeof(Precision));
cxx::CopyFromGpu(safetiesGpu, safeties, fPointCount * sizeof(Precision));
if (fVerbosity > 0) {
printf("DistanceToOut: %.6fs (%.6fs), SafetyToOut: %.6fs (%.6fs), "
"DistanceToOut/SafetyToOut: %.2f\n",
elapsedDistance, elapsedDistance / fVolumes.size(), elapsedSafety, elapsedSafety / fVolumes.size(),
elapsedDistance / elapsedSafety);
}
fResults.push_back(GenerateBenchmarkResult(elapsedDistance, kBenchmarkDistanceToOut, kBenchmarkCuda, 1));
fResults.push_back(GenerateBenchmarkResult(elapsedSafety, kBenchmarkSafetyToOut, kBenchmarkCuda, 1));
  cxx::FreeFromGpu(distancesGpu);
  cxx::FreeFromGpu(safetiesGpu);
cxx::FreeFromGpu(posXGpu);
cxx::FreeFromGpu(posYGpu);
cxx::FreeFromGpu(posZGpu);
cxx::FreeFromGpu(dirXGpu);
cxx::FreeFromGpu(dirYGpu);
cxx::FreeFromGpu(dirZGpu);
cudaDeviceSynchronize();
}
} // End vecgeom namespace
|
f63ac1dec35a46662afd3b88ca01195178f2575e.hip | // !!! This is a file automatically generated by hipify!!!
/*$Id: main.cu 738 2009-11-13 16:08:10Z wenbinor $*/
/**
*This is the source code for Mars, a MapReduce framework on graphics
*processors.
*Developers: Wenbin Fang (HKUST), Bingsheng He (Microsoft Research Asia)
*Naga K. Govindaraju (Microsoft Corp.), Qiong Luo (HKUST), Tuyong Wang (Sina.com).
*If you have any question on the code, please contact us at
* [email protected] or [email protected]
*
*The license is a free non-exclusive, non-transferable license to reproduce,
*use, modify and display the source code version of the Software, with or
*without modifications solely for non-commercial research, educational or
*evaluation purposes. The license does not entitle Licensee to technical support,
*telephone assistance, enhancements or updates to the Software. All rights, title
*to and ownership interest in Mars, including all intellectual property rights
*therein shall remain in HKUST.
*/
/******************************************************************
*Inverted Index (II): It scans a set of HTML files
*and extracts the positions for all links. Each Map
*processes one line of HTML files. For each link it
*finds, it outputs an intermediate pair with the link as the
*key and the position as the value. No Reduce stage is
*required.
*****************************************************************/
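/* Added sketch (not part of the original Mars source): the Map input record
 * assembled in main() below pairs the device copy of a file with one
 * newline-terminated chunk of it, roughly
 *
 *   II_KEY_T key; key.file_buf = data[i];            // device pointer to the whole file
 *   II_VAL_T val; val.file_id = i;
 *   val.block_offset = ...; val.block_size = ...;    // one chunk of that file
 *   AddMapInputRecord(spec, &key, &val, sizeof(key), sizeof(val));
 *
 * Judging from validate(), each intermediate pair then carries the link's
 * url_offset in its key and the file id in its value; the GROUP stage
 * collects all file ids per URL, so no Reduce stage runs (MAP_GROUP below).
 */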
#include "MarsInc.h"
#include <dirent.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include "global.h"
#define __OUTPUT__
typedef struct flist{
char *data;
char *name;
int fd;
int size;
} filelist_t;
int count = 0;
filelist_t *makeup(char *dirname)
{
filelist_t *filelist = NULL;
struct dirent **namelist;
int n = scandir(dirname, &namelist, 0, alphasort);
printf("scandir n = %d\n",n);
if (n < 0)
{
printf("sacn dir failed!\n");
exit(-1);
}
filelist = (filelist_t*)malloc(sizeof(filelist_t)*n);
int i;
for (i = 0; i < n; i++)
{
if (strcmp(namelist[i]->d_name, ".") != 0 &&
strcmp(namelist[i]->d_name, "..") != 0)
{
filelist[count].name = (char*)malloc(strlen(dirname) + strlen(namelist[i]->d_name) + 1); // room for <dir> + entry name + '\0'
strcpy(filelist[count].name, dirname);
strcat(filelist[count].name, namelist[i]->d_name);
printf("file name = %s\n ", filelist[count].name);
struct stat finfo;
filelist[count].fd = open(filelist[count].name, O_RDONLY);
fstat(filelist[count].fd, &finfo);
filelist[count].size = finfo.st_size + 1;
filelist[count].data = (char*)malloc(filelist[count].size);
read(filelist[count].fd, filelist[count].data, finfo.st_size);
filelist[count].data[filelist[count].size - 1] = '\0';
//filelist[count].data = (char*)mmap(0, finfo.st_size + 1, PROT_READ | PROT_WRITE, MAP_PRIVATE, filelist[count].fd, 0);
//printf("%s\n", filelist[count].data);
count++;
}
}
return filelist;
}
void cleanup(filelist_t *filelist)
{
int i;
for (i = 0; i < count; i++)
{
free(filelist[i].name);
free(filelist[i].data); /* makeup() allocates data with malloc; munmap would only apply to the commented-out mmap path */
close(filelist[i].fd);
}
free(filelist);
}
void validate(Spec_t* spec, int num, filelist_t* filelist, char** d_data)
{
int4* offsetSizes = (int4*)spec->outputOffsetSizes;
int2* groupInfo = (int2*)spec->outputKeyListRange;
for (int i = 0; i < count; i++)
checkCudaErrors(hipMemcpy(filelist[i].data, d_data[i], filelist[i].size, hipMemcpyDeviceToHost));
if (num > spec->outputDiffKeyCount)
num = spec->outputDiffKeyCount;
printf("# of Groups: %d, # of records:%d\n", spec->outputDiffKeyCount, spec->outputRecordCount);
for (int i = 0; i < num; i++)
{
II_KEY_T* urls = (II_KEY_T*)(spec->outputKeys + offsetSizes[groupInfo[i].x].x);
int* fids = (int*)(spec->outputVals + offsetSizes[groupInfo[i].x].z);
printf("========Start:%d, End:%d, URL: %s===========\n", groupInfo[i].x, groupInfo[i].y, filelist[*fids].data + urls->url_offset);
printf("FILE LIST: ");
int groupSize = groupInfo[i].y - groupInfo[i].x;
for (int j = 0; j < groupSize; j++)
printf("%s \n", filelist[fids[j]].name);
printf("\n");
}
}
//------------------------------------------------------------------
//usage: InvertedIndex <dir>
//param: dir the directory including HTML files
//------------------------------------------------------------------
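//example (directory name is illustrative): ./InvertedIndex ./html/
//note: end <dir> with '/', because makeup() appends each entry name directly to it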
int main( int argc, char** argv)
{
if (argc != 2)
{
printf("usage: %s <dir>\n", argv[0]);
exit(-1);
}
Spec_t *spec = GetDefaultSpec();
spec->workflow = MAP_GROUP;
#ifdef __OUTPUT__
spec->outputToHost = 1;
#endif
TimeVal_t alltimer;
startTimer(&alltimer);
//-------------------------------------------------------------
//make map input
//-------------------------------------------------------------
TimeVal_t readtimer;
startTimer(&readtimer);
filelist_t *filelist = makeup(argv[1]);
II_KEY_T key;
II_VAL_T val;
char** data = (char**)malloc(sizeof(char*)*count);
for (int i = 0; i < count; i++)
{
data[i] = NULL;
checkCudaErrors(hipMalloc((void**)&data[i], filelist[i].size));
key.file_buf = data[i];
val.file_id = i;
int offset = 0;
char* p = filelist[i].data;
char* start = p;
while (1)
{
int blockSize = 1024;
if (offset + blockSize > filelist[i].size) blockSize = filelist[i].size - offset;
p += blockSize;
for (; *p != '\n' && *p != '\0'; p++);
if (*p != '\0')
{
*p = '\0';
++p;
blockSize = (int)(p - start);
val.block_size = blockSize;
val.block_offset = offset;
AddMapInputRecord(spec, &key, &val, sizeof(II_KEY_T), sizeof(II_VAL_T));
offset += blockSize;
start = p;
}
else
{
blockSize = (int)(filelist[i].size - offset);
val.block_size = blockSize;
val.block_offset = offset;
AddMapInputRecord(spec, &key, &val, sizeof(II_KEY_T), sizeof(II_VAL_T));
break;
}
}
checkCudaErrors(hipMemcpy(data[i], filelist[i].data, filelist[i].size, hipMemcpyHostToDevice));
}
endTimer("io-test", &readtimer);
//-------------------------------------------------------------
//start MapReduce procedure
//-------------------------------------------------------------
MapReduce(spec);
endTimer("all", &alltimer);
//-------------------------------------------------------------
//dump and validate the results (stdout is redirected to a.txt)
//-------------------------------------------------------------
freopen("a.txt", "w", stdout);
#ifdef __OUTPUT__
validate(spec, 100000, filelist, data);
#endif
//------------------------------------------------------------
//finish
//------------------------------------------------------------
FinishMapReduce(spec);
for (int i = 0; i < count; i++)
checkCudaErrors(hipFree(data[i]));
cleanup(filelist);
free(data);
return 0;
}
| f63ac1dec35a46662afd3b88ca01195178f2575e.cu | /*$Id: main.cu 738 2009-11-13 16:08:10Z wenbinor $*/
/**
*This is the source code for Mars, a MapReduce framework on graphics
*processors.
*Developers: Wenbin Fang (HKUST), Bingsheng He (Microsoft Research Asia)
*Naga K. Govindaraju (Microsoft Corp.), Qiong Luo (HKUST), Tuyong Wang (Sina.com).
*If you have any question on the code, please contact us at
* [email protected] or [email protected]
*
*The license is a free non-exclusive, non-transferable license to reproduce,
*use, modify and display the source code version of the Software, with or
*without modifications solely for non-commercial research, educational or
*evaluation purposes. The license does not entitle Licensee to technical support,
*telephone assistance, enhancements or updates to the Software. All rights, title
*to and ownership interest in Mars, including all intellectual property rights
*therein shall remain in HKUST.
*/
/******************************************************************
*Inverted Index (II): It scans a set of HTML files
*and extracts the positions for all links. Each Map
*processes one line of HTML files. For each link it
*finds, it outputs an intermediate pair with the link as the
*key and the position as the value. No Reduce stage is
*required.
*****************************************************************/
#include "MarsInc.h"
#include <dirent.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include "global.h"
#define __OUTPUT__
typedef struct flist{
char *data;
char *name;
int fd;
int size;
} filelist_t;
int count = 0;
filelist_t *makeup(char *dirname)
{
filelist_t *filelist = NULL;
struct dirent **namelist;
int n = scandir(dirname, &namelist, 0, alphasort);
printf("scandir n = %d\n",n);
if (n < 0)
{
printf("sacn dir failed!\n");
exit(-1);
}
filelist = (filelist_t*)malloc(sizeof(filelist_t)*n);
int i;
for (i = 0; i < n; i++)
{
if (strcmp(namelist[i]->d_name, ".") != 0 &&
strcmp(namelist[i]->d_name, "..") != 0)
{
filelist[count].name = (char*)malloc(strlen(dirname) + strlen(namelist[i]->d_name) + 1); // room for <dir> + entry name + '\0'
strcpy(filelist[count].name, dirname);
strcat(filelist[count].name, namelist[i]->d_name);
printf("file name = %s\n ", filelist[count].name);
struct stat finfo;
filelist[count].fd = open(filelist[count].name, O_RDONLY);
fstat(filelist[count].fd, &finfo);
filelist[count].size = finfo.st_size + 1;
filelist[count].data = (char*)malloc(filelist[count].size);
read(filelist[count].fd, filelist[count].data, finfo.st_size);
filelist[count].data[filelist[count].size - 1] = '\0';
//filelist[count].data = (char*)mmap(0, finfo.st_size + 1, PROT_READ | PROT_WRITE, MAP_PRIVATE, filelist[count].fd, 0);
//printf("%s\n", filelist[count].data);
count++;
}
}
return filelist;
}
void cleanup(filelist_t *filelist)
{
int i;
for (i = 0; i < count; i++)
{
free(filelist[i].name);
free(filelist[i].data); /* makeup() allocates data with malloc; munmap would only apply to the commented-out mmap path */
close(filelist[i].fd);
}
free(filelist);
}
void validate(Spec_t* spec, int num, filelist_t* filelist, char** d_data)
{
int4* offsetSizes = (int4*)spec->outputOffsetSizes;
int2* groupInfo = (int2*)spec->outputKeyListRange;
for (int i = 0; i < count; i++)
checkCudaErrors(cudaMemcpy(filelist[i].data, d_data[i], filelist[i].size, cudaMemcpyDeviceToHost));
if (num > spec->outputDiffKeyCount)
num = spec->outputDiffKeyCount;
printf("# of Groups: %d, # of records:%d\n", spec->outputDiffKeyCount, spec->outputRecordCount);
for (int i = 0; i < num; i++)
{
II_KEY_T* urls = (II_KEY_T*)(spec->outputKeys + offsetSizes[groupInfo[i].x].x);
int* fids = (int*)(spec->outputVals + offsetSizes[groupInfo[i].x].z);
printf("========Start:%d, End:%d, URL: %s===========\n", groupInfo[i].x, groupInfo[i].y, filelist[*fids].data + urls->url_offset);
printf("FILE LIST: ");
int groupSize = groupInfo[i].y - groupInfo[i].x;
for (int j = 0; j < groupSize; j++)
printf("%s \n", filelist[fids[j]].name);
printf("\n");
}
}
//------------------------------------------------------------------
//usage: InvertedIndex <dir>
//param: dir the directory including HTML files
//------------------------------------------------------------------
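//example (directory name is illustrative): ./InvertedIndex ./html/
//note: end <dir> with '/', because makeup() appends each entry name directly to it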
int main( int argc, char** argv)
{
if (argc != 2)
{
printf("usage: %s <dir>\n", argv[0]);
exit(-1);
}
Spec_t *spec = GetDefaultSpec();
spec->workflow = MAP_GROUP;
#ifdef __OUTPUT__
spec->outputToHost = 1;
#endif
TimeVal_t alltimer;
startTimer(&alltimer);
//-------------------------------------------------------------
//make map input
//-------------------------------------------------------------
TimeVal_t readtimer;
startTimer(&readtimer);
filelist_t *filelist = makeup(argv[1]);
II_KEY_T key;
II_VAL_T val;
char** data = (char**)malloc(sizeof(char*)*count);
for (int i = 0; i < count; i++)
{
data[i] = NULL;
checkCudaErrors(cudaMalloc((void**)&data[i], filelist[i].size));
key.file_buf = data[i];
val.file_id = i;
int offset = 0;
char* p = filelist[i].data;
char* start = p;
while (1)
{
int blockSize = 1024;
if (offset + blockSize > filelist[i].size) blockSize = filelist[i].size - offset;
p += blockSize;
for (; *p != '\n' && *p != '\0'; p++);
if (*p != '\0')
{
*p = '\0';
++p;
blockSize = (int)(p - start);
val.block_size = blockSize;
val.block_offset = offset;
AddMapInputRecord(spec, &key, &val, sizeof(II_KEY_T), sizeof(II_VAL_T));
offset += blockSize;
start = p;
}
else
{
blockSize = (int)(filelist[i].size - offset);
val.block_size = blockSize;
val.block_offset = offset;
AddMapInputRecord(spec, &key, &val, sizeof(II_KEY_T), sizeof(II_VAL_T));
break;
}
}
checkCudaErrors(cudaMemcpy(data[i], filelist[i].data, filelist[i].size, cudaMemcpyHostToDevice));
}
endTimer("io-test", &readtimer);
//-------------------------------------------------------------
//start MapReduce procedure
//-------------------------------------------------------------
MapReduce(spec);
endTimer("all", &alltimer);
//-------------------------------------------------------------
//dump and validate the results (stdout is redirected to a.txt)
//-------------------------------------------------------------
freopen("a.txt", "w", stdout);
#ifdef __OUTPUT__
validate(spec, 100000, filelist, data);
#endif
//------------------------------------------------------------
//finish
//------------------------------------------------------------
FinishMapReduce(spec);
for (int i = 0; i < count; i++)
checkCudaErrors(cudaFree(data[i]));
cleanup(filelist);
free(data);
return 0;
}
|
4cd9104500b271488cb7515f16bd9a1df72944f4.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THH/generic/THHTensorMathPointwise.hip"
#else
#include <ATen/MemoryOverlap.h>
#include <ATen/NamedTensorUtils.h>
void THCTensor_(cmax)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorMaxOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src1);
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorMaxOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
void THCTensor_(cmin)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorMinOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src1);
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorMinOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
void THCTensor_(cmaxValue)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (self == src) {
if (!THC_pointwiseApply1<scalar_t>(state, self, TensorMaxValueOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src, TensorMaxValueOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
void THCTensor_(cminValue)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (self == src) {
if (!THC_pointwiseApply1<scalar_t>(state, self, TensorMinValueOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src, TensorMinValueOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
#if !defined(THC_REAL_IS_BOOL)
static void propagate_names_if_named_tensor_enabled(THCTensor* result, THCTensor* src) {
at::namedinference::propagate_names(result, src);
}
#define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL) \
struct Tensor_##NAME##_##REAL##_Op { \
__device__ __forceinline__ void operator()(scalar_t* out, scalar_t* in) const { \
*out = CFUNC(*in); \
} \
\
__device__ __forceinline__ void operator()(scalar_t* v) const { \
*v = CFUNC(*v); \
} \
}; \
\
void THCTensor_(NAME)(THCState* state, THCTensor* self_, THCTensor* src) { \
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); \
at::assert_no_internal_overlap(self_); \
if (self_ == src) { \
if (!THC_pointwiseApply1<scalar_t>(state, self_, Tensor_##NAME##_##REAL##_Op())) { \
THArgCheck(false, 2, CUTORCH_DIM_WARNING); \
} \
} else { \
THCTensor_(resizeAs)(state, self_, src); \
\
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, Tensor_##NAME##_##REAL##_Op())) { \
THArgCheck(false, 2, CUTORCH_DIM_WARNING); \
} \
} \
\
THCudaCheck(hipGetLastError()); \
propagate_names_if_named_tensor_enabled(self_, src); \
}
#define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(NAME, CFUNC, REAL) \
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL)
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sqrt, THCNumerics<scalar_t>::sqrt, Real)
#endif
#undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_
#undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC
void THCTensor_(crossKernel)(THCState *state, THCTensor *self, THCTensor *x, THCTensor *y, int dimension)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, x, y));
int64_t sx = THCTensor_(stride)(state, x, dimension);
int64_t sy = THCTensor_(stride)(state, y, dimension);
int64_t so = THCTensor_(stride)(state, self, dimension);
THCTensor *nx = THCTensor_(newNarrow)(state, x, dimension, 0, 1);
THCTensor *ny = THCTensor_(newNarrow)(state, y, dimension, 0, 1);
THCTensor *nself = THCTensor_(newNarrow)(state, self, dimension, 0, 1);
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, nself, nx, ny, TensorCrossOp<scalar_t>(sx, sy, so))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCTensor_(free)(state, nx);
THCTensor_(free)(state, ny);
THCTensor_(free)(state, nself);
}
namespace {
c10::intrusive_ptr<at::TensorImpl, at::UndefinedTensorImpl> retainTensorImpl(THCTensor* self) {
c10::raw::intrusive_ptr::incref(self);
return c10::intrusive_ptr<at::TensorImpl, at::UndefinedTensorImpl>::reclaim(self);
}
}
void THCTensor_(cadd)(THCState *state, THCTensor *self_, THCTensor* src1, scalar_t value, THCTensor *src2)
{
auto out = at::Tensor(retainTensorImpl(self_));
#ifdef THC_REAL_IS_HALF
auto alpha = at::Half(value);
#else
auto alpha = value;
#endif
at::add_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)), alpha);
}
void THCTensor_(csub)(THCState *state, THCTensor *self_, THCTensor* src1, scalar_t value, THCTensor *src2)
{
auto out = at::Tensor(retainTensorImpl(self_));
#ifdef THC_REAL_IS_HALF
auto alpha = at::Half(value);
#else
auto alpha = value;
#endif
at::sub_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)), alpha);
}
void THCTensor_(cmul)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
auto out = at::Tensor(retainTensorImpl(self_));
at::mul_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)));
}
void THCTensor_(cdiv)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
auto out = at::Tensor(retainTensorImpl(self_));
at::div_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)));
}
void THCTensor_(cfmod)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorCFmodOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src1);
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorCFmodOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
#endif
#endif
| 4cd9104500b271488cb7515f16bd9a1df72944f4.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THC/generic/THCTensorMathPointwise.cu"
#else
#include <ATen/MemoryOverlap.h>
#include <ATen/NamedTensorUtils.h>
void THCTensor_(cmax)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorMaxOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src1);
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorMaxOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
void THCTensor_(cmin)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorMinOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src1);
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorMinOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
void THCTensor_(cmaxValue)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (self == src) {
if (!THC_pointwiseApply1<scalar_t>(state, self, TensorMaxValueOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src, TensorMaxValueOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
void THCTensor_(cminValue)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (self == src) {
if (!THC_pointwiseApply1<scalar_t>(state, self, TensorMinValueOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src, TensorMinValueOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
#if !defined(THC_REAL_IS_BOOL)
static void propagate_names_if_named_tensor_enabled(THCTensor* result, THCTensor* src) {
at::namedinference::propagate_names(result, src);
}
#define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL) \
struct Tensor_##NAME##_##REAL##_Op { \
__device__ __forceinline__ void operator()(scalar_t* out, scalar_t* in) const { \
*out = CFUNC(*in); \
} \
\
__device__ __forceinline__ void operator()(scalar_t* v) const { \
*v = CFUNC(*v); \
} \
}; \
\
void THCTensor_(NAME)(THCState* state, THCTensor* self_, THCTensor* src) { \
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); \
at::assert_no_internal_overlap(self_); \
if (self_ == src) { \
if (!THC_pointwiseApply1<scalar_t>(state, self_, Tensor_##NAME##_##REAL##_Op())) { \
THArgCheck(false, 2, CUTORCH_DIM_WARNING); \
} \
} else { \
THCTensor_(resizeAs)(state, self_, src); \
\
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, Tensor_##NAME##_##REAL##_Op())) { \
THArgCheck(false, 2, CUTORCH_DIM_WARNING); \
} \
} \
\
THCudaCheck(cudaGetLastError()); \
propagate_names_if_named_tensor_enabled(self_, src); \
}
#define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(NAME, CFUNC, REAL) \
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL)
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sqrt, THCNumerics<scalar_t>::sqrt, Real)
#endif
#undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_
#undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC
void THCTensor_(crossKernel)(THCState *state, THCTensor *self, THCTensor *x, THCTensor *y, int dimension)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, x, y));
int64_t sx = THCTensor_(stride)(state, x, dimension);
int64_t sy = THCTensor_(stride)(state, y, dimension);
int64_t so = THCTensor_(stride)(state, self, dimension);
THCTensor *nx = THCTensor_(newNarrow)(state, x, dimension, 0, 1);
THCTensor *ny = THCTensor_(newNarrow)(state, y, dimension, 0, 1);
THCTensor *nself = THCTensor_(newNarrow)(state, self, dimension, 0, 1);
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, nself, nx, ny, TensorCrossOp<scalar_t>(sx, sy, so))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCTensor_(free)(state, nx);
THCTensor_(free)(state, ny);
THCTensor_(free)(state, nself);
}
namespace {
c10::intrusive_ptr<at::TensorImpl, at::UndefinedTensorImpl> retainTensorImpl(THCTensor* self) {
c10::raw::intrusive_ptr::incref(self);
return c10::intrusive_ptr<at::TensorImpl, at::UndefinedTensorImpl>::reclaim(self);
}
}
void THCTensor_(cadd)(THCState *state, THCTensor *self_, THCTensor* src1, scalar_t value, THCTensor *src2)
{
auto out = at::Tensor(retainTensorImpl(self_));
#ifdef THC_REAL_IS_HALF
auto alpha = at::Half(value);
#else
auto alpha = value;
#endif
at::add_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)), alpha);
}
void THCTensor_(csub)(THCState *state, THCTensor *self_, THCTensor* src1, scalar_t value, THCTensor *src2)
{
auto out = at::Tensor(retainTensorImpl(self_));
#ifdef THC_REAL_IS_HALF
auto alpha = at::Half(value);
#else
auto alpha = value;
#endif
at::sub_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)), alpha);
}
void THCTensor_(cmul)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
auto out = at::Tensor(retainTensorImpl(self_));
at::mul_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)));
}
void THCTensor_(cdiv)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
auto out = at::Tensor(retainTensorImpl(self_));
at::div_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)));
}
void THCTensor_(cfmod)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorCFmodOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src1);
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorCFmodOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
#endif
#endif
|
d95535203cc188068ccdb5a2aead50c77d9189be.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 32, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, true,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| d95535203cc188068ccdb5a2aead50c77d9189be.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 32, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, true,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
218ad95d288897891ace20cb74383a5d363682e0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <thrust/extrema.h>
#include <thrust/device_vector.h>
#include <ctime>
using namespace thrust;
const double EPSILON = 10E-8;
void SwapRows(double* matrix,
int currentRow,
int otherRow,
int currentColumn,
int rowCount,
int columnCount)
{
for (auto i = 0; i < columnCount; i++)
{
auto temp = matrix[i * rowCount + currentRow];
matrix[i * rowCount + currentRow] = matrix[i * rowCount + otherRow];
matrix[i * rowCount + otherRow] = temp;
}
}
void CalculateCurrentRow(double* matrix,
int currentRow,
int currentColumn,
int rowCount,
int columnCount)
{
for (auto i = 0; i < columnCount; i++)
{
matrix[i * rowCount + currentRow] /= matrix[currentColumn * rowCount + currentRow];
}
}
void CalculateRows(double* matrix,
int currentRow,
int currentColumn,
int rowCount,
int columnCount)
{
for (int j = 0; j < rowCount; j++)
{
for (int k = 0; k < columnCount; k++)
{
matrix[k * rowCount + j] -=
matrix[currentColumn * rowCount + j] * matrix[k * rowCount + currentRow];
}
}
}
void SetCurrentZero(double* matrix,
int currentRow,
int currentColumn,
int rowCount,
int columnCount)
{
for (auto i = 0; i < columnCount; i++)
{
matrix[i * rowCount + currentRow] = 0;
matrix[currentColumn * rowCount + i] = 0;
}
}
int GetMaxIndexInColumn(double* deviceMatrix,
int rowIndex,
int columnIndex,
int rowCount,
int columnCount)
{
auto maxElem = 0.0;
auto maxIndex = 0;
for (int i = columnIndex * rowCount + rowIndex; i < columnIndex * rowCount + rowCount; i++)
{
if (fabs(deviceMatrix[i]) > maxElem)
{
maxIndex = i;
maxElem = fabs(deviceMatrix[i]);
}
}
if (fabs(maxElem) < EPSILON)
{
return -1;
}
return maxIndex - columnIndex * rowCount;
}
int FindRank(double* matrix, int rowCount, int columnCount)
{
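// Gauss-Jordan elimination with per-column partial pivoting on a column-major matrix;
// 'offset' counts columns that contain no usable (non-zero) pivot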
auto offset = 0;
for (int i = 0; i < rowCount && i + offset < columnCount; i++)
{
auto maxIndex = GetMaxIndexInColumn(matrix, i, i + offset, rowCount, columnCount);
if (maxIndex < 0)
{
offset++;
i--;
continue;
}
if (maxIndex != i)
{
SwapRows(matrix, i, maxIndex, i + offset, rowCount, columnCount);
}
CalculateCurrentRow(matrix, i, i + offset, rowCount, columnCount);
CalculateRows(matrix, i, i + offset, rowCount, columnCount);
SetCurrentZero(matrix, i, i + offset, rowCount, columnCount);
}
auto rank = columnCount - offset > rowCount
? rowCount
: columnCount - offset;
return rank;
}
int main()
{
std::ios_base::sync_with_stdio(false);
std::cin.tie(nullptr);
int rowCount, columnCount;
std::cin >> rowCount >> columnCount;
auto isTransposed = rowCount < columnCount;
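// rank is invariant under transposition, so a wide matrix is read transposed to keep rowCount >= columnCount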
if (isTransposed)
{
auto temp = rowCount;
rowCount = columnCount;
columnCount = temp;
}
auto matrix = new double[rowCount * columnCount];
clock_t begin = clock();
for (int i = 0; i < rowCount; i++)
{
for (int j = 0; j < columnCount; j++)
{
std::cin >> (isTransposed
? matrix[i * columnCount + j]
: matrix[j * rowCount + i]);
}
}
auto rank = FindRank(matrix, rowCount, columnCount);
clock_t end = clock();
std::cout << double(end - begin) / CLOCKS_PER_SEC << std::endl;
delete[] matrix;
}
| 218ad95d288897891ace20cb74383a5d363682e0.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <thrust/extrema.h>
#include <thrust/device_vector.h>
#include <ctime>
using namespace thrust;
const double EPSILON = 10E-8;
void SwapRows(double* matrix,
int currentRow,
int otherRow,
int currentColumn,
int rowCount,
int columnCount)
{
for (auto i = 0; i < columnCount; i++)
{
auto temp = matrix[i * rowCount + currentRow];
matrix[i * rowCount + currentRow] = matrix[i * rowCount + otherRow];
matrix[i * rowCount + otherRow] = temp;
}
}
void CalculateCurrentRow(double* matrix,
int currentRow,
int currentColumn,
int rowCount,
int columnCount)
{
for (auto i = 0; i < columnCount; i++)
{
matrix[i * rowCount + currentRow] /= matrix[currentColumn * rowCount + currentRow];
}
}
void CalculateRows(double* matrix,
int currentRow,
int currentColumn,
int rowCount,
int columnCount)
{
for (int j = 0; j < rowCount; j++)
{
for (int k = 0; k < columnCount; k++)
{
matrix[k * rowCount + j] -=
matrix[currentColumn * rowCount + j] * matrix[k * rowCount + currentRow];
}
}
}
void SetCurrentZero(double* matrix,
int currentRow,
int currentColumn,
int rowCount,
int columnCount)
{
for (auto i = 0; i < columnCount; i++)
{
matrix[i * rowCount + currentRow] = 0;
matrix[currentColumn * rowCount + i] = 0;
}
}
int GetMaxIndexInColumn(double* deviceMatrix,
int rowIndex,
int columnIndex,
int rowCount,
int columnCount)
{
auto maxElem = 0.0;
auto maxIndex = 0;
for (int i = columnIndex * rowCount + rowIndex; i < columnIndex * rowCount + rowCount; i++)
{
if (fabs(deviceMatrix[i]) > maxElem)
{
maxIndex = i;
maxElem = fabs(deviceMatrix[i]);
}
}
if (fabs(maxElem) < EPSILON)
{
return -1;
}
return maxIndex - columnIndex * rowCount;
}
int FindRank(double* matrix, int rowCount, int columnCount)
{
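// Gauss-Jordan elimination with per-column partial pivoting on a column-major matrix;
// 'offset' counts columns that contain no usable (non-zero) pivot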
auto offset = 0;
for (int i = 0; i < rowCount && i + offset < columnCount; i++)
{
auto maxIndex = GetMaxIndexInColumn(matrix, i, i + offset, rowCount, columnCount);
if (maxIndex < 0)
{
offset++;
i--;
continue;
}
if (maxIndex != i)
{
SwapRows(matrix, i, maxIndex, i + offset, rowCount, columnCount);
}
CalculateCurrentRow(matrix, i, i + offset, rowCount, columnCount);
CalculateRows(matrix, i, i + offset, rowCount, columnCount);
SetCurrentZero(matrix, i, i + offset, rowCount, columnCount);
}
auto rank = columnCount - offset > rowCount
? rowCount
: columnCount - offset;
return rank;
}
int main()
{
std::ios_base::sync_with_stdio(false);
std::cin.tie(nullptr);
int rowCount, columnCount;
std::cin >> rowCount >> columnCount;
auto isTransposed = rowCount < columnCount;
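// rank is invariant under transposition, so a wide matrix is read transposed to keep rowCount >= columnCount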
if (isTransposed)
{
auto temp = rowCount;
rowCount = columnCount;
columnCount = temp;
}
auto matrix = new double[rowCount * columnCount];
clock_t begin = clock();
for (int i = 0; i < rowCount; i++)
{
for (int j = 0; j < columnCount; j++)
{
std::cin >> (isTransposed
? matrix[i * columnCount + j]
: matrix[j * rowCount + i]);
}
}
auto rank = FindRank(matrix, rowCount, columnCount);
clock_t end = clock();
std::cout << double(end - begin) / CLOCKS_PER_SEC << std::endl;
delete[] matrix;
}
|
53999766ba7185cb150ebcd7f90b9d5b745f87a1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <unittest/unittest.h>
#include <thrust/unique.h>
#include <thrust/execution_policy.h>
template<typename Iterator1, typename Iterator2>
__global__
void unique_kernel(Iterator1 first, Iterator1 last, Iterator2 result)
{
*result = thrust::unique(thrust::seq, first, last);
}
template<typename Iterator1, typename BinaryPredicate, typename Iterator2>
__global__
void unique_kernel(Iterator1 first, Iterator1 last, BinaryPredicate pred, Iterator2 result)
{
*result = thrust::unique(thrust::seq, first, last, pred);
}
template<typename T>
struct is_equal_div_10_unique
{
__host__ __device__
bool operator()(const T x, const T& y) const { return ((int) x / 10) == ((int) y / 10); }
};
void TestUniqueDeviceSeq()
{
typedef thrust::device_vector<int> Vector;
typedef Vector::value_type T;
Vector data(10);
data[0] = 11;
data[1] = 11;
data[2] = 12;
data[3] = 20;
data[4] = 29;
data[5] = 21;
data[6] = 21;
data[7] = 31;
data[8] = 31;
data[9] = 37;
thrust::device_vector<Vector::iterator> new_last_vec(1);
Vector::iterator new_last;
hipLaunchKernelGGL(( unique_kernel), dim3(1),dim3(1), 0, 0, data.begin(), data.end(), new_last_vec.begin());
new_last = new_last_vec[0];
ASSERT_EQUAL(new_last - data.begin(), 7);
ASSERT_EQUAL(data[0], 11);
ASSERT_EQUAL(data[1], 12);
ASSERT_EQUAL(data[2], 20);
ASSERT_EQUAL(data[3], 29);
ASSERT_EQUAL(data[4], 21);
ASSERT_EQUAL(data[5], 31);
ASSERT_EQUAL(data[6], 37);
hipLaunchKernelGGL(( unique_kernel), dim3(1),dim3(1), 0, 0, data.begin(), new_last, is_equal_div_10_unique<T>(), new_last_vec.begin());
new_last = new_last_vec[0];
ASSERT_EQUAL(new_last - data.begin(), 3);
ASSERT_EQUAL(data[0], 11);
ASSERT_EQUAL(data[1], 20);
ASSERT_EQUAL(data[2], 31);
}
DECLARE_UNITTEST(TestUniqueDeviceSeq);
template<typename Iterator1, typename Iterator2, typename Iterator3>
__global__
void unique_copy_kernel(Iterator1 first, Iterator1 last, Iterator2 result1, Iterator3 result2)
{
*result2 = thrust::unique_copy(thrust::seq, first, last, result1);
}
template<typename Iterator1, typename Iterator2, typename BinaryPredicate, typename Iterator3>
__global__
void unique_copy_kernel(Iterator1 first, Iterator1 last, Iterator2 result1, BinaryPredicate pred, Iterator3 result2)
{
*result2 = thrust::unique_copy(thrust::seq, first, last, result1, pred);
}
void TestUniqueCopyDeviceSeq()
{
typedef thrust::device_vector<int> Vector;
typedef Vector::value_type T;
Vector data(10);
data[0] = 11;
data[1] = 11;
data[2] = 12;
data[3] = 20;
data[4] = 29;
data[5] = 21;
data[6] = 21;
data[7] = 31;
data[8] = 31;
data[9] = 37;
Vector output(10, -1);
thrust::device_vector<Vector::iterator> new_last_vec(1);
Vector::iterator new_last;
hipLaunchKernelGGL(( unique_copy_kernel), dim3(1),dim3(1), 0, 0, data.begin(), data.end(), output.begin(), new_last_vec.begin());
new_last = new_last_vec[0];
ASSERT_EQUAL(new_last - output.begin(), 7);
ASSERT_EQUAL(output[0], 11);
ASSERT_EQUAL(output[1], 12);
ASSERT_EQUAL(output[2], 20);
ASSERT_EQUAL(output[3], 29);
ASSERT_EQUAL(output[4], 21);
ASSERT_EQUAL(output[5], 31);
ASSERT_EQUAL(output[6], 37);
hipLaunchKernelGGL(( unique_copy_kernel), dim3(1),dim3(1), 0, 0, output.begin(), new_last, data.begin(), is_equal_div_10_unique<T>(), new_last_vec.begin());
new_last = new_last_vec[0];
ASSERT_EQUAL(new_last - data.begin(), 3);
ASSERT_EQUAL(data[0], 11);
ASSERT_EQUAL(data[1], 20);
ASSERT_EQUAL(data[2], 31);
}
DECLARE_UNITTEST(TestUniqueCopyDeviceSeq);
| 53999766ba7185cb150ebcd7f90b9d5b745f87a1.cu | #include <unittest/unittest.h>
#include <thrust/unique.h>
#include <thrust/execution_policy.h>
template<typename Iterator1, typename Iterator2>
__global__
void unique_kernel(Iterator1 first, Iterator1 last, Iterator2 result)
{
*result = thrust::unique(thrust::seq, first, last);
}
template<typename Iterator1, typename BinaryPredicate, typename Iterator2>
__global__
void unique_kernel(Iterator1 first, Iterator1 last, BinaryPredicate pred, Iterator2 result)
{
*result = thrust::unique(thrust::seq, first, last, pred);
}
template<typename T>
struct is_equal_div_10_unique
{
__host__ __device__
bool operator()(const T x, const T& y) const { return ((int) x / 10) == ((int) y / 10); }
};
void TestUniqueDeviceSeq()
{
typedef thrust::device_vector<int> Vector;
typedef Vector::value_type T;
Vector data(10);
data[0] = 11;
data[1] = 11;
data[2] = 12;
data[3] = 20;
data[4] = 29;
data[5] = 21;
data[6] = 21;
data[7] = 31;
data[8] = 31;
data[9] = 37;
thrust::device_vector<Vector::iterator> new_last_vec(1);
Vector::iterator new_last;
unique_kernel<<<1,1>>>(data.begin(), data.end(), new_last_vec.begin());
new_last = new_last_vec[0];
ASSERT_EQUAL(new_last - data.begin(), 7);
ASSERT_EQUAL(data[0], 11);
ASSERT_EQUAL(data[1], 12);
ASSERT_EQUAL(data[2], 20);
ASSERT_EQUAL(data[3], 29);
ASSERT_EQUAL(data[4], 21);
ASSERT_EQUAL(data[5], 31);
ASSERT_EQUAL(data[6], 37);
unique_kernel<<<1,1>>>(data.begin(), new_last, is_equal_div_10_unique<T>(), new_last_vec.begin());
new_last = new_last_vec[0];
ASSERT_EQUAL(new_last - data.begin(), 3);
ASSERT_EQUAL(data[0], 11);
ASSERT_EQUAL(data[1], 20);
ASSERT_EQUAL(data[2], 31);
}
DECLARE_UNITTEST(TestUniqueDeviceSeq);
template<typename Iterator1, typename Iterator2, typename Iterator3>
__global__
void unique_copy_kernel(Iterator1 first, Iterator1 last, Iterator2 result1, Iterator3 result2)
{
*result2 = thrust::unique_copy(thrust::seq, first, last, result1);
}
template<typename Iterator1, typename Iterator2, typename BinaryPredicate, typename Iterator3>
__global__
void unique_copy_kernel(Iterator1 first, Iterator1 last, Iterator2 result1, BinaryPredicate pred, Iterator3 result2)
{
*result2 = thrust::unique_copy(thrust::seq, first, last, result1, pred);
}
void TestUniqueCopyDeviceSeq()
{
typedef thrust::device_vector<int> Vector;
typedef Vector::value_type T;
Vector data(10);
data[0] = 11;
data[1] = 11;
data[2] = 12;
data[3] = 20;
data[4] = 29;
data[5] = 21;
data[6] = 21;
data[7] = 31;
data[8] = 31;
data[9] = 37;
Vector output(10, -1);
thrust::device_vector<Vector::iterator> new_last_vec(1);
Vector::iterator new_last;
unique_copy_kernel<<<1,1>>>(data.begin(), data.end(), output.begin(), new_last_vec.begin());
new_last = new_last_vec[0];
ASSERT_EQUAL(new_last - output.begin(), 7);
ASSERT_EQUAL(output[0], 11);
ASSERT_EQUAL(output[1], 12);
ASSERT_EQUAL(output[2], 20);
ASSERT_EQUAL(output[3], 29);
ASSERT_EQUAL(output[4], 21);
ASSERT_EQUAL(output[5], 31);
ASSERT_EQUAL(output[6], 37);
unique_copy_kernel<<<1,1>>>(output.begin(), new_last, data.begin(), is_equal_div_10_unique<T>(), new_last_vec.begin());
new_last = new_last_vec[0];
ASSERT_EQUAL(new_last - data.begin(), 3);
ASSERT_EQUAL(data[0], 11);
ASSERT_EQUAL(data[1], 20);
ASSERT_EQUAL(data[2], 31);
}
DECLARE_UNITTEST(TestUniqueCopyDeviceSeq);
|
3b98fedd362fe7bff43e8d667a3c4f26376442d4.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime_api.h>
#include <opencv2/cudev.hpp>
#include <opencv2/core/matx.hpp>
#include <Aquila/Thrust_interop.hpp>
#include <thrust/sequence.h>
#include <thrust/system/hip/execution_policy.h>
namespace cv
{
namespace cuda
{
void histogram(const cv::cuda::GpuMat& in, cv::cuda::GpuMat& bins, cv::cuda::GpuMat& histogram,
float min = 0, float max = 256,
cv::cuda::Stream& stream = cv::cuda::Stream::Null());
}
}
template<typename T>
__host__ __device__ inline T* binary_search_approx(T *const begin, T * end, T value)
{
T* q;
if(begin >= end)
{
return end;
}
//q = (begin + end) / 2;
q = begin + (end - begin) / 2;
if(value == *q)
{
return q;
}else if(value > *q)
{
return binary_search_approx(q + 1, end, value);
}else // value < *q
{
return binary_search_approx(begin, q - 1, value);
}
}
template<typename T, int N>
__global__ void histogram_kernel(const cv::cuda::PtrStepSz<cv::Vec<T, N>> input,
const cv::cuda::PtrStepSz<float> bins,
int* histogram, float min_value, float step)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int nx = blockDim.x * gridDim.x;
int ny = blockDim.y * gridDim.y;
int t = threadIdx.x + threadIdx.y * blockDim.x;
int nt = blockDim.x * blockDim.y;
const int num_bins = bins.cols;
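// per-block partial histogram: one int counter per (bin, channel), stored bin-major as smem[bin * N + c];
// the N extra entries match the slack added to the dynamic shared-memory size at launch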
extern __shared__ int smem[];
for (int i = t; i < N * num_bins + N; i += nt)
smem[i] = 0;
__syncthreads();
for (int row = y; row < input.rows; row += ny)
{
for (int col = x; col < input.cols; col += nx)
{
#pragma unroll
for(int c = 0; c < N; ++c)
{
float val = input(row, col).val[c];
// calculate bin based on min and step
int idx = (val - min_value) / step;
idx = max(0,min(bins.cols, idx));
atomicAdd(&smem[idx * N + c], 1);
//float* bin = binary_search_approx<float>(bins.data, bins.data + bins.cols, float(val));
//int dist = bin - bins.data;
//atomicAdd(&smem[dist * N + c], 1);
}
}
}
__syncthreads();
for (int i = t; i < num_bins; i += nt) {
#pragma unroll
for(int c = 0; c < N; ++c)
{
atomicAdd(histogram + i * N + c, smem[i * N + c]);
}
}
}
template<int N>
__global__ void histogram_kernel_uchar(const cv::cuda::PtrStepSz<cv::Vec<uchar, N>> input,
int* histogram)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int nx = blockDim.x * gridDim.x;
int ny = blockDim.y * gridDim.y;
int t = threadIdx.x + threadIdx.y * blockDim.x;
int nt = blockDim.x * blockDim.y;
const int num_bins = 256;
//extern __shared__ int smem[N * num_bins + N];
extern __shared__ int smem[];
for (int i = t; i < N * num_bins + N; i += nt)
smem[i] = 0;
__syncthreads();
for (int row = y; row < input.rows; row += ny)
{
for (int col = x; col < input.cols; col += nx)
{
#pragma unroll
for(int c = 0; c < N; ++c)
{
uchar val = input(row, col).val[c];
atomicAdd(&smem[val* N + c], 1);
}
}
}
__syncthreads();
for (int i = t; i < num_bins; i += nt) {
#pragma unroll
for(int c = 0; c < N; ++c)
{
atomicAdd(histogram + i * N + c, smem[i * N + c]);
}
}
}
template<class T, int N>
void launch(const cv::cuda::GpuMat& in, cv::cuda::GpuMat& bins, cv::cuda::GpuMat& hist, cv::cuda::Stream& stream, float min, float step)
{
dim3 block(16, 16);
dim3 grid(cv::cudev::divUp(in.cols,16), cv::cudev::divUp(in.rows, 16));
hipLaunchKernelGGL(( histogram_kernel<T,N>), dim3(grid), dim3(block), (bins.cols * N + N) * sizeof(int),
cv::cuda::StreamAccessor::getStream(stream),
in, bins, (int*)hist.data, min, step);
}
template<int N>
void launch_uchar(const cv::cuda::GpuMat& in, cv::cuda::GpuMat& bins, cv::cuda::GpuMat& hist, cv::cuda::Stream& stream, float min, float step)
{
CV_Assert(in.depth() == CV_8U);
CV_Assert(in.channels() == N);
CV_Assert(hist.cols == 256 && hist.rows == 1 && hist.depth() == CV_32S && hist.channels() == N);
dim3 block(16, 16);
dim3 grid(cv::cudev::divUp(in.cols,16), cv::cudev::divUp(in.rows, 16));
hipLaunchKernelGGL(( histogram_kernel_uchar<N>), dim3(grid), dim3(block), (256 * N + N) * sizeof(int),
cv::cuda::StreamAccessor::getStream(stream),
in, (int*)hist.data);
}
void cv::cuda::histogram(const cv::cuda::GpuMat& in, cv::cuda::GpuMat& bins, cv::cuda::GpuMat& histogram,
float min, float max,
cv::cuda::Stream& stream)
{
typedef void(*func_t)(const cv::cuda::GpuMat& in, cv::cuda::GpuMat& bins, cv::cuda::GpuMat& hist, cv::cuda::Stream& stream, float min, float step);
int size = 1000;
float step = 1;
if(in.depth() == CV_8U)
{
size = 256;
min = 0;
max = 256;
}
if(bins.empty() && in.depth() != CV_8U)
{
bins.create(1, size, CV_32F);
step = (max - min) / float(size);
thrust::device_ptr<float> ptr = thrust::device_pointer_cast((float*)bins.data);
thrust::sequence(thrust::system::cuda::par.on(cv::cuda::StreamAccessor::getStream(stream)),ptr, ptr + size, min, step);
}
histogram.create(1, size, CV_MAKE_TYPE(CV_32S, in.channels()));
histogram.setTo(cv::Scalar::all(0), stream);
func_t funcs[4][7] =
{
{launch_uchar<1>, 0, launch<ushort, 1>, 0, 0, 0, 0},
{launch_uchar<2>, 0, launch<ushort, 2>, 0, 0, 0, 0},
{launch_uchar<3>, 0, launch<ushort, 3>, 0, 0, 0, 0},
{launch_uchar<4>, 0, launch<ushort, 4>, 0, 0, 0, 0}
};
CV_Assert(funcs[in.channels() - 1][in.depth()]);
funcs[in.channels() - 1][in.depth()](in, bins, histogram, stream, min, step);
}
| 3b98fedd362fe7bff43e8d667a3c4f26376442d4.cu |
#include <cuda_runtime_api.h>
#include <opencv2/cudev.hpp>
#include <opencv2/core/matx.hpp>
#include <Aquila/Thrust_interop.hpp>
#include <thrust/sequence.h>
#include <thrust/system/cuda/execution_policy.h>
namespace cv
{
namespace cuda
{
void histogram(const cv::cuda::GpuMat& in, cv::cuda::GpuMat& bins, cv::cuda::GpuMat& histogram,
float min = 0, float max = 256,
cv::cuda::Stream& stream = cv::cuda::Stream::Null());
}
}
template<typename T>
__host__ __device__ inline T* binary_search_approx(T *const begin, T * end, T value)
{
T* q;
if(begin >= end)
{
return end;
}
//q = (begin + end) / 2;
q = begin + (end - begin) / 2;
if(value == *q)
{
return q;
}else if(value > *q)
{
return binary_search_approx(q + 1, end, value);
}else // value < *q
{
return binary_search_approx(begin, q - 1, value);
}
}
template<typename T, int N>
__global__ void histogram_kernel(const cv::cuda::PtrStepSz<cv::Vec<T, N>> input,
const cv::cuda::PtrStepSz<float> bins,
int* histogram, float min_value, float step)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int nx = blockDim.x * gridDim.x;
int ny = blockDim.y * gridDim.y;
int t = threadIdx.x + threadIdx.y * blockDim.x;
int nt = blockDim.x * blockDim.y;
const int num_bins = bins.cols;
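// per-block partial histogram: one int counter per (bin, channel), stored bin-major as smem[bin * N + c];
// the N extra entries match the slack added to the dynamic shared-memory size at launch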
extern __shared__ int smem[];
for (int i = t; i < N * num_bins + N; i += nt)
smem[i] = 0;
__syncthreads();
for (int row = y; row < input.rows; row += ny)
{
for (int col = x; col < input.cols; col += nx)
{
#pragma unroll
for(int c = 0; c < N; ++c)
{
float val = input(row, col).val[c];
// calculate bin based on min and step
int idx = (val - min_value) / step;
idx = max(0,min(bins.cols, idx));
atomicAdd(&smem[idx * N + c], 1);
//float* bin = binary_search_approx<float>(bins.data, bins.data + bins.cols, float(val));
//int dist = bin - bins.data;
//atomicAdd(&smem[dist * N + c], 1);
}
}
}
__syncthreads();
for (int i = t; i < num_bins; i += nt) {
#pragma unroll
for(int c = 0; c < N; ++c)
{
atomicAdd(histogram + i * N + c, smem[i * N + c]);
}
}
}
template<int N>
__global__ void histogram_kernel_uchar(const cv::cuda::PtrStepSz<cv::Vec<uchar, N>> input,
int* histogram)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int nx = blockDim.x * gridDim.x;
int ny = blockDim.y * gridDim.y;
int t = threadIdx.x + threadIdx.y * blockDim.x;
int nt = blockDim.x * blockDim.y;
const int num_bins = 256;
//extern __shared__ int smem[N * num_bins + N];
extern __shared__ int smem[];
for (int i = t; i < N * num_bins + N; i += nt)
smem[i] = 0;
__syncthreads();
for (int row = y; row < input.rows; row += ny)
{
for (int col = x; col < input.cols; col += nx)
{
#pragma unroll
for(int c = 0; c < N; ++c)
{
uchar val = input(row, col).val[c];
atomicAdd(&smem[val* N + c], 1);
}
}
}
__syncthreads();
for (int i = t; i < num_bins; i += nt) {
#pragma unroll
for(int c = 0; c < N; ++c)
{
atomicAdd(histogram + i * N + c, smem[i * N + c]);
}
}
}
template<class T, int N>
void launch(const cv::cuda::GpuMat& in, cv::cuda::GpuMat& bins, cv::cuda::GpuMat& hist, cv::cuda::Stream& stream, float min, float step)
{
dim3 block(16, 16);
dim3 grid(cv::cudev::divUp(in.cols,16), cv::cudev::divUp(in.rows, 16));
histogram_kernel<T,N><<<grid, block, (bins.cols * N + N) * sizeof(int),
cv::cuda::StreamAccessor::getStream(stream)>>>(
in, bins, (int*)hist.data, min, step);
}
template<int N>
void launch_uchar(const cv::cuda::GpuMat& in, cv::cuda::GpuMat& bins, cv::cuda::GpuMat& hist, cv::cuda::Stream& stream, float min, float step)
{
CV_Assert(in.depth() == CV_8U);
CV_Assert(in.channels() == N);
CV_Assert(hist.cols == 256 && hist.rows == 1 && hist.depth() == CV_32S && hist.channels() == N);
dim3 block(16, 16);
dim3 grid(cv::cudev::divUp(in.cols,16), cv::cudev::divUp(in.rows, 16));
histogram_kernel_uchar<N><<<grid, block, (256 * N + N) * sizeof(int),
cv::cuda::StreamAccessor::getStream(stream)>>>(
in, (int*)hist.data);
}
void cv::cuda::histogram(const cv::cuda::GpuMat& in, cv::cuda::GpuMat& bins, cv::cuda::GpuMat& histogram,
float min, float max,
cv::cuda::Stream& stream)
{
typedef void(*func_t)(const cv::cuda::GpuMat& in, cv::cuda::GpuMat& bins, cv::cuda::GpuMat& hist, cv::cuda::Stream& stream, float min, float step);
int size = 1000;
float step = 1;
if(in.depth() == CV_8U)
{
size = 256;
min = 0;
max = 256;
}
if(bins.empty() && in.depth() != CV_8U)
{
bins.create(1, size, CV_32F);
step = (max - min) / float(size);
thrust::device_ptr<float> ptr = thrust::device_pointer_cast((float*)bins.data);
thrust::sequence(thrust::system::cuda::par.on(cv::cuda::StreamAccessor::getStream(stream)),ptr, ptr + size, min, step);
}
histogram.create(1, size, CV_MAKE_TYPE(CV_32S, in.channels()));
histogram.setTo(cv::Scalar::all(0), stream);
func_t funcs[4][7] =
{
{launch_uchar<1>, 0, launch<ushort, 1>, 0, 0, 0, 0},
{launch_uchar<2>, 0, launch<ushort, 2>, 0, 0, 0, 0},
{launch_uchar<3>, 0, launch<ushort, 3>, 0, 0, 0, 0},
{launch_uchar<4>, 0, launch<ushort, 4>, 0, 0, 0, 0}
};
CV_Assert(funcs[in.channels() - 1][in.depth()]);
funcs[in.channels() - 1][in.depth()](in, bins, histogram, stream, min, step);
}
|
f5a50d3f84976719611535bfec665fc425dc45b4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*************************************************************************
GPU Version:
Tsinghua University, Aug. 2012.
Written by Yun Fei in collaboration with
W. Wang and B. Wang
Original:
Optimization Technology Center.
Argonne National Laboratory and Northwestern University.
Written by Ciyou Zhu in collaboration with
R.H. Byrd, P. Lu-Chen and J. Nocedal.
Contributors:
* Sergey Bochkanov (ALGLIB project). Translation from FORTRAN to
pseudocode.
This software is freely available, but we expect that all publications
describing work using this software, or all commercial products using it,
quote at least one of the references given below:
* R. H. Byrd, P. Lu and J. Nocedal. A Limited Memory Algorithm for
Bound Constrained Optimization, (1995), SIAM Journal on Scientific
and Statistical Computing , 16, 5, pp. 1190-1208.
* C. Zhu, R.H. Byrd and J. Nocedal. L-BFGS-B: Algorithm 778: L-BFGS-B,
FORTRAN routines for large scale bound constrained optimization
(1997), ACM Transactions on Mathematical Software, Vol 23, Num. 4,
pp. 550 - 560.
*************************************************************************/
#include "lbfgsbcuda.h"
#include <rocblas.h>
namespace lbfgsbcuda {
namespace bmv
{
__global__ void
kernel0(
const realreal* sy,
const int col,
const realreal* v,
const int iPitch,
realreal* p
)
{
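// upper half of the BMV product: p[col+i] = v[col+i] + sum_{k<i} sy(i,k) * v(k) / sy(k,k),
// one 8-thread row reduction per i (assumes the history size col <= 8)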
const int i = blockIdx.x * blockDim.y + threadIdx.y;
const int k = threadIdx.x;
const int i2 = col + i;
volatile __shared__ realreal sdata[4][9];
realreal mySum = 0;
if(k < i && i < col) {
mySum = sy[i * iPitch + k] * v[k] / sy[k * iPitch + k];
}
sdata[threadIdx.y][k] = mySum;
__syncthreads();
if (k < 4)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile realreal* smem = sdata[threadIdx.y] + k;
*smem = mySum = mySum + smem[4];
*smem = mySum = mySum + smem[2];
*smem = mySum = mySum + smem[1];
}
if(k == 0 && i < col) {
p[i2] = v[i2] + mySum;
}
}
__global__ void
kernel1(
const realreal* sy,
const int col,
const realreal* v,
const int iPitch,
realreal* p
)
{
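// lower half of the BMV product: p[i] = (-v(i) + sum_{k>i} sy(k,i) * p[col+k]) / sy(i,i),
// again one 8-thread row reduction per i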
const int i = blockIdx.x * blockDim.y + threadIdx.y;
const int k = threadIdx.x;
volatile __shared__ realreal sdata[4][9];
realreal mySum = 0;
realreal pre = 0;
if(i < col) {
realreal syii = 1.0 / sy[i * iPitch + i];
pre = -v[i] * syii;
if(k > i && k < col) {
mySum = sy[k * iPitch + i] * p[col + k] * syii;
}
}
sdata[threadIdx.y][k] = mySum;
__syncthreads();
if (k < 4)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile realreal* smem = sdata[threadIdx.y] + k;
*smem = mySum = mySum + smem[4];
*smem = mySum = mySum + smem[2];
*smem = mySum = mySum + smem[1];
}
if(k == 0 && i < col) {
p[i] = pre + mySum;
}
}
void prog0(
const realreal* sy,
const int& col,
const int& iPitch,
const realreal* v,
realreal* p,
const hipStream_t& st)
{
int nblocks = iDivUp(col, 4);
if(col <= 1) {
if(!st) {
hipMemcpy(p + col, v + col, sizeof(realreal), hipMemcpyDeviceToDevice);
} else {
hipMemcpyAsync(p + col, v + col, sizeof(realreal), hipMemcpyDeviceToDevice, st);
}
return;
}
if(!st) {
hipLaunchKernelGGL(( kernel0), dim3(nblocks), dim3(dim3(8, 4)), 0, 0,
sy, col, v, iPitch, p);
} else {
hipLaunchKernelGGL(( kernel0), dim3(nblocks), dim3(dim3(8, 4)), 0, st,
sy, col, v, iPitch, p);
}
hipError_t err = hipGetLastError();
if (err != hipSuccess)
printf("Error: %s\n", hipGetErrorString(err));
}
void prog1(
const realreal* wt,
const int& col,
const int& iPitch,
const realreal* v,
realreal* p,
const hipStream_t& st
)
{
if(st)
hipblasSetStream(cublasHd, st);
cublasRtrsv(cublasHd,
HIPBLAS_FILL_MODE_LOWER,
HIPBLAS_OP_N,
HIPBLAS_DIAG_NON_UNIT,
col,
wt,
iPitch,
p + col,
1);
cublasRtrsv(cublasHd,
HIPBLAS_FILL_MODE_LOWER,
HIPBLAS_OP_T,
HIPBLAS_DIAG_NON_UNIT,
col,
wt,
iPitch,
p + col,
1);
}
void prog2(
const realreal* sy,
realreal* wt,
const int& col,
const int& iPitch,
const realreal* v,
realreal* p,
const hipStream_t& st)
{
int nblocks = iDivUp(col, 4);
if(!st) {
hipLaunchKernelGGL(( kernel1), dim3(nblocks), dim3(dim3(8, 4)), 0, 0,
sy, col, v, iPitch, p);
} else {
hipLaunchKernelGGL(( kernel1), dim3(nblocks), dim3(dim3(8, 4)), 0, st,
sy, col, v, iPitch, p);
}
hipError_t err = hipGetLastError();
if (err != hipSuccess)
printf("Error: %s\n", hipGetErrorString(err));
}
};
}; | f5a50d3f84976719611535bfec665fc425dc45b4.cu | /*************************************************************************
GPU Version:
Tsinghua University, Aug. 2012.
Written by Yun Fei in collaboration with
W. Wang and B. Wang
Original:
Optimization Technology Center.
Argonne National Laboratory and Northwestern University.
Written by Ciyou Zhu in collaboration with
R.H. Byrd, P. Lu-Chen and J. Nocedal.
Contributors:
* Sergey Bochkanov (ALGLIB project). Translation from FORTRAN to
pseudocode.
This software is freely available, but we expect that all publications
describing work using this software, or all commercial products using it,
quote at least one of the references given below:
* R. H. Byrd, P. Lu and J. Nocedal. A Limited Memory Algorithm for
Bound Constrained Optimization, (1995), SIAM Journal on Scientific
and Statistical Computing , 16, 5, pp. 1190-1208.
* C. Zhu, R.H. Byrd and J. Nocedal. L-BFGS-B: Algorithm 778: L-BFGS-B,
FORTRAN routines for large scale bound constrained optimization
(1997), ACM Transactions on Mathematical Software, Vol 23, Num. 4,
pp. 550 - 560.
*************************************************************************/
#include "lbfgsbcuda.h"
#include <cublas_v2.h>
namespace lbfgsbcuda {
namespace bmv
{
__global__ void
kernel0(
const realreal* sy,
const int col,
const realreal* v,
const int iPitch,
realreal* p
)
{
const int i = blockIdx.x * blockDim.y + threadIdx.y;
const int k = threadIdx.x;
const int i2 = col + i;
volatile __shared__ realreal sdata[4][9];
realreal mySum = 0;
if(k < i && i < col) {
mySum = sy[i * iPitch + k] * v[k] / sy[k * iPitch + k];
}
sdata[threadIdx.y][k] = mySum;
__syncthreads();
if (k < 4)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile realreal* smem = sdata[threadIdx.y] + k;
*smem = mySum = mySum + smem[4];
*smem = mySum = mySum + smem[2];
*smem = mySum = mySum + smem[1];
}
if(k == 0 && i < col) {
p[i2] = v[i2] + mySum;
}
}
__global__ void
kernel1(
const realreal* sy,
const int col,
const realreal* v,
const int iPitch,
realreal* p
)
{
const int i = blockIdx.x * blockDim.y + threadIdx.y;
const int k = threadIdx.x;
volatile __shared__ realreal sdata[4][9];
realreal mySum = 0;
realreal pre = 0;
if(i < col) {
realreal syii = 1.0 / sy[i * iPitch + i];
pre = -v[i] * syii;
if(k > i && k < col) {
mySum = sy[k * iPitch + i] * p[col + k] * syii;
}
}
sdata[threadIdx.y][k] = mySum;
__syncthreads();
if (k < 4)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile realreal* smem = sdata[threadIdx.y] + k;
*smem = mySum = mySum + smem[4];
*smem = mySum = mySum + smem[2];
*smem = mySum = mySum + smem[1];
}
if(k == 0 && i < col) {
p[i] = pre + mySum;
}
}
void prog0(
const realreal* sy,
const int& col,
const int& iPitch,
const realreal* v,
realreal* p,
const cudaStream_t& st)
{
int nblocks = iDivUp(col, 4);
if(col <= 1) {
if(!st) {
cudaMemcpy(p + col, v + col, sizeof(realreal), cudaMemcpyDeviceToDevice);
} else {
cudaMemcpyAsync(p + col, v + col, sizeof(realreal), cudaMemcpyDeviceToDevice, st);
}
return;
}
if(!st) {
kernel0<<<nblocks, dim3(8, 4)>>>
(sy, col, v, iPitch, p);
} else {
kernel0<<<nblocks, dim3(8, 4), 0, st>>>
(sy, col, v, iPitch, p);
}
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error: %s\n", cudaGetErrorString(err));
}
void prog1(
const realreal* wt,
const int& col,
const int& iPitch,
const realreal* v,
realreal* p,
const cudaStream_t& st
)
{
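	// Two in-place triangular solves on the upper half of p: first wt * y = p[col..],
	// then wt' * x = y, i.e. (wt * wt')^{-1} is applied to p + col (wt is assumed to
	// hold a triangular factor), using cuBLAS trsv on the given stream.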
if(st)
cublasSetStream(cublasHd, st);
cublasRtrsv(cublasHd,
CUBLAS_FILL_MODE_LOWER,
CUBLAS_OP_N,
CUBLAS_DIAG_NON_UNIT,
col,
wt,
iPitch,
p + col,
1);
cublasRtrsv(cublasHd,
CUBLAS_FILL_MODE_LOWER,
CUBLAS_OP_T,
CUBLAS_DIAG_NON_UNIT,
col,
wt,
iPitch,
p + col,
1);
}
void prog2(
const realreal* sy,
realreal* wt,
const int& col,
const int& iPitch,
const realreal* v,
realreal* p,
const cudaStream_t& st)
{
int nblocks = iDivUp(col, 4);
if(!st) {
kernel1<<<nblocks, dim3(8, 4)>>>
(sy, col, v, iPitch, p);
} else {
kernel1<<<nblocks, dim3(8, 4), 0, st>>>
(sy, col, v, iPitch, p);
}
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error: %s\n", cudaGetErrorString(err));
}
};
}; |
b1a31cc33b34c305df24764f3a95be445d06d105.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define PI 3.14159265358979323846
#include "Solver.h"
void Solver::calc_energy()
{
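 // Two-stage reduction: reduce_energy writes one partial sum per block, the
 // partials are copied back and summed on the host, and the total is scaled by
 // 0.5/Ne. The square root of the energy is pushed back to the device so the
 // normalize kernel can rescale the fields with it.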
hipLaunchKernelGGL(( reduce_energy), dim3(rblocks),dim3(nthreads),sizeof(double)*nthreads, 0, dNe, dRi, uK_n, vK_n, wK_n, bK_n, d_energy_reduce);
cutilSafeCall( hipMemcpy(energy_reduce, d_energy_reduce, sizeof(double) * rblocks, hipMemcpyDeviceToHost) );
energy_o = energy;
energy = 0.0;
for (int i = 0; i < rblocks; i++) energy+= energy_reduce[i];
energy *= (0.5 / Ne);
double sqrtenergy = sqrt(energy);
cutilSafeCall( hipMemcpy(dsqrtenergy, &sqrtenergy, sizeof(double), hipMemcpyHostToDevice) );
};
void Solver::calc_residual()
{
cutilSafeCall( hipMemcpy(uK_o, uR_o, sizeof(hipfftDoubleComplex) * Ne, hipMemcpyHostToDevice) );
cutilSafeCall( hipMemcpy(vK_o, vR_o, sizeof(hipfftDoubleComplex) * Ne, hipMemcpyHostToDevice) );
cutilSafeCall( hipMemcpy(wK_o, wR_o, sizeof(hipfftDoubleComplex) * Ne, hipMemcpyHostToDevice) );
cutilSafeCall( hipMemcpy(bK_o, bR_o, sizeof(hipfftDoubleComplex) * Ne, hipMemcpyHostToDevice) );
hipLaunchKernelGGL(( reduce_residual), dim3(rblocks),dim3(nthreads),sizeof(double)*nthreads, 0, dNe, dRi, uK_n, uK_o, vK_n, vK_o, wK_n, wK_o, bK_n, bK_o, d_residual_reduce);
cutilSafeCall( hipMemcpy(residual_reduce, d_residual_reduce, sizeof(double) * rblocks, hipMemcpyDeviceToHost) );
grad_residual = 0.0;
for (int i = 0; i < rblocks; i++) grad_residual+= residual_reduce[i];
grad_residual /= Ne*energy;
};
void Solver::optimize()
{
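 // Direct-adjoint iteration: starting from a normalized initial condition, each
 // pass runs the direct solve, adjusts b via refactor_b, runs the adjoint solve,
 // renormalizes the result to unit energy and repeats until the gradient
 // residual drops below tolerance (effectively a power iteration on the
 // composite direct-adjoint operator).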
log_begin_optimize();
init_data_files();
hipLaunchKernelGGL(( init_IC), dim3(nblocks),dim3(nthreads), 0, 0, dNe, ix, iy, iz, uK_n, vK_n, wK_n, bK_n);
 //calculate the energy of the solution (uK_n,vK_n,wK_n,bK_n) and normalize it
energy = 0.0;
calc_energy();
hipLaunchKernelGGL(( normalize), dim3(nblocks),dim3(nthreads), 0, 0, dNe, dsqrtenergy, uK_n, vK_n, wK_n, bK_n);
//copy the normalized input back to uR_o as the first 'old' solution store
cutilSafeCall( hipMemcpy(uR_o, uK_n, sizeof(hipfftDoubleComplex) * Ne, hipMemcpyDeviceToHost) );
cutilSafeCall( hipMemcpy(vR_o, vK_n, sizeof(hipfftDoubleComplex) * Ne, hipMemcpyDeviceToHost) );
cutilSafeCall( hipMemcpy(wR_o, wK_n, sizeof(hipfftDoubleComplex) * Ne, hipMemcpyDeviceToHost) );
cutilSafeCall( hipMemcpy(bR_o, bK_n, sizeof(hipfftDoubleComplex) * Ne, hipMemcpyDeviceToHost) );
tolerance = 1.0e-6;
grad_residual = tolerance + 1;
energy_residual = tolerance + 1;
int its = 0;
clock_t optimize_start, optimize_end;
clock_t solver_start, solver_end;
double direct_time = 0.0;
double adjoint_time = 0.0;
double optimize_time = 0.0;
optimize_start = clock();
while (grad_residual > tolerance) {
//solve direct system, taking IC from uK_n and putting results (real space) to uK_n
solver_start = clock();
direct_solve();
solver_end = clock();
direct_time += (double) (solver_end - solver_start) / CLOCKS_PER_SEC;
 //store gain, kinetic, potential, and other diagnostics
write_to_data_files();
hipLaunchKernelGGL(( refactor_b), dim3(nblocks),dim3(nthreads), 0, 0, dNe, dRi, bK_n);
//solve adjoint system, taking IC from uK_n and putting results (real space) to uK_n
solver_start = clock();
adjoint_solve();
solver_end = clock();
adjoint_time += (double) (solver_end - solver_start) / CLOCKS_PER_SEC;
 //calculate new energy after storing old energy, normalize solution and calculate residual
calc_energy();
energy_residual = abs(energy - energy_o) / energy;
hipLaunchKernelGGL(( normalize), dim3(nblocks),dim3(nthreads), 0, 0, dNe, dsqrtenergy, uK_n, vK_n, wK_n, bK_n);
calc_residual();
//store the old solution
cutilSafeCall( hipMemcpy(uR_o, uK_n, sizeof(hipfftDoubleComplex) * Ne, hipMemcpyDeviceToHost) );
cutilSafeCall( hipMemcpy(vR_o, vK_n, sizeof(hipfftDoubleComplex) * Ne, hipMemcpyDeviceToHost) );
cutilSafeCall( hipMemcpy(wR_o, wK_n, sizeof(hipfftDoubleComplex) * Ne, hipMemcpyDeviceToHost) );
cutilSafeCall( hipMemcpy(bR_o, bK_n, sizeof(hipfftDoubleComplex) * Ne, hipMemcpyDeviceToHost) );
Solver::log("\nSOLVER::OPTIMIZE::its = " + std::to_string((++its)));
Solver::log("SOLVER::OPTIMIZE::energy = " + std::to_string(energy));
Solver::log("SOLVER::OPTIMIZE::gradient residual = " + std::to_string(grad_residual));
Solver::log("SOLVER::OPTIMIZE::energy residual = " + std::to_string(energy_residual) + "\n");
}
optimize_end = clock();
optimize_time = (double) (optimize_end - optimize_start) / CLOCKS_PER_SEC;
Solver::log("\nSOLVER::OPTIMIZE::DONE::its = " + std::to_string(its));
Solver::log("SOLVER::OPTIMIZE::DONE::energy = " + std::to_string(energy));
Solver::log("SOLVER::OPTIMIZE::DONE::gradient_residual = " + std::to_string(grad_residual));
Solver::log("SOLVER::OPTIMIZE::DONE::energy residual = " + std::to_string(energy_residual));
Solver::log("\n\tSOLVER::average time for direct_solve() = " + std::to_string(( (double) direct_time / its )));
Solver::log("\tSOLVER::average time for adjoint_solve() = " + std::to_string(( (double) adjoint_time / its )));
Solver::log("\tSOLVER::optimize time elapsed = " + std::to_string(optimize_time));
write_opt_fields();
close_data_files();
};
int Ny_for_Ly(double ly)
{
if (ly < 4.0) return 128;
else if (ly < 20.0) return 256;
else return 512;
};
/*
int main()
{
int nblocks = 128;
int nthreads = 512;
int Nx = 64;
int Ny = 128;
int Nz = 32;
double Lx = 2.0;
double Ly = 4.0;
double Lz = 2.0;
double Re = 100.0;
double Ri = 1.0;
double Pr = 1.0;
double T = 1.0;
double dt = 0.01;
Solver solver(nblocks,nthreads,Nx,Ny,Nz,Lx,Ly,Lz,Re,Ri,Pr,T,dt);
std::cout << "past constructor, about to call optimize" << std::endl;
solver.optimize();
};
*/
int main() {
int runs = 1;
double *Ly_arr = (double*) malloc(sizeof(double) * runs);
for (int i = 0; i < runs; i++) Ly_arr[i] = 2.0 + (2.0 * i);
int nblocks = 128;
int nthreads = 512;
int Nx = 64;
int Ny = 128;
int Nz = 32;
double Lx = 2.0;
double Ly = 4.0;
double Lz = 2.0;
double Re = 100.0;
double Ri = 1.0;
double Pr = 1.0;
double T = 1.0;
double dt = 0.01;
Solver solver(nblocks,nthreads,Nx,Ny_for_Ly(Ly_arr[0]),Nz,Lx,Ly_arr[0],Lz,Re,Ri,Pr,T,dt);
for (int i = 0; i < runs; i++) {
solver.optimize();
if (i != (runs-1)) {
double ly = Ly_arr[i+1];
int ny = Ny_for_Ly(ly);
solver.reset(nblocks,nthreads,Nx,ny,Nz,Lx,ly,Lz,Re,Ri,Pr,T,dt);
}
}
free(Ly_arr);
};
| b1a31cc33b34c305df24764f3a95be445d06d105.cu | #define PI 3.14159265358979323846
#include "Solver.h"
void Solver::calc_energy()
{
reduce_energy<<<rblocks,nthreads,sizeof(double)*nthreads>>>(dNe, dRi, uK_n, vK_n, wK_n, bK_n, d_energy_reduce);
cutilSafeCall( cudaMemcpy(energy_reduce, d_energy_reduce, sizeof(double) * rblocks, cudaMemcpyDeviceToHost) );
energy_o = energy;
energy = 0.0;
for (int i = 0; i < rblocks; i++) energy+= energy_reduce[i];
energy *= (0.5 / Ne);
double sqrtenergy = sqrt(energy);
cutilSafeCall( cudaMemcpy(dsqrtenergy, &sqrtenergy, sizeof(double), cudaMemcpyHostToDevice) );
};
void Solver::calc_residual()
{
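 // Convergence measure: the previous iterate (uR_o, ...) is copied back to the
 // device, reduce_residual accumulates a difference measure between the current
 // and previous fields block by block, and the host-side sum is normalized by
 // Ne and the current energy.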
cutilSafeCall( cudaMemcpy(uK_o, uR_o, sizeof(cufftDoubleComplex) * Ne, cudaMemcpyHostToDevice) );
cutilSafeCall( cudaMemcpy(vK_o, vR_o, sizeof(cufftDoubleComplex) * Ne, cudaMemcpyHostToDevice) );
cutilSafeCall( cudaMemcpy(wK_o, wR_o, sizeof(cufftDoubleComplex) * Ne, cudaMemcpyHostToDevice) );
cutilSafeCall( cudaMemcpy(bK_o, bR_o, sizeof(cufftDoubleComplex) * Ne, cudaMemcpyHostToDevice) );
reduce_residual<<<rblocks,nthreads,sizeof(double)*nthreads>>>(dNe, dRi, uK_n, uK_o, vK_n, vK_o, wK_n, wK_o, bK_n, bK_o, d_residual_reduce);
cutilSafeCall( cudaMemcpy(residual_reduce, d_residual_reduce, sizeof(double) * rblocks, cudaMemcpyDeviceToHost) );
grad_residual = 0.0;
for (int i = 0; i < rblocks; i++) grad_residual+= residual_reduce[i];
grad_residual /= Ne*energy;
};
void Solver::optimize()
{
log_begin_optimize();
init_data_files();
init_IC<<<nblocks,nthreads>>>(dNe, ix, iy, iz, uK_n, vK_n, wK_n, bK_n);
 //calculate the energy of the solution (uK_n,vK_n,wK_n,bK_n) and normalize it
energy = 0.0;
calc_energy();
normalize<<<nblocks,nthreads>>>(dNe, dsqrtenergy, uK_n, vK_n, wK_n, bK_n);
//copy the normalized input back to uR_o as the first 'old' solution store
cutilSafeCall( cudaMemcpy(uR_o, uK_n, sizeof(cufftDoubleComplex) * Ne, cudaMemcpyDeviceToHost) );
cutilSafeCall( cudaMemcpy(vR_o, vK_n, sizeof(cufftDoubleComplex) * Ne, cudaMemcpyDeviceToHost) );
cutilSafeCall( cudaMemcpy(wR_o, wK_n, sizeof(cufftDoubleComplex) * Ne, cudaMemcpyDeviceToHost) );
cutilSafeCall( cudaMemcpy(bR_o, bK_n, sizeof(cufftDoubleComplex) * Ne, cudaMemcpyDeviceToHost) );
tolerance = 1.0e-6;
grad_residual = tolerance + 1;
energy_residual = tolerance + 1;
int its = 0;
clock_t optimize_start, optimize_end;
clock_t solver_start, solver_end;
double direct_time = 0.0;
double adjoint_time = 0.0;
double optimize_time = 0.0;
optimize_start = clock();
while (grad_residual > tolerance) {
//solve direct system, taking IC from uK_n and putting results (real space) to uK_n
solver_start = clock();
direct_solve();
solver_end = clock();
direct_time += (double) (solver_end - solver_start) / CLOCKS_PER_SEC;
 //store gain, kinetic, potential, and other diagnostics
write_to_data_files();
refactor_b<<<nblocks,nthreads>>>(dNe, dRi, bK_n);
//solve adjoint system, taking IC from uK_n and putting results (real space) to uK_n
solver_start = clock();
adjoint_solve();
solver_end = clock();
adjoint_time += (double) (solver_end - solver_start) / CLOCKS_PER_SEC;
 //calculate new energy after storing old energy, normalize solution and calculate residual
calc_energy();
energy_residual = abs(energy - energy_o) / energy;
normalize<<<nblocks,nthreads>>>(dNe, dsqrtenergy, uK_n, vK_n, wK_n, bK_n);
calc_residual();
//store the old solution
cutilSafeCall( cudaMemcpy(uR_o, uK_n, sizeof(cufftDoubleComplex) * Ne, cudaMemcpyDeviceToHost) );
cutilSafeCall( cudaMemcpy(vR_o, vK_n, sizeof(cufftDoubleComplex) * Ne, cudaMemcpyDeviceToHost) );
cutilSafeCall( cudaMemcpy(wR_o, wK_n, sizeof(cufftDoubleComplex) * Ne, cudaMemcpyDeviceToHost) );
cutilSafeCall( cudaMemcpy(bR_o, bK_n, sizeof(cufftDoubleComplex) * Ne, cudaMemcpyDeviceToHost) );
Solver::log("\nSOLVER::OPTIMIZE::its = " + std::to_string((++its)));
Solver::log("SOLVER::OPTIMIZE::energy = " + std::to_string(energy));
Solver::log("SOLVER::OPTIMIZE::gradient residual = " + std::to_string(grad_residual));
Solver::log("SOLVER::OPTIMIZE::energy residual = " + std::to_string(energy_residual) + "\n");
}
optimize_end = clock();
optimize_time = (double) (optimize_end - optimize_start) / CLOCKS_PER_SEC;
Solver::log("\nSOLVER::OPTIMIZE::DONE::its = " + std::to_string(its));
Solver::log("SOLVER::OPTIMIZE::DONE::energy = " + std::to_string(energy));
Solver::log("SOLVER::OPTIMIZE::DONE::gradient_residual = " + std::to_string(grad_residual));
Solver::log("SOLVER::OPTIMIZE::DONE::energy residual = " + std::to_string(energy_residual));
Solver::log("\n\tSOLVER::average time for direct_solve() = " + std::to_string(( (double) direct_time / its )));
Solver::log("\tSOLVER::average time for adjoint_solve() = " + std::to_string(( (double) adjoint_time / its )));
Solver::log("\tSOLVER::optimize time elapsed = " + std::to_string(optimize_time));
write_opt_fields();
close_data_files();
};
int Ny_for_Ly(double ly)
{
if (ly < 4.0) return 128;
else if (ly < 20.0) return 256;
else return 512;
};
/*
int main()
{
int nblocks = 128;
int nthreads = 512;
int Nx = 64;
int Ny = 128;
int Nz = 32;
double Lx = 2.0;
double Ly = 4.0;
double Lz = 2.0;
double Re = 100.0;
double Ri = 1.0;
double Pr = 1.0;
double T = 1.0;
double dt = 0.01;
Solver solver(nblocks,nthreads,Nx,Ny,Nz,Lx,Ly,Lz,Re,Ri,Pr,T,dt);
std::cout << "past constructor, about to call optimize" << std::endl;
solver.optimize();
};
*/
int main() {
int runs = 1;
double *Ly_arr = (double*) malloc(sizeof(double) * runs);
for (int i = 0; i < runs; i++) Ly_arr[i] = 2.0 + (2.0 * i);
int nblocks = 128;
int nthreads = 512;
int Nx = 64;
int Ny = 128;
int Nz = 32;
double Lx = 2.0;
double Ly = 4.0;
double Lz = 2.0;
double Re = 100.0;
double Ri = 1.0;
double Pr = 1.0;
double T = 1.0;
double dt = 0.01;
Solver solver(nblocks,nthreads,Nx,Ny_for_Ly(Ly_arr[0]),Nz,Lx,Ly_arr[0],Lz,Re,Ri,Pr,T,dt);
for (int i = 0; i < runs; i++) {
solver.optimize();
if (i != (runs-1)) {
double ly = Ly_arr[i+1];
int ny = Ny_for_Ly(ly);
solver.reset(nblocks,nthreads,Nx,ny,Nz,Lx,ly,Lz,Re,Ri,Pr,T,dt);
}
}
free(Ly_arr);
};
|
ef32256c75ffabed64a214f723497e3aca53f2d0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// @file cov_froNorm_gpu.cu
// @brief fast MPN-COV implementation (GPU)
// @author Jiangtao Xie
// @author Peihua Li
/*
Copyright (C) 2018 Peihua Li and Jiangtao Xie
All rights reserved.
*/
#include "nncov_froNorm_blas.hpp"
#include "../data.hpp"
#include <math.h>
#include <memory>
#include <cstdlib>
#include <algorithm>
#include <limits>
#include <cassert>
#include "blashelper_gpu.hpp"
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x) // We import this Macro function from our Caffe Implementation
inline int
GET_BLOCKS(const int N)
{
return (N + VL_CUDA_NUM_THREADS - 1) / VL_CUDA_NUM_THREADS; // We import this function from our Caffe Implementation
}
template<typename T> __global__ void set_kernel(const ptrdiff_t n, const T alpha, T* y)
{
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template<typename T> void gpuMemset(const ptrdiff_t n, const T alpha, T* y)
{
if(alpha == 0){
hipMemset(y, 0, sizeof(T)*n);
}
hipLaunchKernelGGL(( set_kernel<T>), dim3(GET_BLOCKS(n)),dim3(VL_CUDA_NUM_THREADS), 0, 0, n , alpha, y);
}
template<typename T> __global__ void init_I_kernel(T* a,
T alpha,
const ptrdiff_t n)
{
CUDA_KERNEL_LOOP(index,n){
a[index*(n+1)] = alpha;
}
}
template<typename T> __global__ void froNormBackward_kernel(T* a,
T const* b,
T const* alpha,
ptrdiff_t n)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n) {
a[i] += alpha[0] * b[i];
}
}
template<typename T> __host__ void
froNormBackward_gpu(T* a,
T const* b,
T const* alpha,
ptrdiff_t n)
{
hipLaunchKernelGGL(( froNormBackward_kernel<T>)
, dim3(GET_BLOCKS(n)),dim3(VL_CUDA_NUM_THREADS), 0, 0, a,b,alpha,n);
}
template<typename T> __global__ void scale_kernel(T* a,
T const* b,
T const* alpha,
ptrdiff_t n)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n){
a[i] = (1.0f / alpha[0]) * b[i];
}
}
template<typename T> __global__ void scale2_kernel(T* a,
T const* b,
T const* alpha,
ptrdiff_t n)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n){
a[i] = sqrt(alpha[0]) * b[i];
}
}
template<typename T> __global__ void mul_kernel(T* a,
T const* alpha,
ptrdiff_t n)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n){
a[i] = a[i] / ( 2.0f * alpha[0]);
}
}
template<typename T> __global__ void sqrt_kernel(T* a,
ptrdiff_t n)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n){
a[i] = sqrt(a[i]);
}
}
template<typename T> __global__ void computeDerdataAux_kernel(T* a,
T const* b,
ptrdiff_t n)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n){
a[i] /= 2 * pow(b[i], (T)3.0f /2.0f);
}
}
template<typename T> __global__ void computeGradientCoef_kernel(T* a,
T const* b,
T const* c,
T const* d,
ptrdiff_t n)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n){
a[i] = b[i] - c[i] / ( pow(d[i], (T)3.0f));
}
}
namespace vl { namespace impl {
template<typename T,vl::DataType dataType>
struct cov_froNorm<vl::VLDT_GPU,T,dataType>
{
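        // Per-sample Frobenius-norm normalization. forward stores aux_T[d] = ||A_d||_F
        // (dot product followed by sqrt) and outputs A_d / aux_T[d]; forward_aux scales
        // its input vector by sqrt(aux_T[d]); the backward passes apply the matching
        // chain rules.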
static vl::ErrorCode
forward(Context& context,
T* output,
T const* data,
T* aux_T,
size_t height, size_t width, size_t depth, size_t num)
{
vl::ErrorCode error;
ptrdiff_t m = height,n = width,L = num,d;
ptrdiff_t dataOffset;
ptrdiff_t aux_TOffset;
ptrdiff_t outputOffset;
for(d = 0;d < L; d++){ // Fro-Norm
aux_TOffset = d;
dataOffset = d*n*n;
error = vl::impl::blas<vl::VLDT_GPU, dataType>::dot(context,
n*n,
data + dataOffset,ptrdiff_t(1),
data + dataOffset,ptrdiff_t(1),
aux_T + aux_TOffset);
if(error != vl::VLE_Success) {goto done ;}
}
hipLaunchKernelGGL(( sqrt_kernel<T>), dim3(GET_BLOCKS(L)),dim3(VL_CUDA_NUM_THREADS), 0, 0, aux_T,L);
for(d = 0;d < L; d++){
aux_TOffset = d;
outputOffset = d*n*n;
dataOffset = d*n*n;
hipLaunchKernelGGL(( scale_kernel<T>), dim3(GET_BLOCKS(n*n)),dim3(VL_CUDA_NUM_THREADS), 0, 0, output + outputOffset,data + dataOffset,aux_T + aux_TOffset,n*n);
}
done:
return context.passError(error, __func__);
}
static vl::ErrorCode
backward(Context& context,
T* derData,
T const* data,
T const* derOutput,
T const* derOutput_aux,
T const* aux_T,
size_t height, size_t width, size_t depth, size_t num)
{
vl::ErrorCode error;
ptrdiff_t m = height,n = width,L = num,d;
ptrdiff_t derDataOffset,aux_TOffset,gradientValueOffset;
ptrdiff_t dataOffset,derOutput_auxOffset,P_dot_dLdPOffset;
ptrdiff_t derOutputOffset;
unsigned int workspaceSize = (unsigned int)(2*L);
T* workspace = (T*)context.getWorkspace(vl::VLDT_GPU , workspaceSize*sizeof(T));
T* P_dot_dLdP = workspace;
T* gradientValue = P_dot_dLdP + L;
for(d = 0;d < L;d++){
dataOffset = d*m*n;
derOutput_auxOffset = d;
derOutputOffset = d*m*n;
aux_TOffset = d;
P_dot_dLdPOffset = d;
error = vl::impl::blas<vl::VLDT_GPU, dataType>::dot(context,
n*n,
data + dataOffset,ptrdiff_t(1),
derOutput + derOutputOffset,ptrdiff_t(1),
P_dot_dLdP + P_dot_dLdPOffset);
if(error != vl::VLE_Success) {goto done ;}
}
for(d = 0;d < L;d++){
aux_TOffset = d;
derDataOffset = d*m*n;
derOutputOffset = d*m*n;
hipLaunchKernelGGL(( scale_kernel<T>), dim3(GET_BLOCKS(n*n)),dim3(VL_CUDA_NUM_THREADS), 0, 0, derData + derDataOffset,derOutput + derOutputOffset,aux_T + aux_TOffset,n*n);
}
hipLaunchKernelGGL(( computeGradientCoef_kernel<T>), dim3(GET_BLOCKS(L)),dim3(VL_CUDA_NUM_THREADS), 0, 0, gradientValue,derOutput_aux,P_dot_dLdP,aux_T,L);
for(d = 0;d < L;d++){
dataOffset = d*m*n;
derDataOffset = d*m*n;
gradientValueOffset = d;
froNormBackward_gpu(derData + derDataOffset,data + dataOffset,gradientValue + gradientValueOffset, m*n);
}
done:
return context.passError(error, __func__);
}
static vl::ErrorCode
forward_aux(Context& context,
T* output,
T const* data,
T* aux_T,
size_t height, size_t width, size_t depth, size_t num)
{
vl::ErrorCode error;
ptrdiff_t length = depth,L = num,d;
ptrdiff_t outputOffset,aux_TOffset,dataOffset;
for(d = 0; d < L; d++){
outputOffset = d*length;
dataOffset = d*length;
aux_TOffset = d;
/*error = vl::impl::blas<vl::VLDT_CPU, dataType>::scal(context,
length,
alpha,
output + OutputOffset,ptrdiff_t(1));
if(error != vl::VLE_Success) {goto done ;}*/
hipLaunchKernelGGL(( scale2_kernel<T>), dim3(GET_BLOCKS(length)),dim3(VL_CUDA_NUM_THREADS), 0, 0, output + outputOffset,data + dataOffset,aux_T + aux_TOffset,length);
error = vl::VLE_Success;
}
done:
return context.passError(error, __func__);
}
static vl::ErrorCode
backward_aux(Context& context,
T* derData,
T* derData_aux,
T const* data,
T const* derOutput,
T const* aux_T,
size_t height, size_t width, size_t depth, size_t num)
{
vl::ErrorCode error;
ptrdiff_t length = depth,L = num,d;
ptrdiff_t dataOffset,aux_TOffset;
ptrdiff_t derDataOffset,derData_auxOffset;
ptrdiff_t derOutputOffset;
for(d = 0; d < L; d++){
derOutputOffset = d*length;
derData_auxOffset = d;
dataOffset = d*length;
aux_TOffset= d;
error = vl::impl::blas<vl::VLDT_GPU, dataType>::dot(context,
length,
data + dataOffset,ptrdiff_t(1),
derOutput + derOutputOffset,ptrdiff_t(1),
derData_aux + derData_auxOffset);
if(error != vl::VLE_Success) {goto done ;}
}
hipLaunchKernelGGL(( computeDerdataAux_kernel<T>), dim3(GET_BLOCKS(L)),dim3(VL_CUDA_NUM_THREADS), 0, 0, derData_aux,aux_T,L);
for(d = 0;d < L; d++){
derDataOffset = d*length;
derOutputOffset = d*length;
aux_TOffset= d;
hipLaunchKernelGGL(( scale2_kernel<T>), dim3(GET_BLOCKS(length)),dim3(VL_CUDA_NUM_THREADS), 0, 0, derData + derDataOffset,derOutput + derOutputOffset,aux_T + aux_TOffset,length);
}
done:
return context.passError(error, __func__);
}
};
} }
template struct vl::impl::cov_froNorm<vl::VLDT_GPU, float,vl::VLDT_Float> ;
#ifdef ENABLE_DOUBLE
template struct vl::impl::cov_froNorm<vl::VLDT_GPU, double, vl::VLDT_Double> ;
#endif
| ef32256c75ffabed64a214f723497e3aca53f2d0.cu | // @file cov_froNorm_gpu.cu
// @brief fast MPN-COV implementation (GPU)
// @author Jiangtao Xie
// @author Peihua Li
/*
Copyright (C) 2018 Peihua Li and Jiangtao Xie
All rights reserved.
*/
#include "nncov_froNorm_blas.hpp"
#include "../data.hpp"
#include <math.h>
#include <memory>
#include <cstdlib>
#include <algorithm>
#include <limits>
#include <cassert>
#include "blashelper_gpu.hpp"
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x) // We import this Macro function from our Caffe Implementation
inline int
GET_BLOCKS(const int N)
{
return (N + VL_CUDA_NUM_THREADS - 1) / VL_CUDA_NUM_THREADS; // We import this function from our Caffe Implementation
}
template<typename T> __global__ void set_kernel(const ptrdiff_t n, const T alpha, T* y)
{
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template<typename T> void gpuMemset(const ptrdiff_t n, const T alpha, T* y)
{
if(alpha == 0){
cudaMemset(y, 0, sizeof(T)*n);
}
set_kernel<T><<<GET_BLOCKS(n),VL_CUDA_NUM_THREADS>>>(n , alpha, y);
}
template<typename T> __global__ void init_I_kernel(T* a,
T alpha,
const ptrdiff_t n)
{
CUDA_KERNEL_LOOP(index,n){
a[index*(n+1)] = alpha;
}
}
template<typename T> __global__ void froNormBackward_kernel(T* a,
T const* b,
T const* alpha,
ptrdiff_t n)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n) {
a[i] += alpha[0] * b[i];
}
}
template<typename T> __host__ void
froNormBackward_gpu(T* a,
T const* b,
T const* alpha,
ptrdiff_t n)
{
froNormBackward_kernel<T>
<<<GET_BLOCKS(n),VL_CUDA_NUM_THREADS>>>(a,b,alpha,n);
}
template<typename T> __global__ void scale_kernel(T* a,
T const* b,
T const* alpha,
ptrdiff_t n)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n){
a[i] = (1.0f / alpha[0]) * b[i];
}
}
template<typename T> __global__ void scale2_kernel(T* a,
T const* b,
T const* alpha,
ptrdiff_t n)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n){
a[i] = sqrt(alpha[0]) * b[i];
}
}
template<typename T> __global__ void mul_kernel(T* a,
T const* alpha,
ptrdiff_t n)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n){
a[i] = a[i] / ( 2.0f * alpha[0]);
}
}
template<typename T> __global__ void sqrt_kernel(T* a,
ptrdiff_t n)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n){
a[i] = sqrt(a[i]);
}
}
template<typename T> __global__ void computeDerdataAux_kernel(T* a,
T const* b,
ptrdiff_t n)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n){
a[i] /= 2 * pow(b[i], (T)3.0f /2.0f);
}
}
template<typename T> __global__ void computeGradientCoef_kernel(T* a,
T const* b,
T const* c,
T const* d,
ptrdiff_t n)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n){
a[i] = b[i] - c[i] / ( pow(d[i], (T)3.0f));
}
}
namespace vl { namespace impl {
template<typename T,vl::DataType dataType>
struct cov_froNorm<vl::VLDT_GPU,T,dataType>
{
static vl::ErrorCode
forward(Context& context,
T* output,
T const* data,
T* aux_T,
size_t height, size_t width, size_t depth, size_t num)
{
vl::ErrorCode error;
ptrdiff_t m = height,n = width,L = num,d;
ptrdiff_t dataOffset;
ptrdiff_t aux_TOffset;
ptrdiff_t outputOffset;
for(d = 0;d < L; d++){ // Fro-Norm
aux_TOffset = d;
dataOffset = d*n*n;
error = vl::impl::blas<vl::VLDT_GPU, dataType>::dot(context,
n*n,
data + dataOffset,ptrdiff_t(1),
data + dataOffset,ptrdiff_t(1),
aux_T + aux_TOffset);
if(error != vl::VLE_Success) {goto done ;}
}
sqrt_kernel<T><<<GET_BLOCKS(L),VL_CUDA_NUM_THREADS>>>(aux_T,L);
for(d = 0;d < L; d++){
aux_TOffset = d;
outputOffset = d*n*n;
dataOffset = d*n*n;
scale_kernel<T><<<GET_BLOCKS(n*n),VL_CUDA_NUM_THREADS>>>(output + outputOffset,data + dataOffset,aux_T + aux_TOffset,n*n);
}
done:
return context.passError(error, __func__);
}
static vl::ErrorCode
backward(Context& context,
T* derData,
T const* data,
T const* derOutput,
T const* derOutput_aux,
T const* aux_T,
size_t height, size_t width, size_t depth, size_t num)
{
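            // Chain rule for Y = A / ||A||_F (plus the auxiliary path):
            //   dL/dA = dL/dY / ||A||  +  ( derOutput_aux - <A, dL/dY> / ||A||^3 ) * A
            // where <A, dL/dY> is accumulated per sample into P_dot_dLdP below and
            // ||A|| is the norm stored in aux_T during the forward pass.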
vl::ErrorCode error;
ptrdiff_t m = height,n = width,L = num,d;
ptrdiff_t derDataOffset,aux_TOffset,gradientValueOffset;
ptrdiff_t dataOffset,derOutput_auxOffset,P_dot_dLdPOffset;
ptrdiff_t derOutputOffset;
unsigned int workspaceSize = (unsigned int)(2*L);
T* workspace = (T*)context.getWorkspace(vl::VLDT_GPU , workspaceSize*sizeof(T));
T* P_dot_dLdP = workspace;
T* gradientValue = P_dot_dLdP + L;
for(d = 0;d < L;d++){
dataOffset = d*m*n;
derOutput_auxOffset = d;
derOutputOffset = d*m*n;
aux_TOffset = d;
P_dot_dLdPOffset = d;
error = vl::impl::blas<vl::VLDT_GPU, dataType>::dot(context,
n*n,
data + dataOffset,ptrdiff_t(1),
derOutput + derOutputOffset,ptrdiff_t(1),
P_dot_dLdP + P_dot_dLdPOffset);
if(error != vl::VLE_Success) {goto done ;}
}
for(d = 0;d < L;d++){
aux_TOffset = d;
derDataOffset = d*m*n;
derOutputOffset = d*m*n;
scale_kernel<T><<<GET_BLOCKS(n*n),VL_CUDA_NUM_THREADS>>>(derData + derDataOffset,derOutput + derOutputOffset,aux_T + aux_TOffset,n*n);
}
computeGradientCoef_kernel<T><<<GET_BLOCKS(L),VL_CUDA_NUM_THREADS>>>(gradientValue,derOutput_aux,P_dot_dLdP,aux_T,L);
for(d = 0;d < L;d++){
dataOffset = d*m*n;
derDataOffset = d*m*n;
gradientValueOffset = d;
froNormBackward_gpu(derData + derDataOffset,data + dataOffset,gradientValue + gradientValueOffset, m*n);
}
done:
return context.passError(error, __func__);
}
static vl::ErrorCode
forward_aux(Context& context,
T* output,
T const* data,
T* aux_T,
size_t height, size_t width, size_t depth, size_t num)
{
vl::ErrorCode error;
ptrdiff_t length = depth,L = num,d;
ptrdiff_t outputOffset,aux_TOffset,dataOffset;
for(d = 0; d < L; d++){
outputOffset = d*length;
dataOffset = d*length;
aux_TOffset = d;
/*error = vl::impl::blas<vl::VLDT_CPU, dataType>::scal(context,
length,
alpha,
output + OutputOffset,ptrdiff_t(1));
if(error != vl::VLE_Success) {goto done ;}*/
scale2_kernel<T><<<GET_BLOCKS(length),VL_CUDA_NUM_THREADS>>>(output + outputOffset,data + dataOffset,aux_T + aux_TOffset,length);
error = vl::VLE_Success;
}
done:
return context.passError(error, __func__);
}
static vl::ErrorCode
backward_aux(Context& context,
T* derData,
T* derData_aux,
T const* data,
T const* derOutput,
T const* aux_T,
size_t height, size_t width, size_t depth, size_t num)
{
vl::ErrorCode error;
ptrdiff_t length = depth,L = num,d;
ptrdiff_t dataOffset,aux_TOffset;
ptrdiff_t derDataOffset,derData_auxOffset;
ptrdiff_t derOutputOffset;
for(d = 0; d < L; d++){
derOutputOffset = d*length;
derData_auxOffset = d;
dataOffset = d*length;
aux_TOffset= d;
error = vl::impl::blas<vl::VLDT_GPU, dataType>::dot(context,
length,
data + dataOffset,ptrdiff_t(1),
derOutput + derOutputOffset,ptrdiff_t(1),
derData_aux + derData_auxOffset);
if(error != vl::VLE_Success) {goto done ;}
}
computeDerdataAux_kernel<T><<<GET_BLOCKS(L),VL_CUDA_NUM_THREADS>>>(derData_aux,aux_T,L);
for(d = 0;d < L; d++){
derDataOffset = d*length;
derOutputOffset = d*length;
aux_TOffset= d;
scale2_kernel<T><<<GET_BLOCKS(length),VL_CUDA_NUM_THREADS>>>(derData + derDataOffset,derOutput + derOutputOffset,aux_T + aux_TOffset,length);
}
done:
return context.passError(error, __func__);
}
};
} }
template struct vl::impl::cov_froNorm<vl::VLDT_GPU, float,vl::VLDT_Float> ;
#ifdef ENABLE_DOUBLE
template struct vl::impl::cov_froNorm<vl::VLDT_GPU, double, vl::VLDT_Double> ;
#endif
|
6688d4f60f28f22688eaa9f61d67ba2f857e19d1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@precisions normal z -> s d c
@author Mark Gates
*/
#include "magma_internal.h"
#define NB 64
/******************************************************************************/
/*
    Batches zgeadd of multiple arrays;
y-dimension of grid is different arrays,
x-dimension of grid is blocks for each array.
Matrix is m x n, and is divided into block rows, each NB x n.
Each CUDA block has NB threads to handle one block row.
Each thread adds one row, iterating across all columns.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
TODO. Block in both directions, for large matrices.
E.g., each block does 64x64 tile, instead of 64xN tile.
*/
__global__ void
zgeadd_batched_kernel(
int m, int n,
magmaDoubleComplex alpha,
const magmaDoubleComplex * const *dAarray, int ldda,
magmaDoubleComplex **dBarray, int lddb )
{
// dA and dB iterate across row i
const magmaDoubleComplex *dA = dAarray[ blockIdx.y ];
magmaDoubleComplex *dB = dBarray[ blockIdx.y ];
int i = blockIdx.x*blockDim.x + threadIdx.x;
if ( i < m ) {
dA += i;
dB += i;
const magmaDoubleComplex *dAend = dA + n*ldda;
while( dA < dAend ) {
*dB = alpha*(*dA) + (*dB);
dA += ldda;
dB += lddb;
}
}
}
/***************************************************************************//**
Purpose
-------
    ZGEADD adds two sets of matrices, dBarray[i] = alpha*dAarray[i] + dBarray[i],
for i = 0, ..., batchCount-1.
Arguments
---------
@param[in]
m INTEGER
The number of rows of each matrix dAarray[i]. M >= 0.
@param[in]
n INTEGER
The number of columns of each matrix dAarray[i]. N >= 0.
@param[in]
alpha COMPLEX_16
The scalar alpha.
@param[in]
dAarray array on GPU, dimension(batchCount), of pointers to arrays,
with each array a COMPLEX_16 array, dimension (LDDA,N)
The m by n matrices dAarray[i].
@param[in]
ldda INTEGER
The leading dimension of each array dAarray[i]. LDDA >= max(1,M).
@param[in,out]
dBarray array on GPU, dimension(batchCount), of pointers to arrays,
with each array a COMPLEX_16 array, dimension (LDDB,N)
The m by n matrices dBarray[i].
@param[in]
lddb INTEGER
The leading dimension of each array dBarray[i]. LDDB >= max(1,M).
@param[in]
batchCount INTEGER
The number of matrices to add; length of dAarray and dBarray.
batchCount >= 0.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_geadd_batched
*******************************************************************************/
extern "C" void
magmablas_zgeadd_batched(
magma_int_t m, magma_int_t n,
magmaDoubleComplex alpha,
magmaDoubleComplex_const_ptr const dAarray[], magma_int_t ldda,
magmaDoubleComplex_ptr dBarray[], magma_int_t lddb,
magma_int_t batchCount,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
else if ( batchCount < 0 )
info = -8;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 || n == 0 || batchCount == 0 )
return;
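    // one NB-thread block covers NB rows of one matrix: grid.x tiles the m rows
    // in chunks of NB, grid.y indexes the batch, and each thread streams across
    // the n columns of its row.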
dim3 threads( NB );
dim3 grid( magma_ceildiv( m, NB ), batchCount );
hipLaunchKernelGGL(( zgeadd_batched_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, alpha, dAarray, ldda, dBarray, lddb );
}
| 6688d4f60f28f22688eaa9f61d67ba2f857e19d1.cu | /*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@precisions normal z -> s d c
@author Mark Gates
*/
#include "magma_internal.h"
#define NB 64
/******************************************************************************/
/*
    Batches zgeadd of multiple arrays;
y-dimension of grid is different arrays,
x-dimension of grid is blocks for each array.
Matrix is m x n, and is divided into block rows, each NB x n.
Each CUDA block has NB threads to handle one block row.
Each thread adds one row, iterating across all columns.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
TODO. Block in both directions, for large matrices.
E.g., each block does 64x64 tile, instead of 64xN tile.
*/
__global__ void
zgeadd_batched_kernel(
int m, int n,
magmaDoubleComplex alpha,
const magmaDoubleComplex * const *dAarray, int ldda,
magmaDoubleComplex **dBarray, int lddb )
{
// dA and dB iterate across row i
const magmaDoubleComplex *dA = dAarray[ blockIdx.y ];
magmaDoubleComplex *dB = dBarray[ blockIdx.y ];
int i = blockIdx.x*blockDim.x + threadIdx.x;
if ( i < m ) {
dA += i;
dB += i;
const magmaDoubleComplex *dAend = dA + n*ldda;
while( dA < dAend ) {
*dB = alpha*(*dA) + (*dB);
dA += ldda;
dB += lddb;
}
}
}
/***************************************************************************//**
Purpose
-------
    ZGEADD adds two sets of matrices, dBarray[i] = alpha*dAarray[i] + dBarray[i],
for i = 0, ..., batchCount-1.
Arguments
---------
@param[in]
m INTEGER
The number of rows of each matrix dAarray[i]. M >= 0.
@param[in]
n INTEGER
The number of columns of each matrix dAarray[i]. N >= 0.
@param[in]
alpha COMPLEX_16
The scalar alpha.
@param[in]
dAarray array on GPU, dimension(batchCount), of pointers to arrays,
with each array a COMPLEX_16 array, dimension (LDDA,N)
The m by n matrices dAarray[i].
@param[in]
ldda INTEGER
The leading dimension of each array dAarray[i]. LDDA >= max(1,M).
@param[in,out]
dBarray array on GPU, dimension(batchCount), of pointers to arrays,
with each array a COMPLEX_16 array, dimension (LDDB,N)
The m by n matrices dBarray[i].
@param[in]
lddb INTEGER
The leading dimension of each array dBarray[i]. LDDB >= max(1,M).
@param[in]
batchCount INTEGER
The number of matrices to add; length of dAarray and dBarray.
batchCount >= 0.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_geadd_batched
*******************************************************************************/
extern "C" void
magmablas_zgeadd_batched(
magma_int_t m, magma_int_t n,
magmaDoubleComplex alpha,
magmaDoubleComplex_const_ptr const dAarray[], magma_int_t ldda,
magmaDoubleComplex_ptr dBarray[], magma_int_t lddb,
magma_int_t batchCount,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
else if ( batchCount < 0 )
info = -8;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 || n == 0 || batchCount == 0 )
return;
dim3 threads( NB );
dim3 grid( magma_ceildiv( m, NB ), batchCount );
zgeadd_batched_kernel<<< grid, threads, 0, queue->cuda_stream() >>>(
m, n, alpha, dAarray, ldda, dBarray, lddb );
}
|
dfb6150b08a88eed66900d199807f4cbb7b37805.hip | // !!! This is a file automatically generated by hipify!!!
/* -*- c-basic-offset:2; tab-width:2; indent-tabs-mode:nil -*-
*
*/
#include <deal.II/base/subscriptor.h>
#include <deal.II/base/quadrature_lib.h>
#include <deal.II/base/function.h>
#include <deal.II/base/logstream.h>
#include <deal.II/base/timer.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/constraint_matrix.h>
#include <deal.II/lac/vector_memory.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_values.h>
#include <deal.II/grid/tria.h>
#include <deal.II/grid/tria_accessor.h>
#include <deal.II/grid/tria_iterator.h>
#include <deal.II/grid/tria_boundary_lib.h>
#include <deal.II/numerics/vector_tools.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_tools.h>
#include <fstream>
#include <sstream>
#include "matrix_free_gpu/defs.h"
#include "matrix_free_gpu/gpu_vec.h"
#include "matrix_free_gpu/cuda_utils.cuh"
#include "matrix_free_gpu/cuda_sparse_matrix.h"
#include "matrix_free_gpu/gpu_array.cuh"
#include "poisson_common.h"
#include "bmop_common.h"
using namespace dealii;
// #define USE_HANGING_NODES
#define N_ITERATIONS 100
#ifdef DEGREE_FE
const unsigned int degree_finite_element = DEGREE_FE;
#else
const unsigned int degree_finite_element = 4;
#endif
#ifdef DIMENSION
const unsigned int dimension = DIMENSION;
#else
const unsigned int dimension = 3;
#endif
#ifdef BMOP_USE_FLOATS
typedef float number;
#else
typedef double number;
#endif
//-------------------------------------------------------------------------
// problem
//-------------------------------------------------------------------------
template <int dim, int fe_degree>
class LaplaceProblem
{
public:
LaplaceProblem ();
void run (int nref);
private:
void setup_system ();
void assemble_system ();
void solve ();
Triangulation<dim> triangulation;
FE_Q<dim> fe;
DoFHandler<dim> dof_handler;
ConstraintMatrix constraints;
SparsityPattern sparsity_pattern;
typedef CUDAWrappers::SparseMatrix<number> SystemMatrixType;
SystemMatrixType system_matrix;
GpuVector<number> src;
GpuVector<number> dst;
double setup_time;
ConditionalOStream time_details;
unsigned int n_iterations;
};
template <int dim, int fe_degree>
LaplaceProblem<dim,fe_degree>::LaplaceProblem ()
:
fe (fe_degree),
dof_handler (triangulation),
time_details (std::cout, false),
n_iterations(N_ITERATIONS)
{}
template <int dim, int fe_degree>
void LaplaceProblem<dim,fe_degree>::setup_system ()
{
Timer time;
time.start ();
setup_time = 0;
dof_handler.distribute_dofs (fe);
constraints.clear();
VectorTools::interpolate_boundary_values (dof_handler,
0,
ZeroFunction<dim>(),
constraints);
DoFTools::make_hanging_node_constraints(dof_handler,constraints);
constraints.close();
setup_time += time.wall_time();
DynamicSparsityPattern dsp(dof_handler.n_dofs());
DoFTools::make_sparsity_pattern(dof_handler,
dsp,
constraints,
/*keep_constrained_dofs = */ false);
sparsity_pattern.copy_from(dsp);
dst.reinit (dof_handler.n_dofs());
src.reinit (dof_handler.n_dofs());
setup_time += time.wall_time();
}
template <int dim, int fe_degree>
void LaplaceProblem<dim,fe_degree>::assemble_system ()
{
Timer time;
// assemble matrix
SparseMatrix<number> system_matrix_host(sparsity_pattern);
CoefficientFun<dim,number> coeff;
const QGauss<dim> quadrature_formula(fe.degree+1);
FEValues<dim> fe_values (fe, quadrature_formula,
update_values | update_gradients |
update_quadrature_points | update_JxW_values );
const unsigned int dofs_per_cell = fe.dofs_per_cell;
const unsigned int n_q_points = quadrature_formula.size();
FullMatrix<number> cell_matrix (dofs_per_cell, dofs_per_cell);
std::vector<types::global_dof_index> local_dof_indices (dofs_per_cell);
int ncells = dof_handler.get_triangulation().n_active_cells();
typename DoFHandler<dim>::active_cell_iterator
cell = dof_handler.begin_active(),
endc = dof_handler.end();
for (; cell!=endc; ++cell)
{
cell_matrix = 0;
fe_values.reinit (cell);
for (unsigned int q_index=0; q_index<n_q_points; ++q_index)
{
const number current_coefficient = coeff.value(fe_values.quadrature_point (q_index));
for (unsigned int i=0; i<dofs_per_cell; ++i)
{
for (unsigned int j=0; j<dofs_per_cell; ++j)
cell_matrix(i,j) += (current_coefficient *
fe_values.shape_grad(i,q_index) *
fe_values.shape_grad(j,q_index) *
fe_values.JxW(q_index));
}
}
cell->get_dof_indices (local_dof_indices);
constraints.distribute_local_to_global (cell_matrix,
local_dof_indices,
system_matrix_host);
}
system_matrix.reinit(system_matrix_host);
setup_time += time.wall_time();
}
template <int dim, int fe_degree>
void LaplaceProblem<dim,fe_degree>::solve ()
{
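  // Benchmark loop: starting from a constant vector, apply the assembled sparse
  // matrix n_iterations times (ping-ponging src and dst), then report dimension,
  // FE degree, number of DoFs and the average wall time per vmult.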
Timer time;
// IC
dst = 0.1;
for(int i = 0; i < n_iterations; ++i) {
dst.swap(src);
system_matrix.vmult(dst,src);
}
hipDeviceSynchronize();
time.stop();
printf("%d\t%d\t%d\t%g\n",dim,fe_degree,dof_handler.n_dofs(),time.wall_time() / n_iterations);
}
template <int dim, int fe_degree>
void LaplaceProblem<dim,fe_degree>::run (int n_ref)
{
#ifdef BALL_GRID
domain_case_t domain = BALL;
#else
domain_case_t domain = CUBE;
#endif
#ifdef ADAPTIVE_GRID
bool pseudo_adaptive_grid = true;
#else
bool pseudo_adaptive_grid = false;
#endif
bmop_setup_mesh(triangulation, domain,
pseudo_adaptive_grid, n_ref);
setup_system ();
assemble_system ();
solve ();
}
int main (int argc, char **argv)
{
try
{
int max_refinement = 1;
int min_refinement = 0;
if(argc > 1)
max_refinement = atoi(argv[1]);
if(argc > 2)
min_refinement = atoi(argv[2]);
deallog.depth_console(0);
for(int r=min_refinement; r<=max_refinement; r++) {
LaplaceProblem<dimension,degree_finite_element> laplace_problem;
laplace_problem.run ( r);
}
}
catch (std::exception &exc)
{
std::cerr << std::endl << std::endl
<< "----------------------------------------------------"
<< std::endl;
std::cerr << "Exception on processing: " << std::endl
<< exc.what() << std::endl
<< "Aborting!" << std::endl
<< "----------------------------------------------------"
<< std::endl;
return 1;
}
catch (...)
{
std::cerr << std::endl << std::endl
<< "----------------------------------------------------"
<< std::endl;
std::cerr << "Unknown exception!" << std::endl
<< "Aborting!" << std::endl
<< "----------------------------------------------------"
<< std::endl;
return 1;
}
GrowingVectorMemory<GpuVector<number> >::release_unused_memory();
return 0;
}
| dfb6150b08a88eed66900d199807f4cbb7b37805.cu | /* -*- c-basic-offset:2; tab-width:2; indent-tabs-mode:nil -*-
*
*/
#include <deal.II/base/subscriptor.h>
#include <deal.II/base/quadrature_lib.h>
#include <deal.II/base/function.h>
#include <deal.II/base/logstream.h>
#include <deal.II/base/timer.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/constraint_matrix.h>
#include <deal.II/lac/vector_memory.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_values.h>
#include <deal.II/grid/tria.h>
#include <deal.II/grid/tria_accessor.h>
#include <deal.II/grid/tria_iterator.h>
#include <deal.II/grid/tria_boundary_lib.h>
#include <deal.II/numerics/vector_tools.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_tools.h>
#include <fstream>
#include <sstream>
#include "matrix_free_gpu/defs.h"
#include "matrix_free_gpu/gpu_vec.h"
#include "matrix_free_gpu/cuda_utils.cuh"
#include "matrix_free_gpu/cuda_sparse_matrix.h"
#include "matrix_free_gpu/gpu_array.cuh"
#include "poisson_common.h"
#include "bmop_common.h"
using namespace dealii;
// #define USE_HANGING_NODES
#define N_ITERATIONS 100
#ifdef DEGREE_FE
const unsigned int degree_finite_element = DEGREE_FE;
#else
const unsigned int degree_finite_element = 4;
#endif
#ifdef DIMENSION
const unsigned int dimension = DIMENSION;
#else
const unsigned int dimension = 3;
#endif
#ifdef BMOP_USE_FLOATS
typedef float number;
#else
typedef double number;
#endif
//-------------------------------------------------------------------------
// problem
//-------------------------------------------------------------------------
template <int dim, int fe_degree>
class LaplaceProblem
{
public:
LaplaceProblem ();
void run (int nref);
private:
void setup_system ();
void assemble_system ();
void solve ();
Triangulation<dim> triangulation;
FE_Q<dim> fe;
DoFHandler<dim> dof_handler;
ConstraintMatrix constraints;
SparsityPattern sparsity_pattern;
typedef CUDAWrappers::SparseMatrix<number> SystemMatrixType;
SystemMatrixType system_matrix;
GpuVector<number> src;
GpuVector<number> dst;
double setup_time;
ConditionalOStream time_details;
unsigned int n_iterations;
};
template <int dim, int fe_degree>
LaplaceProblem<dim,fe_degree>::LaplaceProblem ()
:
fe (fe_degree),
dof_handler (triangulation),
time_details (std::cout, false),
n_iterations(N_ITERATIONS)
{}
template <int dim, int fe_degree>
void LaplaceProblem<dim,fe_degree>::setup_system ()
{
Timer time;
time.start ();
setup_time = 0;
dof_handler.distribute_dofs (fe);
constraints.clear();
VectorTools::interpolate_boundary_values (dof_handler,
0,
ZeroFunction<dim>(),
constraints);
DoFTools::make_hanging_node_constraints(dof_handler,constraints);
constraints.close();
setup_time += time.wall_time();
DynamicSparsityPattern dsp(dof_handler.n_dofs());
DoFTools::make_sparsity_pattern(dof_handler,
dsp,
constraints,
/*keep_constrained_dofs = */ false);
sparsity_pattern.copy_from(dsp);
dst.reinit (dof_handler.n_dofs());
src.reinit (dof_handler.n_dofs());
setup_time += time.wall_time();
}
template <int dim, int fe_degree>
void LaplaceProblem<dim,fe_degree>::assemble_system ()
{
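  // Assemble the stiffness matrix of the variable-coefficient Laplace operator
  // on the host with standard deal.II cell loops, then upload it into the CUDA
  // sparse-matrix wrapper via system_matrix.reinit() at the end.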
Timer time;
// assemble matrix
SparseMatrix<number> system_matrix_host(sparsity_pattern);
CoefficientFun<dim,number> coeff;
const QGauss<dim> quadrature_formula(fe.degree+1);
FEValues<dim> fe_values (fe, quadrature_formula,
update_values | update_gradients |
update_quadrature_points | update_JxW_values );
const unsigned int dofs_per_cell = fe.dofs_per_cell;
const unsigned int n_q_points = quadrature_formula.size();
FullMatrix<number> cell_matrix (dofs_per_cell, dofs_per_cell);
std::vector<types::global_dof_index> local_dof_indices (dofs_per_cell);
int ncells = dof_handler.get_triangulation().n_active_cells();
typename DoFHandler<dim>::active_cell_iterator
cell = dof_handler.begin_active(),
endc = dof_handler.end();
for (; cell!=endc; ++cell)
{
cell_matrix = 0;
fe_values.reinit (cell);
for (unsigned int q_index=0; q_index<n_q_points; ++q_index)
{
const number current_coefficient = coeff.value(fe_values.quadrature_point (q_index));
for (unsigned int i=0; i<dofs_per_cell; ++i)
{
for (unsigned int j=0; j<dofs_per_cell; ++j)
cell_matrix(i,j) += (current_coefficient *
fe_values.shape_grad(i,q_index) *
fe_values.shape_grad(j,q_index) *
fe_values.JxW(q_index));
}
}
cell->get_dof_indices (local_dof_indices);
constraints.distribute_local_to_global (cell_matrix,
local_dof_indices,
system_matrix_host);
}
system_matrix.reinit(system_matrix_host);
setup_time += time.wall_time();
}
template <int dim, int fe_degree>
void LaplaceProblem<dim,fe_degree>::solve ()
{
Timer time;
// IC
dst = 0.1;
for(int i = 0; i < n_iterations; ++i) {
dst.swap(src);
system_matrix.vmult(dst,src);
}
cudaDeviceSynchronize();
time.stop();
printf("%d\t%d\t%d\t%g\n",dim,fe_degree,dof_handler.n_dofs(),time.wall_time() / n_iterations);
}
template <int dim, int fe_degree>
void LaplaceProblem<dim,fe_degree>::run (int n_ref)
{
#ifdef BALL_GRID
domain_case_t domain = BALL;
#else
domain_case_t domain = CUBE;
#endif
#ifdef ADAPTIVE_GRID
bool pseudo_adaptive_grid = true;
#else
bool pseudo_adaptive_grid = false;
#endif
bmop_setup_mesh(triangulation, domain,
pseudo_adaptive_grid, n_ref);
setup_system ();
assemble_system ();
solve ();
}
int main (int argc, char **argv)
{
try
{
int max_refinement = 1;
int min_refinement = 0;
if(argc > 1)
max_refinement = atoi(argv[1]);
if(argc > 2)
min_refinement = atoi(argv[2]);
deallog.depth_console(0);
for(int r=min_refinement; r<=max_refinement; r++) {
LaplaceProblem<dimension,degree_finite_element> laplace_problem;
laplace_problem.run ( r);
}
}
catch (std::exception &exc)
{
std::cerr << std::endl << std::endl
<< "----------------------------------------------------"
<< std::endl;
std::cerr << "Exception on processing: " << std::endl
<< exc.what() << std::endl
<< "Aborting!" << std::endl
<< "----------------------------------------------------"
<< std::endl;
return 1;
}
catch (...)
{
std::cerr << std::endl << std::endl
<< "----------------------------------------------------"
<< std::endl;
std::cerr << "Unknown exception!" << std::endl
<< "Aborting!" << std::endl
<< "----------------------------------------------------"
<< std::endl;
return 1;
}
GrowingVectorMemory<GpuVector<number> >::release_unused_memory();
return 0;
}
|
616a05daacb6b8cb7903d8a8abaf608ea75d8421.hip | // !!! This is a file automatically generated by hipify!!!
/**
* (C) Copyright 2020 IBM. All Rights Reserved.
*
* This code is licensed under the Apache License, Version 2.0. You may
* obtain a copy of this license in the LICENSE.txt file in the root directory
* of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
*
* Any modifications or derivative works of this code must retain this
* copyright notice, and modified files need to carry a notice indicating
* that they have been altered from the originals.
*/
#include "forward_backward_pass.h"
#include "rpu_pulsed_meta_parameter.h"
#include "rpucuda_transfer_device.h"
#include <memory>
namespace RPU {
/******************************************************************************************/
/* DefferenceRPUDeviceCuda
CUDA implementation of TransferRPUDevice
*/
template <typename T> void TransferRPUDeviceCuda<T>::initialize() {
transfer_pwu_ = make_unique<PulsedWeightUpdater<T>>(this->context_, this->x_size_, this->d_size_);
transfer_iom_ = make_unique<InputOutputManager<T>>(this->context_, this->x_size_, this->d_size_);
}
template <typename T>
TransferRPUDeviceCuda<T>::TransferRPUDeviceCuda(
CudaContext *c, const TransferRPUDevice<T> &rpu_device) {
this->context_ = c;
populateFrom(rpu_device); // use populate to call parent
};
// copy constructor
template <typename T>
TransferRPUDeviceCuda<T>::TransferRPUDeviceCuda(const TransferRPUDeviceCuda<T> &other)
: VectorRPUDeviceCuda<T>(other) {
if (other.transfer_vecs_ != nullptr) {
transfer_vecs_ = make_unique<CudaArray<T>>(*other.transfer_vecs_);
}
initialize();
current_col_indices_ = other.current_col_indices_;
fully_hidden_ = other.fully_hidden_;
this->context_->synchronizeDevice();
};
// copy assignment
template <typename T>
TransferRPUDeviceCuda<T> &
TransferRPUDeviceCuda<T>::operator=(const TransferRPUDeviceCuda<T> &other) {
TransferRPUDeviceCuda<T> tmp(other);
swap(*this, tmp);
return *this;
};
// move constructor
template <typename T>
TransferRPUDeviceCuda<T>::TransferRPUDeviceCuda(TransferRPUDeviceCuda<T> &&other) {
*this = std::move(other);
};
// move assignment
template <typename T>
TransferRPUDeviceCuda<T> &TransferRPUDeviceCuda<T>::operator=(TransferRPUDeviceCuda<T> &&other) {
VectorRPUDeviceCuda<T>::operator=(std::move(other));
transfer_vecs_ = std::move(other.transfer_vecs_);
current_col_indices_ = other.current_col_indices_;
other.current_col_indices_.clear();
fully_hidden_ = other.fully_hidden_;
transfer_pwu_ = std::move(other.transfer_pwu_);
transfer_iom_ = std::move(other.transfer_iom_);
// ignore transfer_tmp_ or RNG
return *this;
};
template <typename T>
void TransferRPUDeviceCuda<T>::populateFrom(const AbstractRPUDevice<T> &rpu_device_in) {
const auto &rpu_device = dynamic_cast<const TransferRPUDevice<T> &>(rpu_device_in);
if (&rpu_device == nullptr) {
RPU_FATAL("populateFrom expects TransferRPUDevice.");
}
VectorRPUDeviceCuda<T>::populateFrom(rpu_device_in);
const auto &par = getPar();
if (!par.single_device_update) {
RPU_FATAL("Multiple device update not supported for Transfer Device");
}
if (!par.same_context) {
RPU_FATAL("Only same context supported");
}
if (this->n_devices_ < 2) {
RPU_FATAL("Expect at least two devices.");
}
for (int j = 1; j < this->n_devices_ - 1; j++) {
if (par.transfer_every_vec[0] > par.transfer_every_vec[j]) {
RPU_FATAL("Later transfer periods need to be larger than first for CUDA.");
}
}
transfer_vecs_ = make_unique<CudaArray<T>>(
this->context_, this->x_size_ * this->x_size_, rpu_device.getTransferVecs());
initialize(); // pwu/iom
current_col_indices_.resize(this->n_devices_ - 1);
std::fill(current_col_indices_.begin(), current_col_indices_.end(), 0);
this->current_update_idx_ = 0;
fully_hidden_ = par.fullyHidden();
}
/* partially transfer using the given "readout" transfer vectors
   (with io-managed forward) and the usual device update */
template <typename T>
void TransferRPUDeviceCuda<T>::forwardUpdate(
int to_device_idx,
int from_device_idx,
int i_col_start,
const T lr,
const T *x_input,
const int n_vec,
const bool trans,
const PulsedUpdateMetaParameter<T> &up) {
if (!lr) {
return;
}
if ((transfer_tmp_ == nullptr) || transfer_tmp_->getSize() < n_vec * this->d_size_) {
transfer_tmp_ = make_unique<CudaArray<T>>(this->context_, this->d_size_ * n_vec);
}
// forward with transfer vectors
RPU::detail::forwardMatrixIteratorIOManaged(
this->context_, this->dev_weights_ptrs_[from_device_idx], x_input, this->x_size_, trans,
transfer_tmp_->getData(), this->d_size_, trans, n_vec,
(T)1.0, // additional output scaling. Obey?
*transfer_iom_, getPar().transfer_io, false);
// update according to device
T *W = this->dev_weights_ptrs_[to_device_idx]; /// note that the ptrs might point to the current
/// weight
// since we need *positive* update, LR needs to be
// negative. However, this is not supported in the PWU
// really. Thus we scale the temp-vector by -1
RPU::math::scal(this->context_, this->d_size_ * n_vec, (T)-1.0, transfer_tmp_->getData(), 1);
transfer_pwu_->update(
x_input, // this is the transfer vector (x_size)
transfer_tmp_->getDataConst(), // this should be d_size
W, &*this->rpucuda_device_vec_[to_device_idx], up, fabs(lr), n_vec, trans, trans);
}
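/* transfer: reads out one or more columns of the "from" device using the
   stored transfer vectors and applies the result as an update onto the "to"
   device (via forwardUpdate). Columns are visited in order, or drawn at random
   when par.random_column is set, wrapping around at x_size_; with probability
   par.with_reset_prob the just-transferred columns of the source device are
   reset afterwards. */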
template <typename T>
void TransferRPUDeviceCuda<T>::transfer(
int to_device_idx,
int from_device_idx,
    const PulsedUpdateMetaParameter<T> &current_up,
const T current_lr) {
int i_col = current_col_indices_[from_device_idx];
const auto &par = getPar();
if (par.random_column) {
i_col = MAX(MIN(floor(this->rw_rng_.sampleUniform() * this->x_size_), this->x_size_ - 1), 0);
}
// transfer_vecs_ is always x_size-major (that is trans==false)
T *tvec = transfer_vecs_->getData() + i_col * this->x_size_;
int n_rest = this->x_size_ - i_col;
T lr = par.getTransferLR(to_device_idx, from_device_idx, current_lr);
const PulsedUpdateMetaParameter<T> *up;
up = &par.transfer_up;
int n_transfers = MIN(par.n_cols_per_transfer, this->x_size_);
if (n_rest < n_transfers) {
// rest
forwardUpdate(to_device_idx, from_device_idx, i_col, lr, tvec, n_rest, false, *up);
// from beginning
forwardUpdate(
to_device_idx, from_device_idx, 0, lr, transfer_vecs_->getData(), n_transfers - n_rest,
false, *up);
} else {
forwardUpdate(to_device_idx, from_device_idx, i_col, lr, tvec, n_transfers, false, *up);
}
if (this->rw_rng_.sampleUniform() <
par.with_reset_prob) { // COL-wise prob!! device-wise reset_prob=1
this->rpucuda_device_vec_[from_device_idx]->resetCols(
this->dev_weights_ptrs_[from_device_idx], i_col, n_transfers, 1);
}
current_col_indices_[from_device_idx] = (i_col + n_transfers) % this->x_size_;
}
template <typename T> int TransferRPUDeviceCuda<T>::getTransferEvery(int didx, int m_batch) const {
if (getPar().units_in_mbatch) {
return MAX(RPU_ROUNDFUN(getPar().transfer_every_vec[didx] * m_batch), 0);
} else {
return MAX(RPU_ROUNDFUN(getPar().transfer_every_vec[didx]), 0);
}
}
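// If more than one transfer period fits into a single batch, the update
// kernels selected below must support chunking (ensureChunk), since
// runUpdateKernel will then split the batch into chunks and interleave
// transfers between them.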
template <typename T>
pwukpvec_t<T> TransferRPUDeviceCuda<T>::getUpdateKernels(
int m_batch, int nK32, int use_bo64, bool out_trans, const PulsedUpdateMetaParameter<T> &up) {
pwukpvec_t<T> v;
// use the first device as the "FAST" device that gets updates with the true gradients.
v = this->rpucuda_device_vec_[0]->getUpdateKernels(m_batch, nK32, use_bo64, out_trans, up);
if (RPU_ROUNDFUN((T)m_batch / getTransferEvery(0, m_batch)) > 1) {
for (auto &kpars : v) {
kpars->ensureChunk();
}
}
return v;
}
template <typename T>
void TransferRPUDeviceCuda<T>::runUpdateKernel(
pwukp_t<T> kpars,
CudaContext *up_context,
T *dev_weights,
int m_batch,
const BitLineMaker<T> *blm,
const PulsedUpdateMetaParameter<T> &up,
hiprandState_t *dev_states,
int one_sided,
uint32_t *x_counts_chunk,
uint32_t *d_counts_chunk) {
  // calling kpars->run(.., this, ..) directly would cause an error because this class is
  // derived from the abstract device..
DEBUG_OUT("start run update kernel.");
DEBUG_CALL(kpars->print(););
if (fully_hidden_) {
this->dev_weights_ptrs_[this->n_devices_ - 1] = dev_weights;
}
// always same (up) context.
CudaContext *c = up_context;
if (x_counts_chunk != nullptr || d_counts_chunk != nullptr) {
RPU_FATAL("Chunking not allowed here.");
}
int nchunks =
RPU_ROUNDFUN((T)m_batch / getTransferEvery(0, m_batch)); // take next integer for period
if (nchunks <= 1) {
// just update the whole batch we do not call kpars directly to
// also make possible to have non-pulsed devices. Note that only
// one device is directly updated with the gradients, thus
// tuning kpars are always unique (and valid to that rpu_device
// only). However, the other RPU device kernels will be tuned
    // during transfer, since we use a separate PWU object
this->rpucuda_device_vec_[0]->runUpdateKernel(
kpars, c, this->dev_weights_ptrs_[0], m_batch, blm, up, dev_states, one_sided);
this->current_update_idx_ += m_batch; // first update idx
if (!up._currently_tuning) {
for (int j = 0; j < this->n_devices_ - 1; j++) {
// all transfer periods will be rounded up to batches.
int period = (getTransferEvery(j, m_batch) + m_batch - 1) / m_batch; // in m_batch
if (this->current_update_idx_ / m_batch % period == 0) {
transfer(j + 1, j, up, blm->getCurrentLR());
}
}
}
} else {
// need to do it chunkwise
int chunk_size = (m_batch + nchunks - 1) / nchunks; // to ensure not to have residual
for (int i_chunk = 0; i_chunk < nchunks; i_chunk++) {
int batch_start = i_chunk * chunk_size;
// note that last chunk might be smaller.
T current_m_batch = chunk_size - MAX(batch_start + chunk_size - m_batch, 0);
this->rpucuda_device_vec_[0]->runUpdateKernel(
kpars,
c, // same context since sequence important
this->dev_weights_ptrs_[0], current_m_batch, blm, up, dev_states, one_sided,
blm->getXCountsData() + i_chunk * this->x_size_ * up.getNK32Default(), // always non-trans
blm->getDCountsData() + i_chunk * this->d_size_ * up.getNK32Default());
this->current_update_idx_ += current_m_batch; // first update idx
if (!up._currently_tuning) {
// transfer
for (int j = 0; j < this->n_devices_ - 1; j++) {
// all transfer periods will be rounded up to chunk_sizes
int period = (getTransferEvery(j, m_batch) + chunk_size - 1) / chunk_size;
if (this->current_update_idx_ / chunk_size % period == 0) {
transfer(j + 1, j, up, blm->getCurrentLR());
}
}
}
}
}
// only reduce at end
this->reduceToWeights(up_context, dev_weights);
}
template <typename T>
void TransferRPUDeviceCuda<T>::reduceToWeights(CudaContext *context, T *dev_weights) {
if (!fully_hidden_) {
VectorRPUDeviceCuda<T>::reduceToWeights(context, dev_weights);
}
}
template <typename T>
void TransferRPUDeviceCuda<T>::decayWeights(T *dev_weights, T alpha, bool bias_no_decay) {
if (fully_hidden_) {
this->dev_weights_ptrs_[this->n_devices_ - 1] = dev_weights;
}
VectorRPUDeviceCuda<T>::decayWeights(dev_weights, alpha, bias_no_decay);
}
template <typename T>
void TransferRPUDeviceCuda<T>::decayWeights(T *dev_weights, bool bias_no_decay) {
if (fully_hidden_) {
this->dev_weights_ptrs_[this->n_devices_ - 1] = dev_weights;
}
VectorRPUDeviceCuda<T>::decayWeights(dev_weights, bias_no_decay);
}
template <typename T> void TransferRPUDeviceCuda<T>::diffuseWeights(T *dev_weights) {
if (fully_hidden_) {
this->dev_weights_ptrs_[this->n_devices_ - 1] = dev_weights;
}
VectorRPUDeviceCuda<T>::diffuseWeights(dev_weights);
}
template <typename T> void TransferRPUDeviceCuda<T>::clipWeights(T *dev_weights, T clip) {
if (fully_hidden_) {
this->dev_weights_ptrs_[this->n_devices_ - 1] = dev_weights;
}
VectorRPUDeviceCuda<T>::clipWeights(dev_weights, clip);
}
template class TransferRPUDeviceCuda<float>;
#ifdef RPU_USE_DOUBLE
template class TransferRPUDeviceCuda<double>;
#endif
} // namespace RPU
| 616a05daacb6b8cb7903d8a8abaf608ea75d8421.cu | /**
* (C) Copyright 2020 IBM. All Rights Reserved.
*
* This code is licensed under the Apache License, Version 2.0. You may
* obtain a copy of this license in the LICENSE.txt file in the root directory
* of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
*
* Any modifications or derivative works of this code must retain this
* copyright notice, and modified files need to carry a notice indicating
* that they have been altered from the originals.
*/
#include "forward_backward_pass.h"
#include "rpu_pulsed_meta_parameter.h"
#include "rpucuda_transfer_device.h"
#include <memory>
namespace RPU {
/******************************************************************************************/
/* TransferRPUDeviceCuda
CUDA implementation of TransferRPUDevice
*/
template <typename T> void TransferRPUDeviceCuda<T>::initialize() {
transfer_pwu_ = make_unique<PulsedWeightUpdater<T>>(this->context_, this->x_size_, this->d_size_);
transfer_iom_ = make_unique<InputOutputManager<T>>(this->context_, this->x_size_, this->d_size_);
}
template <typename T>
TransferRPUDeviceCuda<T>::TransferRPUDeviceCuda(
CudaContext *c, const TransferRPUDevice<T> &rpu_device) {
this->context_ = c;
populateFrom(rpu_device); // use populate to call parent
};
// copy constructor
template <typename T>
TransferRPUDeviceCuda<T>::TransferRPUDeviceCuda(const TransferRPUDeviceCuda<T> &other)
: VectorRPUDeviceCuda<T>(other) {
if (other.transfer_vecs_ != nullptr) {
transfer_vecs_ = make_unique<CudaArray<T>>(*other.transfer_vecs_);
}
initialize();
current_col_indices_ = other.current_col_indices_;
fully_hidden_ = other.fully_hidden_;
this->context_->synchronizeDevice();
};
// copy assignment
template <typename T>
TransferRPUDeviceCuda<T> &
TransferRPUDeviceCuda<T>::operator=(const TransferRPUDeviceCuda<T> &other) {
TransferRPUDeviceCuda<T> tmp(other);
swap(*this, tmp);
return *this;
};
// move constructor
template <typename T>
TransferRPUDeviceCuda<T>::TransferRPUDeviceCuda(TransferRPUDeviceCuda<T> &&other) {
*this = std::move(other);
};
// move assignment
template <typename T>
TransferRPUDeviceCuda<T> &TransferRPUDeviceCuda<T>::operator=(TransferRPUDeviceCuda<T> &&other) {
VectorRPUDeviceCuda<T>::operator=(std::move(other));
transfer_vecs_ = std::move(other.transfer_vecs_);
current_col_indices_ = other.current_col_indices_;
other.current_col_indices_.clear();
fully_hidden_ = other.fully_hidden_;
transfer_pwu_ = std::move(other.transfer_pwu_);
transfer_iom_ = std::move(other.transfer_iom_);
// ignore transfer_tmp_ or RNG
return *this;
};
template <typename T>
void TransferRPUDeviceCuda<T>::populateFrom(const AbstractRPUDevice<T> &rpu_device_in) {
const auto &rpu_device = dynamic_cast<const TransferRPUDevice<T> &>(rpu_device_in);
if (&rpu_device == nullptr) {
RPU_FATAL("populateFrom expects TransferRPUDevice.");
}
VectorRPUDeviceCuda<T>::populateFrom(rpu_device_in);
const auto &par = getPar();
if (!par.single_device_update) {
RPU_FATAL("Multiple device update not supported for Transfer Device");
}
if (!par.same_context) {
RPU_FATAL("Only same context supported");
}
if (this->n_devices_ < 2) {
RPU_FATAL("Expect at least two devices.");
}
for (int j = 1; j < this->n_devices_ - 1; j++) {
if (par.transfer_every_vec[0] > par.transfer_every_vec[j]) {
RPU_FATAL("Later transfer periods need to be larger than first for CUDA.");
}
}
transfer_vecs_ = make_unique<CudaArray<T>>(
this->context_, this->x_size_ * this->x_size_, rpu_device.getTransferVecs());
initialize(); // pwu/iom
current_col_indices_.resize(this->n_devices_ - 1);
std::fill(current_col_indices_.begin(), current_col_indices_.end(), 0);
this->current_update_idx_ = 0;
fully_hidden_ = par.fullyHidden();
}
/* partially transfer using the given "readout" transfer vectors
   (with io-managed forward) and the usual device update */
template <typename T>
void TransferRPUDeviceCuda<T>::forwardUpdate(
int to_device_idx,
int from_device_idx,
int i_col_start,
const T lr,
const T *x_input,
const int n_vec,
const bool trans,
const PulsedUpdateMetaParameter<T> &up) {
if (!lr) {
return;
}
if ((transfer_tmp_ == nullptr) || transfer_tmp_->getSize() < n_vec * this->d_size_) {
transfer_tmp_ = make_unique<CudaArray<T>>(this->context_, this->d_size_ * n_vec);
}
// forward with transfer vectors
RPU::detail::forwardMatrixIteratorIOManaged(
this->context_, this->dev_weights_ptrs_[from_device_idx], x_input, this->x_size_, trans,
transfer_tmp_->getData(), this->d_size_, trans, n_vec,
(T)1.0, // additional output scaling. Obey?
*transfer_iom_, getPar().transfer_io, false);
// update according to device
T *W = this->dev_weights_ptrs_[to_device_idx]; /// note that the ptrs might point to the current
/// weight
// since we need *positive* update, LR needs to be
// negative. However, this is not supported in the PWU
// really. Thus we scale the temp-vector by -1
RPU::math::scal(this->context_, this->d_size_ * n_vec, (T)-1.0, transfer_tmp_->getData(), 1);
transfer_pwu_->update(
x_input, // this is the transfer vector (x_size)
transfer_tmp_->getDataConst(), // this should be d_size
W, &*this->rpucuda_device_vec_[to_device_idx], up, fabs(lr), n_vec, trans, trans);
}
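/* transfer: reads out one or more columns of the "from" device using the
   stored transfer vectors and applies the result as an update onto the "to"
   device (via forwardUpdate). Columns are visited in order, or drawn at random
   when par.random_column is set, wrapping around at x_size_; with probability
   par.with_reset_prob the just-transferred columns of the source device are
   reset afterwards. */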
template <typename T>
void TransferRPUDeviceCuda<T>::transfer(
int to_device_idx,
int from_device_idx,
    const PulsedUpdateMetaParameter<T> &current_up,
const T current_lr) {
int i_col = current_col_indices_[from_device_idx];
const auto &par = getPar();
if (par.random_column) {
i_col = MAX(MIN(floor(this->rw_rng_.sampleUniform() * this->x_size_), this->x_size_ - 1), 0);
}
// transfer_vecs_ is always x_size-major (that is trans==false)
T *tvec = transfer_vecs_->getData() + i_col * this->x_size_;
int n_rest = this->x_size_ - i_col;
T lr = par.getTransferLR(to_device_idx, from_device_idx, current_lr);
const PulsedUpdateMetaParameter<T> *up;
up = &par.transfer_up;
int n_transfers = MIN(par.n_cols_per_transfer, this->x_size_);
if (n_rest < n_transfers) {
// rest
forwardUpdate(to_device_idx, from_device_idx, i_col, lr, tvec, n_rest, false, *up);
// from beginning
forwardUpdate(
to_device_idx, from_device_idx, 0, lr, transfer_vecs_->getData(), n_transfers - n_rest,
false, *up);
} else {
forwardUpdate(to_device_idx, from_device_idx, i_col, lr, tvec, n_transfers, false, *up);
}
if (this->rw_rng_.sampleUniform() <
par.with_reset_prob) { // COL-wise prob!! device-wise reset_prob=1
this->rpucuda_device_vec_[from_device_idx]->resetCols(
this->dev_weights_ptrs_[from_device_idx], i_col, n_transfers, 1);
}
current_col_indices_[from_device_idx] = (i_col + n_transfers) % this->x_size_;
}
template <typename T> int TransferRPUDeviceCuda<T>::getTransferEvery(int didx, int m_batch) const {
if (getPar().units_in_mbatch) {
return MAX(RPU_ROUNDFUN(getPar().transfer_every_vec[didx] * m_batch), 0);
} else {
return MAX(RPU_ROUNDFUN(getPar().transfer_every_vec[didx]), 0);
}
}
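// If more than one transfer period fits into a single batch, the update
// kernels selected below must support chunking (ensureChunk), since
// runUpdateKernel will then split the batch into chunks and interleave
// transfers between them.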
template <typename T>
pwukpvec_t<T> TransferRPUDeviceCuda<T>::getUpdateKernels(
int m_batch, int nK32, int use_bo64, bool out_trans, const PulsedUpdateMetaParameter<T> &up) {
pwukpvec_t<T> v;
// use the first device as the "FAST" device that gets updates with the true gradients.
v = this->rpucuda_device_vec_[0]->getUpdateKernels(m_batch, nK32, use_bo64, out_trans, up);
if (RPU_ROUNDFUN((T)m_batch / getTransferEvery(0, m_batch)) > 1) {
for (auto &kpars : v) {
kpars->ensureChunk();
}
}
return v;
}
template <typename T>
void TransferRPUDeviceCuda<T>::runUpdateKernel(
pwukp_t<T> kpars,
CudaContext *up_context,
T *dev_weights,
int m_batch,
const BitLineMaker<T> *blm,
const PulsedUpdateMetaParameter<T> &up,
curandState_t *dev_states,
int one_sided,
uint32_t *x_counts_chunk,
uint32_t *d_counts_chunk) {
  // calling kpars->run(.., this, ..) directly would cause an error because this class is
  // derived from the abstract device..
DEBUG_OUT("start run update kernel.");
DEBUG_CALL(kpars->print(););
if (fully_hidden_) {
this->dev_weights_ptrs_[this->n_devices_ - 1] = dev_weights;
}
// always same (up) context.
CudaContext *c = up_context;
if (x_counts_chunk != nullptr || d_counts_chunk != nullptr) {
RPU_FATAL("Chunking not allowed here.");
}
int nchunks =
RPU_ROUNDFUN((T)m_batch / getTransferEvery(0, m_batch)); // take next integer for period
if (nchunks <= 1) {
// just update the whole batch we do not call kpars directly to
// also make possible to have non-pulsed devices. Note that only
// one device is directly updated with the gradients, thus
// tuning kpars are always unique (and valid to that rpu_device
// only). However, the other RPU device kernels will be tuned
    // during transfer, since we use a separate PWU object
this->rpucuda_device_vec_[0]->runUpdateKernel(
kpars, c, this->dev_weights_ptrs_[0], m_batch, blm, up, dev_states, one_sided);
this->current_update_idx_ += m_batch; // first update idx
if (!up._currently_tuning) {
for (int j = 0; j < this->n_devices_ - 1; j++) {
// all transfer periods will be rounded up to batches.
int period = (getTransferEvery(j, m_batch) + m_batch - 1) / m_batch; // in m_batch
if (this->current_update_idx_ / m_batch % period == 0) {
transfer(j + 1, j, up, blm->getCurrentLR());
}
}
}
} else {
// need to do it chunkwise
int chunk_size = (m_batch + nchunks - 1) / nchunks; // to ensure not to have residual
for (int i_chunk = 0; i_chunk < nchunks; i_chunk++) {
int batch_start = i_chunk * chunk_size;
// note that last chunk might be smaller.
T current_m_batch = chunk_size - MAX(batch_start + chunk_size - m_batch, 0);
this->rpucuda_device_vec_[0]->runUpdateKernel(
kpars,
c, // same context since sequence important
this->dev_weights_ptrs_[0], current_m_batch, blm, up, dev_states, one_sided,
blm->getXCountsData() + i_chunk * this->x_size_ * up.getNK32Default(), // always non-trans
blm->getDCountsData() + i_chunk * this->d_size_ * up.getNK32Default());
this->current_update_idx_ += current_m_batch; // first update idx
if (!up._currently_tuning) {
// transfer
for (int j = 0; j < this->n_devices_ - 1; j++) {
// all transfer periods will be rounded up to chunk_sizes
int period = (getTransferEvery(j, m_batch) + chunk_size - 1) / chunk_size;
if (this->current_update_idx_ / chunk_size % period == 0) {
transfer(j + 1, j, up, blm->getCurrentLR());
}
}
}
}
}
// only reduce at end
this->reduceToWeights(up_context, dev_weights);
}
template <typename T>
void TransferRPUDeviceCuda<T>::reduceToWeights(CudaContext *context, T *dev_weights) {
if (!fully_hidden_) {
VectorRPUDeviceCuda<T>::reduceToWeights(context, dev_weights);
}
}
template <typename T>
void TransferRPUDeviceCuda<T>::decayWeights(T *dev_weights, T alpha, bool bias_no_decay) {
if (fully_hidden_) {
this->dev_weights_ptrs_[this->n_devices_ - 1] = dev_weights;
}
VectorRPUDeviceCuda<T>::decayWeights(dev_weights, alpha, bias_no_decay);
}
template <typename T>
void TransferRPUDeviceCuda<T>::decayWeights(T *dev_weights, bool bias_no_decay) {
if (fully_hidden_) {
this->dev_weights_ptrs_[this->n_devices_ - 1] = dev_weights;
}
VectorRPUDeviceCuda<T>::decayWeights(dev_weights, bias_no_decay);
}
template <typename T> void TransferRPUDeviceCuda<T>::diffuseWeights(T *dev_weights) {
if (fully_hidden_) {
this->dev_weights_ptrs_[this->n_devices_ - 1] = dev_weights;
}
VectorRPUDeviceCuda<T>::diffuseWeights(dev_weights);
}
template <typename T> void TransferRPUDeviceCuda<T>::clipWeights(T *dev_weights, T clip) {
if (fully_hidden_) {
this->dev_weights_ptrs_[this->n_devices_ - 1] = dev_weights;
}
VectorRPUDeviceCuda<T>::clipWeights(dev_weights, clip);
}
template class TransferRPUDeviceCuda<float>;
#ifdef RPU_USE_DOUBLE
template class TransferRPUDeviceCuda<double>;
#endif
} // namespace RPU
|
01711d3e30c845ef43ce05c59774ecd09b1f6c75.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <GL/glut.h>
#include <GL/glu.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include "toolfunctions.h"
#include <time.h>
#define N 172032 //131072
#define M 672
#define THRESHOLD 100000
#define GSIZE 1000
#define PNTSIZE 300
#define AUTO_END 10
#define ORTHO 80000
#define RADIUS 1500
#define RADSCALE 1000000000
#define LINE_SIZE 0.3
#define NCOMPASS 360
#define d2r(deg) (deg * PI / 180.0)
#define kill(s) (s->dead = true)
#define PI 3.14159265358979323846
#define MAPX 1273623389
#define MAPY 363706170
#define NNUM 949
#define BNUM 117
#define FNUM 4
int nnum, bnum, fnum;
int point_mode = 0;
int num_hounds;
int width = 800, height = 800;
signal sig[N];
int compass[10][NCOMPASS];
int count[10][NCOMPASS];
int selection_mode = 0; //generator: 0, detector: 1
node *Nodes;
polygon *Buildings;
polygon *Forests;
info GameInfo;
////////////////// cuda time
signal *dev_signals;
node *dev_nodes;
polygon *dev_buildings, *dev_forests;
info *dev_info;
int toggle[10];
void load_file();
void clean_up();
void initialize();
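// signal_calculation: one thread per emitted ray. Each ray starts at the
// generator (gx, gy) and is traced for at most AUTO_END bounces: it may be
// reflected by the nearest building edge, blocked by a forest edge, or
// terminate once it comes within RADIUS of the hound position (ax, ay)
// assigned to this thread (threads are partitioned evenly across hounds).
// The accumulated path length is kept in si->ss; rays that never reach a
// hound are marked dead.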
__global__ void signal_calculation(signal *signal_list,
const node *node_list, const polygon *building_list, const polygon *forest_list, const info *dev_info) {
int i = threadIdx.x + (blockIdx.x * blockDim.x);
my_t gx = dev_info->gx;
my_t gy = dev_info->gy;
my_t ax = dev_info->ax[i / (N / dev_info->num_hounds)];
my_t ay = dev_info->ay[i / (N / dev_info->num_hounds)];
//if(i % 10000 == 0)
// printf("gx = %lld, gy = %lld, ax = %lld, ay = %lld\n", gx, gy, ax, ay);
my_t zx, zy;
my_t px, py, test, tdist = 0, kdist = 0;
signal sigref, sigblk;
bool possible;
signal *si = &signal_list[i];
int autoend = -1;
while (!si->dead && ++autoend < AUTO_END) {
si->d = si->vy*si->x - si->vx*si->y;
// case of detection
possible = false;
my_t d = (-si->vy*ax + si->vx*ay + si->d) / RADSCALE;
if (-RADIUS <= d && d <= RADIUS) {
if (si->vx != 0) {
px = ax + (d*si->vy / RADSCALE);
test = (px - si->x) ^ si->vx;
}
else {
py = ay - (d*si->vx / RADSCALE);
test = (py - si->y) ^ si->vy;
}
if (test > 0) {
possible = true;
zx = (si->x - ax);
zy = (si->y - ay);
tdist = zx*zx + zy*zy;
}
}
// reflection test
int n1, n2;
int j, k;
my_t test, kdist;
my_t lx1, ly1, lx2, ly2;
my_t Tnx, Tny, Td, pr;
sigref.dead = true;
int eid;
for (j = 0; j < BNUM; j++) {
// calculate reflection
const polygon *p = &building_list[j];
d = ((-si->vy)*p->x + (si->vx)*p->y + si->d) / RADSCALE;
pr = p->radius;
//possibly blocked if...
if (-pr <= d && d <= pr)
{
for (k = 0; k < p->isize - 1; k++)
{
eid = 100 * i + k;
if (si->eid == eid) continue;
n1 = p->inodes[k];
n2 = p->inodes[k + 1];
lx1 = node_list[n1].x;
ly1 = node_list[n1].y;
lx2 = node_list[n2].x;
ly2 = node_list[n2].y;
Tnx = -si->vy;
Tny = si->vx;
Td = -(-si->vy*si->x + si->vx*si->y);
my_t tb = Tnx*(lx2 - lx1) + Tny*(ly2 - ly1);
if (tb == 0) { // parallel
continue;
}
my_t t = -(Tnx*lx1 + Tny*ly1 + Td);
if (t == 0 || t == tb) {
continue;
}
if ((0 < t && t < tb) || (tb < t && t < 0)) {
my_t px = lx1 + t*(lx2 - lx1) / tb;
my_t py = ly1 + t*(ly2 - ly1) / tb;
if (si->vx != 0) {
test = (px - si->x) ^ si->vx;
}
else {
test = (py - si->y) ^ si->vy;
}
if (test > 0) {
zx = (si->x - px);
zy = (si->y - py);
kdist = zx*zx + zy*zy;
if (kdist < 10) continue;
if (sigref.dead || sigref.ss > kdist) { //if marked as alive
my_t lnx = -(ly2 - ly1);
my_t lny = (lx2 - lx1);
my_t nv = lnx*si->vx + lny*si->vy;
sigref.x = px;
sigref.y = py;
sigref.vx = si->vx - 2 * nv * lnx / (lnx*lnx + lny*lny);
sigref.vy = si->vy - 2 * nv * lny / (lnx*lnx + lny*lny);
sigref.ss = kdist;
sigref.eid = eid;
sigref.dead = false;
}
}
}
}
}
}
// blocking test
sigblk.dead = false;
for (i = 0; i < FNUM; i++) {
// calculate reflection
const polygon *p = &forest_list[i];
d = ((-si->vy)*p->x + (si->vx)*p->y + si->d) / RADSCALE;
pr = p->radius;
//possibly blocked if...
if (-pr <= d && d <= pr)
{
for (k = 0; k < p->isize - 1; k++)
{
n1 = p->inodes[k];
n2 = p->inodes[k + 1];
lx1 = node_list[n1].x;
ly1 = node_list[n1].y;
lx2 = node_list[n2].x;
ly2 = node_list[n2].y;
Tnx = -si->vy;
Tny = si->vx;
Td = -(-si->vy*si->x + si->vx*si->y);//sigin->d;
// p' = p1 + t(p2-p1), T(dot)p' = 0
// t = -(T(dot)p1) / (T(dot)(p2 - p1))
my_t tb = Tnx*(lx2 - lx1) + Tny*(ly2 - ly1);
if (tb == 0) { // parallel
continue;
}
my_t t = -(Tnx*lx1 + Tny*ly1 + Td);
if (t == 0 || t == tb) continue;
if ((0 < t && t < tb) || (tb < t && t < 0)) {
my_t px = lx1 + t*(lx2 - lx1) / tb;
my_t py = ly1 + t*(ly2 - ly1) / tb;
if (si->vx != 0) {
test = (px - si->x) ^ si->vx;
}
else {
test = (py - si->y) ^ si->vy;
}
if (test > 0) {
zx = (si->x - px);
zy = (si->y - py);
kdist = zx*zx + zy*zy;
if (!sigblk.dead || sigblk.ss > kdist) {
sigblk.x = px;
sigblk.y = py;
sigblk.ss = kdist;
sigblk.dead = true;
}
}
}
}
}
}
if (!sigref.dead) {
if (sigblk.dead) {
if (possible && tdist < sigref.ss && tdist < sigblk.ss) {
si->ss += sqrt((float)tdist);
break;
}
if (sigref.ss < sigblk.ss) {
sigref.ss = sqrt(float(sigref.ss));
sigref.ss += si->ss;
*si = sigref;
continue;
}
else {
kill(si);
break;
}
}
else {
if (possible && tdist < sigref.ss) {
si->ss += sqrt((float)tdist);
break;
}
else {
sigref.ss = sqrt(float(sigref.ss));
sigref.ss += si->ss;
*si = sigref;
continue;
}
}
}
else {
if (sigblk.dead) {
if (possible && tdist < sigblk.ss) {
si->ss += sqrt((float)tdist);
break;
}
else {
kill(si);
break;
}
}
}
if (possible)
si->ss += sqrt((float)tdist);
else
kill(si);
break;
}
if (autoend == AUTO_END) {
kill(si);
}
}
void freeCudaMemory() {
hipFree(dev_signals);
hipFree(dev_nodes);
hipFree(dev_buildings);
hipFree(dev_forests);
hipFree(dev_info);
}
hipError_t allocateCudaMemory() {
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output).
cudaStatus = hipMalloc((void**)&dev_info, sizeof(info));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_signals, N * sizeof(signal));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_nodes, NNUM * sizeof(node));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_nodes, Nodes, NNUM * sizeof(node), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_buildings, BNUM * sizeof(polygon));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_buildings, Buildings, BNUM * sizeof(polygon), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_forests, FNUM * sizeof(polygon));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_forests, Forests, FNUM * sizeof(polygon), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
return cudaStatus;
}
// Helper function for using CUDA to add vectors in parallel.
hipError_t signalCalcWithCuda()
{
hipError_t cudaStatus;
long double r;
int modN = N - (N%GameInfo.num_hounds);
int eachN = (N / GameInfo.num_hounds);
int i;
for (i = 0; i < N; i++) {
signal *si = &sig[i];
if (i < modN) {
r = d2r(360.0 * (i % eachN) / (long double)eachN);
si->x = GameInfo.gx;
si->y = GameInfo.gy;
si->vx = cosl(r) * RADSCALE;
si->vy = sinl(r) * RADSCALE;
si->ss = 0;
si->dead = false;
si->eid = -1;
}
else {
kill(si);
}
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_signals, &sig, N * sizeof(signal), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_info, &GameInfo, sizeof(info), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
signal_calculation << <M, N / M >> >(dev_signals, dev_nodes, dev_buildings, dev_forests, dev_info);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "signal_calculation launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching signal_calculation!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(sig, dev_signals, N * sizeof(signal), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
return cudaStatus;
}
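// convertToCompass: bins the surviving rays per hound and per arrival
// direction (atan2 of the reversed ray direction, in degrees mapped onto
// NCOMPASS bins) and stores an inverse-distance strength, 1000000000 / ss,
// for each occupied bin.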
void convertToCompass() {
int i, j, hidx, sidx;
double sum;
for (i = 0; i < num_hounds; i++) {
for (j = 0; j < NCOMPASS; j++) {
compass[i][j] = 0;
			count[i][j] = 0; // initialize
}
}
int rN = N - (N % num_hounds);
int eachN = rN / num_hounds;
int deg;
for (i = 0; i < rN; i++) {
hidx = i / eachN;
deg = (int)(atan2(-sig[i].vy, -sig[i].vx) * 180 / PI);
if (deg < 0) deg += 360;
sidx = NCOMPASS * deg / 360;
if (!sig[i].dead) {
compass[hidx][sidx] = 1000000000 / sig[i].ss;
count[hidx][sidx] = 1;
}
//compass[hidx]
}
for (i = 0; i < hidx; i++) {
for (j = 0; j < NCOMPASS; j++) {
if(count[i][j] != 0)
compass[i][j] /= count[i][j];
}
}
}
int main(int argc, char* argv[])
{
if (argc == 1) return 0;
initialize();
int i, j;
my_t lat, lon;
lon = atoll(argv[1]);
lat = atoll(argv[2]);
GameInfo.gx = lon - (my_t)MAPX;
GameInfo.gy = lat - (my_t)MAPY;
sscanf_s(argv[3], "%d", &num_hounds);
GameInfo.num_hounds = num_hounds;
for (i = 0; i < num_hounds; i++) {
lon = atoll(argv[4 + i*2]);
lat = atoll(argv[5 + i*2]);
GameInfo.ax[i] = lon - (my_t)MAPX;
GameInfo.ay[i] = lat - (my_t)MAPY;
}
signalCalcWithCuda();
convertToCompass();
fprintf(stdout, "[");
for (j = 0; j < num_hounds; j++) {
fprintf(stdout, "[");
for (i = 0; i < NCOMPASS; i++) {
if (i == NCOMPASS - 1) {
fprintf(stdout, "%d", compass[j][i]);
}
else {
fprintf(stdout, "%d, ", compass[j][i]);
}
}
if (j < num_hounds - 1) {
fprintf(stdout, "], ");
}
else fprintf(stdout, "]");
}
fprintf(stdout, "]");
/*
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_SINGLE | GLUT_RGBA | GLUT_DEPTH);
glutInitWindowSize(width, height);
glutCreateWindow("CS408 SIGNAL TEST");
glutDisplayFunc(display);
glutMouseFunc(onMouseButton); // Register onMouseButton function to call that when user moves mouse.
glutMotionFunc(onMouseDrag); // Register onMouseDrag function to call that when user drags mouse.
glutKeyboardFunc(onKeyPress);
glutMainLoop();
*/
clean_up();
return 0;
}
void initialize() {
load_file();
allocateCudaMemory();
}
void load_file() {
int count;
FILE * fp;
char stmp[255];
char *pstr;
char *context = NULL;
char *token;
const char del[3] = "\t\n";
int nidx, bidx, fidx;
bool firstline = true;
bool isname = true;
int ti;
int tokidx;
my_t mxx, mxy, mix, miy;
int i, ni, maxr;
fopen_s(&fp, "MapData.txt", "rt");
if (fp != NULL)
{
nidx = bidx = fidx = 0;
fscanf_s(fp, "i\t%d\t%d\t%d\n", &nnum, &bnum, &fnum);
//deprecated dynamic allocation
Nodes = (node*)malloc(sizeof(node)*NNUM);
Buildings = (polygon*)malloc(sizeof(polygon)*BNUM);
Forests = (polygon*)malloc(sizeof(polygon)*FNUM);
while (!feof(fp))
{
pstr = fgets(stmp, sizeof(stmp), fp);
if (pstr == NULL) break;
if (pstr[0] == 'n') {
double lat, lon;
sscanf_s(pstr, "n\t%lf\t%lf", &lat, &lon);
Nodes[nidx].x = (my_t)(lon*1e7 - MAPX);
Nodes[nidx].y = (my_t)(lat*1e7 - MAPY);
nidx++;
}
if (pstr[0] == 'b') {
count = 0; //except name tag
for (char *c = pstr; *c != NULL; c++) {
if (*c == '\t') count++;
}
//Buildings[bidx].inodes = (int*)malloc(sizeof(int)*count);
Buildings[bidx].isize = count;
mxx = mxy = -999999999;
mix = miy = 999999999;
token = strtok_s(pstr, del, &context);
tokidx = 0;
isname = true;
while (token != NULL)
{
if (isname) {
token = strtok_s(NULL, del, &context);
isname = false;
continue;
}
sscanf_s(token, "%d", &ti);
Buildings[bidx].inodes[tokidx] = ti;
if (mxx < Nodes[ti].x)
mxx = Nodes[ti].x;
if (mxy < Nodes[ti].y)
mxy = Nodes[ti].y;
if (mix > Nodes[ti].x)
mix = Nodes[ti].x;
if (miy > Nodes[ti].y)
miy = Nodes[ti].y;
token = strtok_s(NULL, del, &context);
tokidx++;
}
Buildings[bidx].x = (mxx + mix) / 2;
Buildings[bidx].y = (mxy + miy) / 2;
Buildings[bidx].radius = sqrt((mxx - mix)*(mxx - mix) + (mxy - miy)*(mxy - miy)) / 2;
/*
Buildings[bidx].radius = 0;
for (i = 0; i < Buildings[bidx].isize; i++) {
ni = Buildings[bidx].inodes[i];
maxr = (int)sqrt((Nodes[ni].x - Buildings[bidx].x)*(Nodes[ni].x - Buildings[bidx].x)
+ (Nodes[ni].y - Buildings[fidx].y)*(Nodes[ni].y - Buildings[bidx].x)) + 1;
if (Buildings[bidx].radius < maxr) {
Buildings[bidx].radius = maxr;
}
}
*/
bidx++;
}
if (pstr[0] == 'f') {
count = 0;
for (char *c = pstr; *c != NULL; c++) {
if (*c == '\t') count++;
}
//Forests[fidx].inodes = (int*)malloc(sizeof(int)*count);
Forests[fidx].isize = count;
mxx = mxy = -999999999;
mix = miy = 999999999;
token = strtok_s(pstr, del, &context);
tokidx = 0;
isname = true;
while (token != NULL)
{
if (isname) {
token = strtok_s(NULL, del, &context);
isname = false;
continue;
}
sscanf_s(token, "%d", &ti);
Forests[fidx].inodes[tokidx] = ti;
if (mxx < Nodes[ti].x)
mxx = Nodes[ti].x;
if (mxy < Nodes[ti].y)
mxy = Nodes[ti].y;
if (mix > Nodes[ti].x)
mix = Nodes[ti].x;
if (miy > Nodes[ti].y)
miy = Nodes[ti].y;
token = strtok_s(NULL, del, &context);
tokidx++;
}
Forests[fidx].x = (mxx + mix) / 2;
Forests[fidx].y = (mxy + miy) / 2;
Forests[fidx].radius = sqrt((mxx - mix)*(mxx - mix) + (mxy - miy)*(mxy - miy)) / 2;
/*
Forests[fidx].radius = 0;
for (i = 0; i < Forests[fidx].isize; i++) {
ni = Forests[fidx].inodes[i];
maxr = (int)sqrt((Nodes[ni].x - Forests[fidx].x)*(Nodes[ni].x - Forests[fidx].x)
+ (Nodes[ni].y - Forests[fidx].y)*(Nodes[ni].y - Forests[fidx].x)) + 1;
if (Forests[fidx].radius < maxr) {
Forests[fidx].radius = maxr;
}
}
*/
fidx++;
}
}
fclose(fp);
}
else
{
//file not exist
fprintf(stderr, "File not found!\n");
}
}
void clean_up() {
int i;
free(Nodes);
/*
for (i = 0; i < BNUM; i++) {
if (Buildings[i].inodes != NULL)
free(Buildings[i].inodes);
}
*/
free(Buildings);
/*
for (i = 0; i < FNUM; i++) {
if (Forests[i].inodes != NULL)
free(Forests[i].inodes);
}
*/
free(Forests);
freeCudaMemory();
} | 01711d3e30c845ef43ce05c59774ecd09b1f6c75.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <GL/glut.h>
#include <GL/glu.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include "toolfunctions.h"
#include <time.h>
#define N 172032 //131072
#define M 672
#define THRESHOLD 100000
#define GSIZE 1000
#define PNTSIZE 300
#define AUTO_END 10
#define ORTHO 80000
#define RADIUS 1500
#define RADSCALE 1000000000
#define LINE_SIZE 0.3
#define NCOMPASS 360
#define d2r(deg) (deg * PI / 180.0)
#define kill(s) (s->dead = true)
#define PI 3.14159265358979323846
#define MAPX 1273623389
#define MAPY 363706170
#define NNUM 949
#define BNUM 117
#define FNUM 4
int nnum, bnum, fnum;
int point_mode = 0;
int num_hounds;
int width = 800, height = 800;
signal sig[N];
int compass[10][NCOMPASS];
int count[10][NCOMPASS];
int selection_mode = 0; //generator: 0, detector: 1
node *Nodes;
polygon *Buildings;
polygon *Forests;
info GameInfo;
////////////////// cuda time
signal *dev_signals;
node *dev_nodes;
polygon *dev_buildings, *dev_forests;
info *dev_info;
int toggle[10];
void load_file();
void clean_up();
void initialize();
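// signal_calculation: one thread per emitted ray. Each ray starts at the
// generator (gx, gy) and is traced for at most AUTO_END bounces: it may be
// reflected by the nearest building edge, blocked by a forest edge, or
// terminate once it comes within RADIUS of the hound position (ax, ay)
// assigned to this thread (threads are partitioned evenly across hounds).
// The accumulated path length is kept in si->ss; rays that never reach a
// hound are marked dead.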
__global__ void signal_calculation(signal *signal_list,
const node *node_list, const polygon *building_list, const polygon *forest_list, const info *dev_info) {
int i = threadIdx.x + (blockIdx.x * blockDim.x);
my_t gx = dev_info->gx;
my_t gy = dev_info->gy;
my_t ax = dev_info->ax[i / (N / dev_info->num_hounds)];
my_t ay = dev_info->ay[i / (N / dev_info->num_hounds)];
//if(i % 10000 == 0)
// printf("gx = %lld, gy = %lld, ax = %lld, ay = %lld\n", gx, gy, ax, ay);
my_t zx, zy;
my_t px, py, test, tdist = 0, kdist = 0;
signal sigref, sigblk;
bool possible;
signal *si = &signal_list[i];
int autoend = -1;
while (!si->dead && ++autoend < AUTO_END) {
si->d = si->vy*si->x - si->vx*si->y;
// case of detection
possible = false;
my_t d = (-si->vy*ax + si->vx*ay + si->d) / RADSCALE;
if (-RADIUS <= d && d <= RADIUS) {
if (si->vx != 0) {
px = ax + (d*si->vy / RADSCALE);
test = (px - si->x) ^ si->vx;
}
else {
py = ay - (d*si->vx / RADSCALE);
test = (py - si->y) ^ si->vy;
}
if (test > 0) {
possible = true;
zx = (si->x - ax);
zy = (si->y - ay);
tdist = zx*zx + zy*zy;
}
}
// reflection test
int n1, n2;
int j, k;
my_t test, kdist;
my_t lx1, ly1, lx2, ly2;
my_t Tnx, Tny, Td, pr;
sigref.dead = true;
int eid;
for (j = 0; j < BNUM; j++) {
// calculate reflection
const polygon *p = &building_list[j];
d = ((-si->vy)*p->x + (si->vx)*p->y + si->d) / RADSCALE;
pr = p->radius;
//possibly blocked if...
if (-pr <= d && d <= pr)
{
for (k = 0; k < p->isize - 1; k++)
{
eid = 100 * i + k;
if (si->eid == eid) continue;
n1 = p->inodes[k];
n2 = p->inodes[k + 1];
lx1 = node_list[n1].x;
ly1 = node_list[n1].y;
lx2 = node_list[n2].x;
ly2 = node_list[n2].y;
Tnx = -si->vy;
Tny = si->vx;
Td = -(-si->vy*si->x + si->vx*si->y);
my_t tb = Tnx*(lx2 - lx1) + Tny*(ly2 - ly1);
if (tb == 0) { // parallel
continue;
}
my_t t = -(Tnx*lx1 + Tny*ly1 + Td);
if (t == 0 || t == tb) {
continue;
}
if ((0 < t && t < tb) || (tb < t && t < 0)) {
my_t px = lx1 + t*(lx2 - lx1) / tb;
my_t py = ly1 + t*(ly2 - ly1) / tb;
if (si->vx != 0) {
test = (px - si->x) ^ si->vx;
}
else {
test = (py - si->y) ^ si->vy;
}
if (test > 0) {
zx = (si->x - px);
zy = (si->y - py);
kdist = zx*zx + zy*zy;
if (kdist < 10) continue;
if (sigref.dead || sigref.ss > kdist) { //if marked as alive
my_t lnx = -(ly2 - ly1);
my_t lny = (lx2 - lx1);
my_t nv = lnx*si->vx + lny*si->vy;
sigref.x = px;
sigref.y = py;
sigref.vx = si->vx - 2 * nv * lnx / (lnx*lnx + lny*lny);
sigref.vy = si->vy - 2 * nv * lny / (lnx*lnx + lny*lny);
sigref.ss = kdist;
sigref.eid = eid;
sigref.dead = false;
}
}
}
}
}
}
// blocking test
sigblk.dead = false;
for (i = 0; i < FNUM; i++) {
// calculate reflection
const polygon *p = &forest_list[i];
d = ((-si->vy)*p->x + (si->vx)*p->y + si->d) / RADSCALE;
pr = p->radius;
//possibly blocked if...
if (-pr <= d && d <= pr)
{
for (k = 0; k < p->isize - 1; k++)
{
n1 = p->inodes[k];
n2 = p->inodes[k + 1];
lx1 = node_list[n1].x;
ly1 = node_list[n1].y;
lx2 = node_list[n2].x;
ly2 = node_list[n2].y;
Tnx = -si->vy;
Tny = si->vx;
Td = -(-si->vy*si->x + si->vx*si->y);//sigin->d;
// p' = p1 + t(p2-p1), T(dot)p' = 0
// t = -(T(dot)p1) / (T(dot)(p2 - p1))
my_t tb = Tnx*(lx2 - lx1) + Tny*(ly2 - ly1);
if (tb == 0) { // parallel
continue;
}
my_t t = -(Tnx*lx1 + Tny*ly1 + Td);
if (t == 0 || t == tb) continue;
if ((0 < t && t < tb) || (tb < t && t < 0)) {
my_t px = lx1 + t*(lx2 - lx1) / tb;
my_t py = ly1 + t*(ly2 - ly1) / tb;
if (si->vx != 0) {
test = (px - si->x) ^ si->vx;
}
else {
test = (py - si->y) ^ si->vy;
}
if (test > 0) {
zx = (si->x - px);
zy = (si->y - py);
kdist = zx*zx + zy*zy;
if (!sigblk.dead || sigblk.ss > kdist) {
sigblk.x = px;
sigblk.y = py;
sigblk.ss = kdist;
sigblk.dead = true;
}
}
}
}
}
}
if (!sigref.dead) {
if (sigblk.dead) {
if (possible && tdist < sigref.ss && tdist < sigblk.ss) {
si->ss += sqrt((float)tdist);
break;
}
if (sigref.ss < sigblk.ss) {
sigref.ss = sqrt(float(sigref.ss));
sigref.ss += si->ss;
*si = sigref;
continue;
}
else {
kill(si);
break;
}
}
else {
if (possible && tdist < sigref.ss) {
si->ss += sqrt((float)tdist);
break;
}
else {
sigref.ss = sqrt(float(sigref.ss));
sigref.ss += si->ss;
*si = sigref;
continue;
}
}
}
else {
if (sigblk.dead) {
if (possible && tdist < sigblk.ss) {
si->ss += sqrt((float)tdist);
break;
}
else {
kill(si);
break;
}
}
}
if (possible)
si->ss += sqrt((float)tdist);
else
kill(si);
break;
}
if (autoend == AUTO_END) {
kill(si);
}
}
void freeCudaMemory() {
cudaFree(dev_signals);
cudaFree(dev_nodes);
cudaFree(dev_buildings);
cudaFree(dev_forests);
cudaFree(dev_info);
}
cudaError_t allocateCudaMemory() {
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output).
cudaStatus = cudaMalloc((void**)&dev_info, sizeof(info));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_signals, N * sizeof(signal));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_nodes, NNUM * sizeof(node));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_nodes, Nodes, NNUM * sizeof(node), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_buildings, BNUM * sizeof(polygon));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_buildings, Buildings, BNUM * sizeof(polygon), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_forests, FNUM * sizeof(polygon));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_forests, Forests, FNUM * sizeof(polygon), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
return cudaStatus;
}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t signalCalcWithCuda()
{
cudaError_t cudaStatus;
long double r;
int modN = N - (N%GameInfo.num_hounds);
int eachN = (N / GameInfo.num_hounds);
int i;
for (i = 0; i < N; i++) {
signal *si = &sig[i];
if (i < modN) {
r = d2r(360.0 * (i % eachN) / (long double)eachN);
si->x = GameInfo.gx;
si->y = GameInfo.gy;
si->vx = cosl(r) * RADSCALE;
si->vy = sinl(r) * RADSCALE;
si->ss = 0;
si->dead = false;
si->eid = -1;
}
else {
kill(si);
}
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_signals, &sig, N * sizeof(signal), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_info, &GameInfo, sizeof(info), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
signal_calculation << <M, N / M >> >(dev_signals, dev_nodes, dev_buildings, dev_forests, dev_info);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "signal_calculation launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching signal_calculation!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(sig, dev_signals, N * sizeof(signal), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
return cudaStatus;
}
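// convertToCompass: bins the surviving rays per hound and per arrival
// direction (atan2 of the reversed ray direction, in degrees mapped onto
// NCOMPASS bins) and stores an inverse-distance strength, 1000000000 / ss,
// for each occupied bin.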
void convertToCompass() {
int i, j, hidx, sidx;
double sum;
for (i = 0; i < num_hounds; i++) {
for (j = 0; j < NCOMPASS; j++) {
compass[i][j] = 0;
			count[i][j] = 0; // initialize
}
}
int rN = N - (N % num_hounds);
int eachN = rN / num_hounds;
int deg;
for (i = 0; i < rN; i++) {
hidx = i / eachN;
deg = (int)(atan2(-sig[i].vy, -sig[i].vx) * 180 / PI);
if (deg < 0) deg += 360;
sidx = NCOMPASS * deg / 360;
if (!sig[i].dead) {
compass[hidx][sidx] = 1000000000 / sig[i].ss;
count[hidx][sidx] = 1;
}
//compass[hidx]
}
for (i = 0; i < hidx; i++) {
for (j = 0; j < NCOMPASS; j++) {
if(count[i][j] != 0)
compass[i][j] /= count[i][j];
}
}
}
int main(int argc, char* argv[])
{
if (argc == 1) return 0;
initialize();
int i, j;
my_t lat, lon;
lon = atoll(argv[1]);
lat = atoll(argv[2]);
GameInfo.gx = lon - (my_t)MAPX;
GameInfo.gy = lat - (my_t)MAPY;
sscanf_s(argv[3], "%d", &num_hounds);
GameInfo.num_hounds = num_hounds;
for (i = 0; i < num_hounds; i++) {
lon = atoll(argv[4 + i*2]);
lat = atoll(argv[5 + i*2]);
GameInfo.ax[i] = lon - (my_t)MAPX;
GameInfo.ay[i] = lat - (my_t)MAPY;
}
signalCalcWithCuda();
convertToCompass();
fprintf(stdout, "[");
for (j = 0; j < num_hounds; j++) {
fprintf(stdout, "[");
for (i = 0; i < NCOMPASS; i++) {
if (i == NCOMPASS - 1) {
fprintf(stdout, "%d", compass[j][i]);
}
else {
fprintf(stdout, "%d, ", compass[j][i]);
}
}
if (j < num_hounds - 1) {
fprintf(stdout, "], ");
}
else fprintf(stdout, "]");
}
fprintf(stdout, "]");
/*
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_SINGLE | GLUT_RGBA | GLUT_DEPTH);
glutInitWindowSize(width, height);
glutCreateWindow("CS408 SIGNAL TEST");
glutDisplayFunc(display);
glutMouseFunc(onMouseButton); // Register onMouseButton function to call that when user moves mouse.
glutMotionFunc(onMouseDrag); // Register onMouseDrag function to call that when user drags mouse.
glutKeyboardFunc(onKeyPress);
glutMainLoop();
*/
clean_up();
return 0;
}
void initialize() {
load_file();
allocateCudaMemory();
}
void load_file() {
int count;
FILE * fp;
char stmp[255];
char *pstr;
char *context = NULL;
char *token;
const char del[3] = "\t\n";
int nidx, bidx, fidx;
bool firstline = true;
bool isname = true;
int ti;
int tokidx;
my_t mxx, mxy, mix, miy;
int i, ni, maxr;
fopen_s(&fp, "MapData.txt", "rt");
if (fp != NULL)
{
nidx = bidx = fidx = 0;
fscanf_s(fp, "i\t%d\t%d\t%d\n", &nnum, &bnum, &fnum);
//deprecated dynamic allocation
Nodes = (node*)malloc(sizeof(node)*NNUM);
Buildings = (polygon*)malloc(sizeof(polygon)*BNUM);
Forests = (polygon*)malloc(sizeof(polygon)*FNUM);
while (!feof(fp))
{
pstr = fgets(stmp, sizeof(stmp), fp);
if (pstr == NULL) break;
if (pstr[0] == 'n') {
double lat, lon;
sscanf_s(pstr, "n\t%lf\t%lf", &lat, &lon);
Nodes[nidx].x = (my_t)(lon*1e7 - MAPX);
Nodes[nidx].y = (my_t)(lat*1e7 - MAPY);
nidx++;
}
if (pstr[0] == 'b') {
count = 0; //except name tag
for (char *c = pstr; *c != NULL; c++) {
if (*c == '\t') count++;
}
//Buildings[bidx].inodes = (int*)malloc(sizeof(int)*count);
Buildings[bidx].isize = count;
mxx = mxy = -999999999;
mix = miy = 999999999;
token = strtok_s(pstr, del, &context);
tokidx = 0;
isname = true;
while (token != NULL)
{
if (isname) {
token = strtok_s(NULL, del, &context);
isname = false;
continue;
}
sscanf_s(token, "%d", &ti);
Buildings[bidx].inodes[tokidx] = ti;
if (mxx < Nodes[ti].x)
mxx = Nodes[ti].x;
if (mxy < Nodes[ti].y)
mxy = Nodes[ti].y;
if (mix > Nodes[ti].x)
mix = Nodes[ti].x;
if (miy > Nodes[ti].y)
miy = Nodes[ti].y;
token = strtok_s(NULL, del, &context);
tokidx++;
}
Buildings[bidx].x = (mxx + mix) / 2;
Buildings[bidx].y = (mxy + miy) / 2;
Buildings[bidx].radius = sqrt((mxx - mix)*(mxx - mix) + (mxy - miy)*(mxy - miy)) / 2;
/*
Buildings[bidx].radius = 0;
for (i = 0; i < Buildings[bidx].isize; i++) {
ni = Buildings[bidx].inodes[i];
maxr = (int)sqrt((Nodes[ni].x - Buildings[bidx].x)*(Nodes[ni].x - Buildings[bidx].x)
+ (Nodes[ni].y - Buildings[fidx].y)*(Nodes[ni].y - Buildings[bidx].x)) + 1;
if (Buildings[bidx].radius < maxr) {
Buildings[bidx].radius = maxr;
}
}
*/
bidx++;
}
if (pstr[0] == 'f') {
count = 0;
for (char *c = pstr; *c != NULL; c++) {
if (*c == '\t') count++;
}
//Forests[fidx].inodes = (int*)malloc(sizeof(int)*count);
Forests[fidx].isize = count;
mxx = mxy = -999999999;
mix = miy = 999999999;
token = strtok_s(pstr, del, &context);
tokidx = 0;
isname = true;
while (token != NULL)
{
if (isname) {
token = strtok_s(NULL, del, &context);
isname = false;
continue;
}
sscanf_s(token, "%d", &ti);
Forests[fidx].inodes[tokidx] = ti;
if (mxx < Nodes[ti].x)
mxx = Nodes[ti].x;
if (mxy < Nodes[ti].y)
mxy = Nodes[ti].y;
if (mix > Nodes[ti].x)
mix = Nodes[ti].x;
if (miy > Nodes[ti].y)
miy = Nodes[ti].y;
token = strtok_s(NULL, del, &context);
tokidx++;
}
Forests[fidx].x = (mxx + mix) / 2;
Forests[fidx].y = (mxy + miy) / 2;
Forests[fidx].radius = sqrt((mxx - mix)*(mxx - mix) + (mxy - miy)*(mxy - miy)) / 2;
/*
Forests[fidx].radius = 0;
for (i = 0; i < Forests[fidx].isize; i++) {
ni = Forests[fidx].inodes[i];
maxr = (int)sqrt((Nodes[ni].x - Forests[fidx].x)*(Nodes[ni].x - Forests[fidx].x)
+ (Nodes[ni].y - Forests[fidx].y)*(Nodes[ni].y - Forests[fidx].x)) + 1;
if (Forests[fidx].radius < maxr) {
Forests[fidx].radius = maxr;
}
}
*/
fidx++;
}
}
fclose(fp);
}
else
{
//file not exist
fprintf(stderr, "File not found!\n");
}
}
void clean_up() {
int i;
free(Nodes);
/*
for (i = 0; i < BNUM; i++) {
if (Buildings[i].inodes != NULL)
free(Buildings[i].inodes);
}
*/
free(Buildings);
/*
for (i = 0; i < FNUM; i++) {
if (Forests[i].inodes != NULL)
free(Forests[i].inodes);
}
*/
free(Forests);
freeCudaMemory();
} |
90ab0e3a36fb5a3f3a9c412c2d8005dc944ea902.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
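// All pooling kernels below launch one thread per output (forward) or input
// (backward) element and decode the flat index into (n, c, h/ph, w/pw)
// coordinates assuming NCHW memory layout, e.g.
// pw = index % pooled_width, ph = (index / pooled_width) % pooled_height.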
template <typename Dtype, typename Mtype>
__global__ void MaxPoolForward(const int nthreads,
const Dtype* const bottom_data, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
Dtype* const top_data, int* mask, Dtype* top_mask) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
const int hend = min(hstart + kernel_h, height);
const int wend = min(wstart + kernel_w, width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
Mtype maxval( - maxDtype<Dtype>());
int maxidx = -1;
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
if (bottom_slice[h * width + w] > maxval) {
maxidx = h * width + w;
maxval = Get<Mtype>(bottom_slice[maxidx]);
}
}
}
top_data[index] = Get<Dtype>(maxval);
if (mask) {
mask[index] = maxidx;
} else {
top_mask[index] = Get<Dtype>(maxidx);
}
}
}
template <typename Dtype, typename Mtype>
__global__ void AvePoolForward(const int nthreads,
const Dtype* const bottom_data, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
Dtype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
const int pool_size = (hend - hstart) * (wend - wstart);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, height);
wend = min(wend, width);
Mtype aveval(0.);
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
aveval += Get<Mtype>(bottom_slice[h * width + w]);
}
}
top_data[index] = Get<Dtype>( aveval / pool_size );
}
}
template <typename Dtype, typename Mtype>
__global__ void StoPoolForwardTrain(const int nthreads,
const Dtype* const bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* const rand_idx, Dtype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
const int hstart = ph * stride_h;
const int hend = min(hstart + kernel_h, height);
const int wstart = pw * stride_w;
const int wend = min(wstart + kernel_w, width);
Mtype cumsum(0.);
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
// First pass: get sum
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += Get<Mtype>(bottom_slice[h * width + w]);
}
}
const Mtype thres = Get<Mtype>(rand_idx[index] * cumsum);
// Second pass: get value, and set index.
cumsum = 0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += Get<Mtype>(bottom_slice[h * width + w]);
if (cumsum >= thres) {
rand_idx[index] = Get<Dtype>(((n * channels + c) * height + h) * width + w);
top_data[index] = bottom_slice[h * width + w];
return;
}
}
}
}
}
template <typename Dtype, typename Mtype>
__global__ void StoPoolForwardTest(const int nthreads,
const Dtype* const bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
const int hstart = ph * stride_h;
const int hend = min(hstart + kernel_h, height);
const int wstart = pw * stride_w;
const int wend = min(wstart + kernel_w, width);
    // Initialize cumsum to FLT_MIN (effectively zero) to avoid divide-by-zero problems
Mtype cumsum(FLT_MIN);
Mtype cumvalues(0.);
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
// First pass: get sum
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += Get<Mtype>(bottom_slice[h * width + w]);
cumvalues += Get<Mtype>(bottom_slice[h * width + w] * bottom_slice[h * width + w]);
}
}
top_data[index] = Get<Dtype>( cumvalues / cumsum );
}
}
template <typename Dtype, typename Mtype>
void PoolingLayer<Dtype,Mtype>::Forward_gpu(const vector<Blob<Dtype,Mtype>*>& bottom,
const vector<Blob<Dtype,Mtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int count = top[0]->count();
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top.size() > 1;
int* mask = NULL;
Dtype* top_mask = NULL;
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
if (use_top_mask) {
top_mask = top[1]->mutable_gpu_data();
} else {
mask = max_idx_.mutable_gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( MaxPoolForward<Dtype,Mtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data,
mask, top_mask);
break;
case PoolingParameter_PoolMethod_AVE:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( AvePoolForward<Dtype,Mtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data);
break;
case PoolingParameter_PoolMethod_STOCHASTIC:
if (this->phase_ == TRAIN) {
// We need to create the random index as well.
caffe_gpu_rng_uniform<Dtype,Mtype>(count, Mtype(0), Mtype(1),
rand_idx_.mutable_gpu_data());
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( StoPoolForwardTrain<Dtype,Mtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_,
rand_idx_.mutable_gpu_data(), top_data);
} else {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( StoPoolForwardTest<Dtype,Mtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, top_data);
}
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
CUDA_POST_KERNEL_CHECK;
}
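// The backward kernels below are parallelized over *bottom* elements: each
// thread visits every pooled output whose window can contain its (h, w)
// location and accumulates the corresponding gradient (for MAX pooling only
// where the stored mask / top_mask entry points back to this element).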
template <typename Dtype, typename Mtype>
__global__ void MaxPoolBackward(const int nthreads, const Dtype* const top_diff,
const int* const mask, const Dtype* const top_mask, const int num,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, const int kernel_h,
const int kernel_w, const int stride_h, const int stride_w, const int pad_h,
const int pad_w, Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
const int w = index % width;
const int h = (index / width) % height;
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
const int phstart =
(h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;
const int phend = min((h + pad_h) / stride_h + 1, pooled_height);
const int pwstart =
(w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;
const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);
Mtype gradient(0.);
const int offset = (n * channels + c) * pooled_height * pooled_width;
const Dtype* const top_diff_slice = top_diff + offset;
if (mask) {
const int* const mask_slice = mask + offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (mask_slice[ph * pooled_width + pw] == h * width + w) {
gradient += Get<Mtype>(top_diff_slice[ph * pooled_width + pw]);
}
}
}
} else {
const Dtype* const top_mask_slice = top_mask + offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (top_mask_slice[ph * pooled_width + pw] == Get<Dtype>(h * width + w)) {
gradient += Get<Mtype>(top_diff_slice[ph * pooled_width + pw]);
}
}
}
}
bottom_diff[index] = Get<Dtype>(gradient);
}
}
template <typename Dtype, typename Mtype>
__global__ void AvePoolBackward(const int nthreads, const Dtype* const top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w,
Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
    // decompose the flat index into the bottom element's (n, c, h, w) position
const int w = index % width + pad_w;
const int h = (index / width) % height + pad_h;
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
const int phend = min(h / stride_h + 1, pooled_height);
const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
const int pwend = min(w / stride_w + 1, pooled_width);
Mtype gradient(0.);
const Dtype* const top_diff_slice =
top_diff + (n * channels + c) * pooled_height * pooled_width;
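    // Each window that covered this element averaged it with weight
    // 1/pool_size, so the same weight scales top_diff here; pool_size is
    // recomputed per window exactly as in the forward kernel.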
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
// figure out the pooling size
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
int pool_size = (hend - hstart) * (wend - wstart);
gradient += Get<Mtype>(top_diff_slice[ph * pooled_width + pw]) / pool_size;
}
}
bottom_diff[index] = Get<Dtype>(gradient);
}
}
template <typename Dtype, typename Mtype>
__global__ void StoPoolBackward(const int nthreads,
const Dtype* const rand_idx, const Dtype* const top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
    // decompose the flat index into the bottom element's (n, c, h, w) position
const int w = index % width;
const int h = (index / width) % height;
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
const int phend = min(h / stride_h + 1, pooled_height);
const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
const int pwend = min(w / stride_w + 1, pooled_width);
Mtype gradient(0.);
const Dtype* const rand_idx_slice =
rand_idx + (n * channels + c) * pooled_height * pooled_width;
const Dtype* const top_diff_slice =
top_diff + (n * channels + c) * pooled_height * pooled_width;
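    // rand_idx stores, per pooled output, the flattened bottom index sampled
    // in the forward pass; the (index == ...) comparison acts as a 0/1 mask so
    // gradient flows only to that element.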
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
gradient += Get<Mtype>(top_diff_slice[ph * pooled_width + pw]) *
(index == Get<int>(rand_idx_slice[ph * pooled_width + pw]));
}
}
bottom_diff[index] = Get<Dtype>(gradient);
}
}
template <typename Dtype, typename Mtype>
void PoolingLayer<Dtype,Mtype>::Backward_gpu(const vector<Blob<Dtype,Mtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype,Mtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
caffe_gpu_set<Dtype,Mtype>(count, Mtype(0.), bottom_diff);
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top.size() > 1;
const int* mask = NULL;
const Dtype* top_mask = NULL;
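  // Mirror the forward pass: the max-pooling argmax is read back either from
  // the explicit top mask blob or from the internal max_idx_ buffer.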
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
if (use_top_mask) {
top_mask = top[1]->gpu_data();
} else {
mask = max_idx_.gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( MaxPoolBackward<Dtype,Mtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, mask, top_mask, top[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_,
kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_,
bottom_diff);
break;
case PoolingParameter_PoolMethod_AVE:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( AvePoolBackward<Dtype,Mtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, top[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff);
break;
case PoolingParameter_PoolMethod_STOCHASTIC:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( StoPoolBackward<Dtype,Mtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, rand_idx_.gpu_data(), top_diff,
top[0]->num(), channels_, height_, width_, pooled_height_,
pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_,
bottom_diff);
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(PoolingLayer);
} // namespace caffe
| 90ab0e3a36fb5a3f3a9c412c2d8005dc944ea902.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype, typename Mtype>
__global__ void MaxPoolForward(const int nthreads,
const Dtype* const bottom_data, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
Dtype* const top_data, int* mask, Dtype* top_mask) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
const int hend = min(hstart + kernel_h, height);
const int wend = min(wstart + kernel_w, width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
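    // maxval starts at -maxDtype<Dtype>() so any element in the window wins
    // the first comparison; maxidx records the winner's offset within the
    // (n, c) slice.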
Mtype maxval( - maxDtype<Dtype>());
int maxidx = -1;
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
if (bottom_slice[h * width + w] > maxval) {
maxidx = h * width + w;
maxval = Get<Mtype>(bottom_slice[maxidx]);
}
}
}
top_data[index] = Get<Dtype>(maxval);
if (mask) {
mask[index] = maxidx;
} else {
top_mask[index] = Get<Dtype>(maxidx);
}
}
}
template <typename Dtype, typename Mtype>
__global__ void AvePoolForward(const int nthreads,
const Dtype* const bottom_data, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
Dtype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
const int pool_size = (hend - hstart) * (wend - wstart);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, height);
wend = min(wend, width);
Mtype aveval(0.);
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
aveval += Get<Mtype>(bottom_slice[h * width + w]);
}
}
top_data[index] = Get<Dtype>( aveval / pool_size );
}
}
template <typename Dtype, typename Mtype>
__global__ void StoPoolForwardTrain(const int nthreads,
const Dtype* const bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* const rand_idx, Dtype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
const int hstart = ph * stride_h;
const int hend = min(hstart + kernel_h, height);
const int wstart = pw * stride_w;
const int wend = min(wstart + kernel_w, width);
Mtype cumsum(0.);
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
// First pass: get sum
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += Get<Mtype>(bottom_slice[h * width + w]);
}
}
const Mtype thres = Get<Mtype>(rand_idx[index] * cumsum);
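    // thres is a uniform draw in [0, cumsum); the second pass returns the
    // first element whose running prefix sum reaches thres, i.e. an element
    // sampled with probability proportional to its activation.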
// Second pass: get value, and set index.
cumsum = 0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += Get<Mtype>(bottom_slice[h * width + w]);
if (cumsum >= thres) {
rand_idx[index] = Get<Dtype>(((n * channels + c) * height + h) * width + w);
top_data[index] = bottom_slice[h * width + w];
return;
}
}
}
}
}
template <typename Dtype, typename Mtype>
__global__ void StoPoolForwardTest(const int nthreads,
const Dtype* const bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
const int hstart = ph * stride_h;
const int hend = min(hstart + kernel_h, height);
const int wstart = pw * stride_w;
const int wend = min(wstart + kernel_w, width);
    // We initialize cumsum to FLT_MIN rather than 0 to avoid divide-by-zero problems
Mtype cumsum(FLT_MIN);
Mtype cumvalues(0.);
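    // At test time the stochastic pooling output is replaced by its
    // expectation: sum(x*x)/sum(x), i.e. each element weighted by its own
    // normalized activation.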
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
// First pass: get sum
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += Get<Mtype>(bottom_slice[h * width + w]);
cumvalues += Get<Mtype>(bottom_slice[h * width + w] * bottom_slice[h * width + w]);
}
}
top_data[index] = Get<Dtype>( cumvalues / cumsum );
}
}
template <typename Dtype, typename Mtype>
void PoolingLayer<Dtype,Mtype>::Forward_gpu(const vector<Blob<Dtype,Mtype>*>& bottom,
const vector<Blob<Dtype,Mtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int count = top[0]->count();
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top.size() > 1;
int* mask = NULL;
Dtype* top_mask = NULL;
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
if (use_top_mask) {
top_mask = top[1]->mutable_gpu_data();
} else {
mask = max_idx_.mutable_gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
MaxPoolForward<Dtype,Mtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data,
mask, top_mask);
break;
case PoolingParameter_PoolMethod_AVE:
// NOLINT_NEXT_LINE(whitespace/operators)
AvePoolForward<Dtype,Mtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data);
break;
case PoolingParameter_PoolMethod_STOCHASTIC:
if (this->phase_ == TRAIN) {
// We need to create the random index as well.
caffe_gpu_rng_uniform<Dtype,Mtype>(count, Mtype(0), Mtype(1),
rand_idx_.mutable_gpu_data());
// NOLINT_NEXT_LINE(whitespace/operators)
StoPoolForwardTrain<Dtype,Mtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_,
rand_idx_.mutable_gpu_data(), top_data);
} else {
// NOLINT_NEXT_LINE(whitespace/operators)
StoPoolForwardTest<Dtype,Mtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, top_data);
}
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype, typename Mtype>
__global__ void MaxPoolBackward(const int nthreads, const Dtype* const top_diff,
const int* const mask, const Dtype* const top_mask, const int num,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, const int kernel_h,
const int kernel_w, const int stride_h, const int stride_w, const int pad_h,
const int pad_w, Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
    // decompose the flat index into the bottom element's (n, c, h, w) position
const int w = index % width;
const int h = (index / width) % height;
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
const int phstart =
(h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;
const int phend = min((h + pad_h) / stride_h + 1, pooled_height);
const int pwstart =
(w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;
const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);
Mtype gradient(0.);
const int offset = (n * channels + c) * pooled_height * pooled_width;
const Dtype* const top_diff_slice = top_diff + offset;
if (mask) {
const int* const mask_slice = mask + offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (mask_slice[ph * pooled_width + pw] == h * width + w) {
gradient += Get<Mtype>(top_diff_slice[ph * pooled_width + pw]);
}
}
}
} else {
const Dtype* const top_mask_slice = top_mask + offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (top_mask_slice[ph * pooled_width + pw] == Get<Dtype>(h * width + w)) {
gradient += Get<Mtype>(top_diff_slice[ph * pooled_width + pw]);
}
}
}
}
bottom_diff[index] = Get<Dtype>(gradient);
}
}
template <typename Dtype, typename Mtype>
__global__ void AvePoolBackward(const int nthreads, const Dtype* const top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w,
Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
    // decompose the flat index into the bottom element's (n, c, h, w) position
const int w = index % width + pad_w;
const int h = (index / width) % height + pad_h;
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
const int phend = min(h / stride_h + 1, pooled_height);
const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
const int pwend = min(w / stride_w + 1, pooled_width);
Mtype gradient(0.);
const Dtype* const top_diff_slice =
top_diff + (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
// figure out the pooling size
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
int pool_size = (hend - hstart) * (wend - wstart);
gradient += Get<Mtype>(top_diff_slice[ph * pooled_width + pw]) / pool_size;
}
}
bottom_diff[index] = Get<Dtype>(gradient);
}
}
template <typename Dtype, typename Mtype>
__global__ void StoPoolBackward(const int nthreads,
const Dtype* const rand_idx, const Dtype* const top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
    // decompose the flat index into the bottom element's (n, c, h, w) position
const int w = index % width;
const int h = (index / width) % height;
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
const int phend = min(h / stride_h + 1, pooled_height);
const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
const int pwend = min(w / stride_w + 1, pooled_width);
Mtype gradient(0.);
const Dtype* const rand_idx_slice =
rand_idx + (n * channels + c) * pooled_height * pooled_width;
const Dtype* const top_diff_slice =
top_diff + (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
gradient += Get<Mtype>(top_diff_slice[ph * pooled_width + pw]) *
(index == Get<int>(rand_idx_slice[ph * pooled_width + pw]));
}
}
bottom_diff[index] = Get<Dtype>(gradient);
}
}
template <typename Dtype, typename Mtype>
void PoolingLayer<Dtype,Mtype>::Backward_gpu(const vector<Blob<Dtype,Mtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype,Mtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
caffe_gpu_set<Dtype,Mtype>(count, Mtype(0.), bottom_diff);
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top.size() > 1;
const int* mask = NULL;
const Dtype* top_mask = NULL;
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
if (use_top_mask) {
top_mask = top[1]->gpu_data();
} else {
mask = max_idx_.gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
MaxPoolBackward<Dtype,Mtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, mask, top_mask, top[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_,
kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_,
bottom_diff);
break;
case PoolingParameter_PoolMethod_AVE:
// NOLINT_NEXT_LINE(whitespace/operators)
AvePoolBackward<Dtype,Mtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, top[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff);
break;
case PoolingParameter_PoolMethod_STOCHASTIC:
// NOLINT_NEXT_LINE(whitespace/operators)
StoPoolBackward<Dtype,Mtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, rand_idx_.gpu_data(), top_diff,
top[0]->num(), channels_, height_, width_, pooled_height_,
pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_,
bottom_diff);
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(PoolingLayer);
} // namespace caffe
|
b3415a4048a3aa0643fa9da00289b997bc08c4d3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <Python.h>
#include <iostream>
#include "theano_mod_helper.h"
#include "cuda_ndarray.cuh"
//////////////////////
//// Support Code
//////////////////////
#define INTDIV_POW2(a, b) (a >> b)
#define INTMOD_POW2(a, b) (a & ((1<<b)-1))
// GpuElemwise{Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))}}[(0, 0)]
// node.op.destroy_map={0: [0]}
// Input 0 CudaNdarrayType(float32, matrix)
// Input 1 CudaNdarrayType(float32, (True, True))
// Input 2 CudaNdarrayType(float32, col)
// Input 3 CudaNdarrayType(float32, matrix)
// Input 4 CudaNdarrayType(float32, matrix)
// Input 5 CudaNdarrayType(float32, (True, True))
// Input 6 CudaNdarrayType(float32, (True, True))
// Input 7 CudaNdarrayType(float32, matrix)
// Input 8 CudaNdarrayType(float32, matrix)
// Input 9 CudaNdarrayType(float32, matrix)
// Input 10 CudaNdarrayType(float32, (True, True))
// Output 0 CudaNdarrayType(float32, matrix)
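// Three specializations of the same composite expression follow: a 1-D strided
// kernel, a 2-D strided kernel and a fully C-contiguous kernel. The scalar
// (broadcast) inputs i1, i5, i6 and i10 are read once per thread; the
// callkernel_* dispatcher below collapses dimensions and picks one of them.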
static __global__ void kernel_Composite_node_8da615e4463fd0663090428e75863d87_0_1(unsigned int numEls
, const int dim0
, const float * i0_data, int i0_str_0
, const float * i1_data, int i1_str_0
, const float * i2_data, int i2_str_0
, const float * i3_data, int i3_str_0
, const float * i4_data, int i4_str_0
, const float * i5_data, int i5_str_0
, const float * i6_data, int i6_str_0
, const float * i7_data, int i7_str_0
, const float * i8_data, int i8_str_0
, const float * i9_data, int i9_str_0
, const float * i10_data, int i10_str_0
, float * o0_data, int o0_str_0
)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int numThreads = blockDim.x * gridDim.x;
const float ii_i1_value = i1_data[0];
const float ii_i5_value = i5_data[0];
const float ii_i6_value = i6_data[0];
const float ii_i10_value = i10_data[0];
for (int i = idx; i < numEls; i += numThreads) {
int ii = i;
const float * ii_i0_data = i0_data;
const float * ii_i2_data = i2_data;
const float * ii_i3_data = i3_data;
const float * ii_i4_data = i4_data;
const float * ii_i7_data = i7_data;
const float * ii_i8_data = i8_data;
const float * ii_i9_data = i9_data;
float * ii_o0_data = o0_data;
int pos0 = ii;
ii_i0_data += pos0 * i0_str_0;
ii_i2_data += pos0 * i2_str_0;
ii_i3_data += pos0 * i3_str_0;
ii_i4_data += pos0 * i4_str_0;
ii_i7_data += pos0 * i7_str_0;
ii_i8_data += pos0 * i8_str_0;
ii_i9_data += pos0 * i9_str_0;
ii_o0_data += pos0 * o0_str_0;
npy_float32 o0_i;
{
npy_float32 V_DUMMY_ID__tmp1;
V_DUMMY_ID__tmp1 = ii_i10_value - ii_i4_data[0];
npy_float32 V_DUMMY_ID__tmp2;
V_DUMMY_ID__tmp2 = ii_i5_value * ii_i6_value * ii_i9_data[0];
npy_float32 V_DUMMY_ID__tmp3;
V_DUMMY_ID__tmp3 = ii_i2_data[0] * ii_i8_data[0] * ii_i4_data[0];
npy_float32 V_DUMMY_ID__tmp4;
V_DUMMY_ID__tmp4 = ii_i5_value * ii_i6_value * ii_i7_data[0];
npy_float32 V_DUMMY_ID__tmp5;
V_DUMMY_ID__tmp5 = ii_i1_value * ii_i2_data[0] * ii_i3_data[0] * ii_i4_data[0];
npy_float32 V_DUMMY_ID__tmp6;
V_DUMMY_ID__tmp6 = V_DUMMY_ID__tmp3 / V_DUMMY_ID__tmp2;
npy_float32 V_DUMMY_ID__tmp7;
V_DUMMY_ID__tmp7 = V_DUMMY_ID__tmp5 / V_DUMMY_ID__tmp4;
npy_float32 V_DUMMY_ID__tmp8;
V_DUMMY_ID__tmp8 = V_DUMMY_ID__tmp7 + V_DUMMY_ID__tmp6;
o0_i = ii_i0_data[0] * V_DUMMY_ID__tmp8 * V_DUMMY_ID__tmp1;
}
ii_o0_data[0] = o0_i;
}
}
// GpuElemwise{Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))}}[(0, 0)]
// node.op.destroy_map={0: [0]}
// Input 0 CudaNdarrayType(float32, matrix)
// Input 1 CudaNdarrayType(float32, (True, True))
// Input 2 CudaNdarrayType(float32, col)
// Input 3 CudaNdarrayType(float32, matrix)
// Input 4 CudaNdarrayType(float32, matrix)
// Input 5 CudaNdarrayType(float32, (True, True))
// Input 6 CudaNdarrayType(float32, (True, True))
// Input 7 CudaNdarrayType(float32, matrix)
// Input 8 CudaNdarrayType(float32, matrix)
// Input 9 CudaNdarrayType(float32, matrix)
// Input 10 CudaNdarrayType(float32, (True, True))
// Output 0 CudaNdarrayType(float32, matrix)
static __global__ void kernel_Composite_node_8da615e4463fd0663090428e75863d87_0_2(unsigned int numEls
, const int dim0, const int dim1
, const float * i0_data, int i0_str_0, int i0_str_1
, const float * i1_data, int i1_str_0, int i1_str_1
, const float * i2_data, int i2_str_0, int i2_str_1
, const float * i3_data, int i3_str_0, int i3_str_1
, const float * i4_data, int i4_str_0, int i4_str_1
, const float * i5_data, int i5_str_0, int i5_str_1
, const float * i6_data, int i6_str_0, int i6_str_1
, const float * i7_data, int i7_str_0, int i7_str_1
, const float * i8_data, int i8_str_0, int i8_str_1
, const float * i9_data, int i9_str_0, int i9_str_1
, const float * i10_data, int i10_str_0, int i10_str_1
, float * o0_data, int o0_str_0, int o0_str_1
)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int numThreads = blockDim.x * gridDim.x;
const float ii_i1_value = i1_data[0];
const float ii_i5_value = i5_data[0];
const float ii_i6_value = i6_data[0];
const float ii_i10_value = i10_data[0];
for (int i = idx; i < numEls; i += numThreads) {
int ii = i;
const float * ii_i0_data = i0_data;
const float * ii_i2_data = i2_data;
const float * ii_i3_data = i3_data;
const float * ii_i4_data = i4_data;
const float * ii_i7_data = i7_data;
const float * ii_i8_data = i8_data;
const float * ii_i9_data = i9_data;
float * ii_o0_data = o0_data;
int pos1 = ii % dim1;
ii = ii / dim1;
ii_i0_data += pos1 * i0_str_1;
ii_i2_data += pos1 * i2_str_1;
ii_i3_data += pos1 * i3_str_1;
ii_i4_data += pos1 * i4_str_1;
ii_i7_data += pos1 * i7_str_1;
ii_i8_data += pos1 * i8_str_1;
ii_i9_data += pos1 * i9_str_1;
ii_o0_data += pos1 * o0_str_1;
int pos0 = ii;
ii_i0_data += pos0 * i0_str_0;
ii_i2_data += pos0 * i2_str_0;
ii_i3_data += pos0 * i3_str_0;
ii_i4_data += pos0 * i4_str_0;
ii_i7_data += pos0 * i7_str_0;
ii_i8_data += pos0 * i8_str_0;
ii_i9_data += pos0 * i9_str_0;
ii_o0_data += pos0 * o0_str_0;
npy_float32 o0_i;
{
npy_float32 V_DUMMY_ID__tmp1;
V_DUMMY_ID__tmp1 = ii_i10_value - ii_i4_data[0];
npy_float32 V_DUMMY_ID__tmp2;
V_DUMMY_ID__tmp2 = ii_i5_value * ii_i6_value * ii_i9_data[0];
npy_float32 V_DUMMY_ID__tmp3;
V_DUMMY_ID__tmp3 = ii_i2_data[0] * ii_i8_data[0] * ii_i4_data[0];
npy_float32 V_DUMMY_ID__tmp4;
V_DUMMY_ID__tmp4 = ii_i5_value * ii_i6_value * ii_i7_data[0];
npy_float32 V_DUMMY_ID__tmp5;
V_DUMMY_ID__tmp5 = ii_i1_value * ii_i2_data[0] * ii_i3_data[0] * ii_i4_data[0];
npy_float32 V_DUMMY_ID__tmp6;
V_DUMMY_ID__tmp6 = V_DUMMY_ID__tmp3 / V_DUMMY_ID__tmp2;
npy_float32 V_DUMMY_ID__tmp7;
V_DUMMY_ID__tmp7 = V_DUMMY_ID__tmp5 / V_DUMMY_ID__tmp4;
npy_float32 V_DUMMY_ID__tmp8;
V_DUMMY_ID__tmp8 = V_DUMMY_ID__tmp7 + V_DUMMY_ID__tmp6;
o0_i = ii_i0_data[0] * V_DUMMY_ID__tmp8 * V_DUMMY_ID__tmp1;
}
ii_o0_data[0] = o0_i;
}
}
// GpuElemwise{Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))}}[(0, 0)]
// node.op.destroy_map={0: [0]}
// Input 0 CudaNdarrayType(float32, matrix)
// Input 1 CudaNdarrayType(float32, (True, True))
// Input 2 CudaNdarrayType(float32, col)
// Input 3 CudaNdarrayType(float32, matrix)
// Input 4 CudaNdarrayType(float32, matrix)
// Input 5 CudaNdarrayType(float32, (True, True))
// Input 6 CudaNdarrayType(float32, (True, True))
// Input 7 CudaNdarrayType(float32, matrix)
// Input 8 CudaNdarrayType(float32, matrix)
// Input 9 CudaNdarrayType(float32, matrix)
// Input 10 CudaNdarrayType(float32, (True, True))
// Output 0 CudaNdarrayType(float32, matrix)
static __global__ void kernel_Composite_node_8da615e4463fd0663090428e75863d87_0_Ccontiguous (unsigned int numEls
, const float * i0_data
, const float * i1_data
, const float * i2_data
, const float * i3_data
, const float * i4_data
, const float * i5_data
, const float * i6_data
, const float * i7_data
, const float * i8_data
, const float * i9_data
, const float * i10_data
, float * o0_data
)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int numThreads = blockDim.x * gridDim.x;
const float ii_i1_value = i1_data[0];
const float ii_i5_value = i5_data[0];
const float ii_i6_value = i6_data[0];
const float ii_i10_value = i10_data[0];
for (int i = idx; i < numEls; i += numThreads) {
npy_float32 o0_i;
{
npy_float32 V_DUMMY_ID__tmp1;
V_DUMMY_ID__tmp1 = ii_i10_value - i4_data[i];
npy_float32 V_DUMMY_ID__tmp2;
V_DUMMY_ID__tmp2 = ii_i5_value * ii_i6_value * i9_data[i];
npy_float32 V_DUMMY_ID__tmp3;
V_DUMMY_ID__tmp3 = i2_data[i] * i8_data[i] * i4_data[i];
npy_float32 V_DUMMY_ID__tmp4;
V_DUMMY_ID__tmp4 = ii_i5_value * ii_i6_value * i7_data[i];
npy_float32 V_DUMMY_ID__tmp5;
V_DUMMY_ID__tmp5 = ii_i1_value * i2_data[i] * i3_data[i] * i4_data[i];
npy_float32 V_DUMMY_ID__tmp6;
V_DUMMY_ID__tmp6 = V_DUMMY_ID__tmp3 / V_DUMMY_ID__tmp2;
npy_float32 V_DUMMY_ID__tmp7;
V_DUMMY_ID__tmp7 = V_DUMMY_ID__tmp5 / V_DUMMY_ID__tmp4;
npy_float32 V_DUMMY_ID__tmp8;
V_DUMMY_ID__tmp8 = V_DUMMY_ID__tmp7 + V_DUMMY_ID__tmp6;
o0_i = i0_data[i] * V_DUMMY_ID__tmp8 * V_DUMMY_ID__tmp1;
}
o0_data[i] = o0_i;
}
}
static void can_collapse_node_8da615e4463fd0663090428e75863d87_0(int nd, const int * dims, const int * strides, int collapse[])
{
//can we collapse dims[i] and dims[i-1]
for(int i=nd-1;i>0;i--){
    if(strides[i]*dims[i]==strides[i-1]){ // dimension i-1 follows dimension i contiguously (stride[i-1] == stride[i]*dims[i]), so the two can be merged
collapse[i]=1;
}else collapse[i]=0;
}
}
static int callkernel_node_8da615e4463fd0663090428e75863d87_0(unsigned int numEls, const int d,
const int * dims,
const float * i0_data, const int * i0_str, const float * i1_data, const int * i1_str, const float * i2_data, const int * i2_str, const float * i3_data, const int * i3_str, const float * i4_data, const int * i4_str, const float * i5_data, const int * i5_str, const float * i6_data, const int * i6_str, const float * i7_data, const int * i7_str, const float * i8_data, const int * i8_str, const float * i9_data, const int * i9_str, const float * i10_data, const int * i10_str,
float * o0_data, const int * o0_str)
{
numEls = dims[0]*dims[1]*1;
int local_dims[2];
int local_str[11][2];
int local_ostr[1][2];
int nd_collapse = 2;
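    // Work on local copies of the dims and strides: fully broadcast dimensions
    // are dropped and contiguous dimensions are merged before launching, so the
    // kernel indexes with as few dimensions as possible.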
for(int i=0;i<2;i++){//init new dim
local_dims[i]=dims[i];
}
for(int i=0;i<2;i++){//init new strides
local_str[0][i]=i0_str[i];
}
for(int i=0;i<2;i++){//init new strides
local_str[1][i]=i1_str[i];
}
for(int i=0;i<2;i++){//init new strides
local_str[2][i]=i2_str[i];
}
for(int i=0;i<2;i++){//init new strides
local_str[3][i]=i3_str[i];
}
for(int i=0;i<2;i++){//init new strides
local_str[4][i]=i4_str[i];
}
for(int i=0;i<2;i++){//init new strides
local_str[5][i]=i5_str[i];
}
for(int i=0;i<2;i++){//init new strides
local_str[6][i]=i6_str[i];
}
for(int i=0;i<2;i++){//init new strides
local_str[7][i]=i7_str[i];
}
for(int i=0;i<2;i++){//init new strides
local_str[8][i]=i8_str[i];
}
for(int i=0;i<2;i++){//init new strides
local_str[9][i]=i9_str[i];
}
for(int i=0;i<2;i++){//init new strides
local_str[10][i]=i10_str[i];
}
for(int i=0;i<2;i++){//init new strides
local_ostr[0][i]=o0_str[i];
}
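    // First pass: drop any dimension that has extent 1 and stride 0 in every
    // input and output; a fully broadcast dimension contributes nothing to
    // addressing.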
for(int id=0;id<nd_collapse;id++){
bool all_broadcast=true;
for(int input_id=0;input_id<11;input_id++){
if(local_str[input_id][id]!=0 || local_dims[id]!=1) all_broadcast= false;
}
for(int input_id=0;input_id<1;input_id++){
if(local_ostr[input_id][id]!=0 || local_dims[id]!=1) all_broadcast= false;
}
if(all_broadcast){
for(int j=id+1;j<nd_collapse;j++)//remove dims i from the array
local_dims[j-1]=local_dims[j];
for(int input_id=0;input_id<11;input_id++){
for(int j=id+1;j<nd_collapse;j++){//remove dims i from the array
local_str[input_id][j-1]=local_str[input_id][j];
}
}
for(int output_id=0;output_id<1;output_id++){
for(int j=id+1;j<nd_collapse;j++){//remove dims i from the array
local_ostr[output_id][j-1]=local_ostr[output_id][j];
}
}
nd_collapse--; id--;
}
}
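    // Second pass: adjacent dimensions may only be merged if they are mergeable
    // for every non-scalar input; nd_collapse_ accumulates the AND of the
    // per-input collapse flags computed by can_collapse_node_*.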
int nd_collapse_[2] = {1,1};
int nd_collapse_0[2] = {1,1};
can_collapse_node_8da615e4463fd0663090428e75863d87_0(nd_collapse, local_dims, local_str[0], nd_collapse_0);
for(int i=0;i<nd_collapse;i++){
if(nd_collapse_0[i]==0)
nd_collapse_[i]=0;
}
int nd_collapse_2[2] = {1,1};
can_collapse_node_8da615e4463fd0663090428e75863d87_0(nd_collapse, local_dims, local_str[2], nd_collapse_2);
for(int i=0;i<nd_collapse;i++){
if(nd_collapse_2[i]==0)
nd_collapse_[i]=0;
}
int nd_collapse_3[2] = {1,1};
can_collapse_node_8da615e4463fd0663090428e75863d87_0(nd_collapse, local_dims, local_str[3], nd_collapse_3);
for(int i=0;i<nd_collapse;i++){
if(nd_collapse_3[i]==0)
nd_collapse_[i]=0;
}
int nd_collapse_4[2] = {1,1};
can_collapse_node_8da615e4463fd0663090428e75863d87_0(nd_collapse, local_dims, local_str[4], nd_collapse_4);
for(int i=0;i<nd_collapse;i++){
if(nd_collapse_4[i]==0)
nd_collapse_[i]=0;
}
int nd_collapse_7[2] = {1,1};
can_collapse_node_8da615e4463fd0663090428e75863d87_0(nd_collapse, local_dims, local_str[7], nd_collapse_7);
for(int i=0;i<nd_collapse;i++){
if(nd_collapse_7[i]==0)
nd_collapse_[i]=0;
}
int nd_collapse_8[2] = {1,1};
can_collapse_node_8da615e4463fd0663090428e75863d87_0(nd_collapse, local_dims, local_str[8], nd_collapse_8);
for(int i=0;i<nd_collapse;i++){
if(nd_collapse_8[i]==0)
nd_collapse_[i]=0;
}
int nd_collapse_9[2] = {1,1};
can_collapse_node_8da615e4463fd0663090428e75863d87_0(nd_collapse, local_dims, local_str[9], nd_collapse_9);
for(int i=0;i<nd_collapse;i++){
if(nd_collapse_9[i]==0)
nd_collapse_[i]=0;
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_str[0][i-1]=local_str[0][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_str[0][j-1]=local_str[0][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_str[1][i-1]=local_str[1][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_str[1][j-1]=local_str[1][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_str[2][i-1]=local_str[2][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_str[2][j-1]=local_str[2][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_str[3][i-1]=local_str[3][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_str[3][j-1]=local_str[3][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_str[4][i-1]=local_str[4][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_str[4][j-1]=local_str[4][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_str[5][i-1]=local_str[5][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_str[5][j-1]=local_str[5][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_str[6][i-1]=local_str[6][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_str[6][j-1]=local_str[6][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_str[7][i-1]=local_str[7][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_str[7][j-1]=local_str[7][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_str[8][i-1]=local_str[8][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_str[8][j-1]=local_str[8][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_str[9][i-1]=local_str[9][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_str[9][j-1]=local_str[9][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_str[10][i-1]=local_str[10][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_str[10][j-1]=local_str[10][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_ostr[0][i-1]=local_ostr[0][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_ostr[0][j-1]=local_ostr[0][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_dims[i-1]*=local_dims[i];//set new dims
for(int j=i+1;j<nd_collapse;j++)//remove dims i from the array
local_dims[j-1]=local_dims[j];
}
}
for(int i=1, end=nd_collapse;i<end;i++){
if(nd_collapse_[i]==1)nd_collapse--;
}
if(nd_collapse == 1
&& local_str[0][nd_collapse-1]==1 && local_str[2][nd_collapse-1]==1 && local_str[3][nd_collapse-1]==1 && local_str[4][nd_collapse-1]==1 && local_str[7][nd_collapse-1]==1 && local_str[8][nd_collapse-1]==1 && local_str[9][nd_collapse-1]==1 && local_ostr[0][nd_collapse-1]==1
){nd_collapse=0;}
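    // If everything collapsed to a single unit-stride dimension, treat the data
    // as fully contiguous (nd_collapse == 0) and launch the *_Ccontiguous kernel.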
if(numEls==0) return 0;
switch (nd_collapse==0?0:min(2,nd_collapse)) {
case 0: {
//first use at least a full warp
int threads_per_block = ::min(numEls, (unsigned int)32); //WARP SIZE
//next start adding multiprocessors
int n_blocks = ::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS
// next start adding more warps per multiprocessor
if (threads_per_block * n_blocks < numEls)
threads_per_block = ::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK);
hipLaunchKernelGGL(( kernel_Composite_node_8da615e4463fd0663090428e75863d87_0_Ccontiguous), dim3(n_blocks), dim3(threads_per_block), 0, 0, numEls, i0_data, i1_data, i2_data, i3_data, i4_data, i5_data, i6_data, i7_data, i8_data, i9_data, i10_data, o0_data);
//std::cerr << "calling callkernel returned\n";
CNDA_THREAD_SYNC;
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n",
"GpuElemwise node_8da615e4463fd0663090428e75863d87_0 Composite", hipGetErrorString(err),
n_blocks, threads_per_block,
"kernel_Composite_node_8da615e4463fd0663090428e75863d87_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, i0_data, i1_data, i2_data, i3_data, i4_data, i5_data, i6_data, i7_data, i8_data, i9_data, i10_data, o0_data)");
return -1;
}
return 0;
} break;
case 1: {
//first use at least a full warp
int threads_per_block = ::min(numEls, (unsigned int)32); //WARP SIZE
//next start adding multiprocessors
int n_blocks = ::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS
// next start adding more warps per multiprocessor
if (threads_per_block * n_blocks < numEls)
threads_per_block = ::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK);
hipLaunchKernelGGL(( kernel_Composite_node_8da615e4463fd0663090428e75863d87_0_1), dim3(n_blocks), dim3(threads_per_block), 0, 0, numEls, local_dims[0], i0_data, local_str[0][0], i1_data, local_str[1][0], i2_data, local_str[2][0], i3_data, local_str[3][0], i4_data, local_str[4][0], i5_data, local_str[5][0], i6_data, local_str[6][0], i7_data, local_str[7][0], i8_data, local_str[8][0], i9_data, local_str[9][0], i10_data, local_str[10][0], o0_data, local_ostr[0][0]);
CNDA_THREAD_SYNC;
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n",
"GpuElemwise node_8da615e4463fd0663090428e75863d87_0 Composite", hipGetErrorString(err),
n_blocks, threads_per_block,
"kernel_Composite_node_8da615e4463fd0663090428e75863d87_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], i0_data, local_str[0][0], i1_data, local_str[1][0], i2_data, local_str[2][0], i3_data, local_str[3][0], i4_data, local_str[4][0], i5_data, local_str[5][0], i6_data, local_str[6][0], i7_data, local_str[7][0], i8_data, local_str[8][0], i9_data, local_str[9][0], i10_data, local_str[10][0], o0_data, local_ostr[0][0])");
return -1;
}
return 0;
} break;
case 2: {
//first use at least a full warp
int threads_per_block = ::min(numEls, (unsigned int)32); //WARP SIZE
//next start adding multiprocessors
int n_blocks = ::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS
// next start adding more warps per multiprocessor
if (threads_per_block * n_blocks < numEls)
threads_per_block = ::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK);
hipLaunchKernelGGL(( kernel_Composite_node_8da615e4463fd0663090428e75863d87_0_2), dim3(n_blocks), dim3(threads_per_block), 0, 0, numEls, local_dims[0], local_dims[1], i0_data, local_str[0][0], local_str[0][1], i1_data, local_str[1][0], local_str[1][1], i2_data, local_str[2][0], local_str[2][1], i3_data, local_str[3][0], local_str[3][1], i4_data, local_str[4][0], local_str[4][1], i5_data, local_str[5][0], local_str[5][1], i6_data, local_str[6][0], local_str[6][1], i7_data, local_str[7][0], local_str[7][1], i8_data, local_str[8][0], local_str[8][1], i9_data, local_str[9][0], local_str[9][1], i10_data, local_str[10][0], local_str[10][1], o0_data, local_ostr[0][0], local_ostr[0][1]);
CNDA_THREAD_SYNC;
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n",
"GpuElemwise node_8da615e4463fd0663090428e75863d87_0 Composite", hipGetErrorString(err),
n_blocks, threads_per_block,
"kernel_Composite_node_8da615e4463fd0663090428e75863d87_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], local_dims[1], i0_data, local_str[0][0], local_str[0][1], i1_data, local_str[1][0], local_str[1][1], i2_data, local_str[2][0], local_str[2][1], i3_data, local_str[3][0], local_str[3][1], i4_data, local_str[4][0], local_str[4][1], i5_data, local_str[5][0], local_str[5][1], i6_data, local_str[6][0], local_str[6][1], i7_data, local_str[7][0], local_str[7][1], i8_data, local_str[8][0], local_str[8][1], i9_data, local_str[9][0], local_str[9][1], i10_data, local_str[10][0], local_str[10][1], o0_data, local_ostr[0][0], local_ostr[0][1])");
return -1;
}
return 0;
} break;
}
return -2;
}
namespace {
struct __struct_compiled_op_8da615e4463fd0663090428e75863d87 {
PyObject* __ERROR;
PyObject* storage_V3;
PyObject* storage_V5;
PyObject* storage_V7;
PyObject* storage_V9;
PyObject* storage_V11;
PyObject* storage_V13;
PyObject* storage_V15;
PyObject* storage_V17;
PyObject* storage_V19;
PyObject* storage_V21;
PyObject* storage_V23;
PyObject* storage_V1;
__struct_compiled_op_8da615e4463fd0663090428e75863d87() {
// This is only somewhat safe because we:
// 1) Are not a virtual class
// 2) Do not use any virtual classes in the members
// 3) Deal with mostly POD and pointers
// If this changes, we would have to revise this, but for
// now I am tired of chasing segfaults because
// initialization code had an error and some pointer has
// a junk value.
memset(this, 0, sizeof(*this));
}
~__struct_compiled_op_8da615e4463fd0663090428e75863d87(void) {
cleanup();
}
int init(PyObject* __ERROR, PyObject* storage_V3, PyObject* storage_V5, PyObject* storage_V7, PyObject* storage_V9, PyObject* storage_V11, PyObject* storage_V13, PyObject* storage_V15, PyObject* storage_V17, PyObject* storage_V19, PyObject* storage_V21, PyObject* storage_V23, PyObject* storage_V1) {
Py_XINCREF(storage_V3);
Py_XINCREF(storage_V5);
Py_XINCREF(storage_V7);
Py_XINCREF(storage_V9);
Py_XINCREF(storage_V11);
Py_XINCREF(storage_V13);
Py_XINCREF(storage_V15);
Py_XINCREF(storage_V17);
Py_XINCREF(storage_V19);
Py_XINCREF(storage_V21);
Py_XINCREF(storage_V23);
Py_XINCREF(storage_V1);
this->storage_V3 = storage_V3;
this->storage_V5 = storage_V5;
this->storage_V7 = storage_V7;
this->storage_V9 = storage_V9;
this->storage_V11 = storage_V11;
this->storage_V13 = storage_V13;
this->storage_V15 = storage_V15;
this->storage_V17 = storage_V17;
this->storage_V19 = storage_V19;
this->storage_V21 = storage_V21;
this->storage_V23 = storage_V23;
this->storage_V1 = storage_V1;
this->__ERROR = __ERROR;
return 0;
}
void cleanup(void) {
__label_1:
double __DUMMY_1;
__label_3:
double __DUMMY_3;
__label_5:
double __DUMMY_5;
__label_7:
double __DUMMY_7;
__label_9:
double __DUMMY_9;
__label_11:
double __DUMMY_11;
__label_13:
double __DUMMY_13;
__label_15:
double __DUMMY_15;
__label_17:
double __DUMMY_17;
__label_19:
double __DUMMY_19;
__label_21:
double __DUMMY_21;
__label_23:
double __DUMMY_23;
__label_26:
double __DUMMY_26;
Py_XDECREF(this->storage_V3);
Py_XDECREF(this->storage_V5);
Py_XDECREF(this->storage_V7);
Py_XDECREF(this->storage_V9);
Py_XDECREF(this->storage_V11);
Py_XDECREF(this->storage_V13);
Py_XDECREF(this->storage_V15);
Py_XDECREF(this->storage_V17);
Py_XDECREF(this->storage_V19);
Py_XDECREF(this->storage_V21);
Py_XDECREF(this->storage_V23);
Py_XDECREF(this->storage_V1);
}
int run(void) {
int __failure = 0;
PyObject* py_V1;
CudaNdarray * V1;
PyObject* py_V3;
CudaNdarray * V3;
PyObject* py_V5;
CudaNdarray * V5;
PyObject* py_V7;
CudaNdarray * V7;
PyObject* py_V9;
CudaNdarray * V9;
PyObject* py_V11;
CudaNdarray * V11;
PyObject* py_V13;
CudaNdarray * V13;
PyObject* py_V15;
CudaNdarray * V15;
PyObject* py_V17;
CudaNdarray * V17;
PyObject* py_V19;
CudaNdarray * V19;
PyObject* py_V21;
CudaNdarray * V21;
PyObject* py_V23;
CudaNdarray * V23;
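    // The generated blocks below pull each input CudaNdarray out of its Python
    // storage cell, check its rank, and verify that every broadcastable
    // dimension has extent 1 and stride 0.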
{
py_V1 = PyList_GET_ITEM(storage_V1, 0);
{Py_XINCREF(py_V1);}
if (py_V1 == Py_None)
{
V1 = NULL;
}
else
{
assert(py_V1->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V1))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt));
V1 = (CudaNdarray*)py_V1;
//std::cerr << "c_extract " << V1 << '\n';
if (V1->nd != 2)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2",
V1->nd);
V1 = NULL;
{
__failure = 2;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_2;};
}
//std::cerr << "c_extract " << V1 << " nd check passed\n";
assert(V1);
Py_INCREF(py_V1);
}
else if (py_V1 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V1 = NULL;
{
__failure = 2;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_2;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V1 = NULL;
{
__failure = 2;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_2;};
}
//std::cerr << "c_extract done " << V1 << '\n';
}
{
py_V3 = PyList_GET_ITEM(storage_V3, 0);
{Py_XINCREF(py_V3);}
assert(py_V3->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V3))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt));
V3 = (CudaNdarray*)py_V3;
//std::cerr << "c_extract " << V3 << '\n';
if (V3->nd != 2)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2",
V3->nd);
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract " << V3 << " nd check passed\n";
assert(V3);
Py_INCREF(py_V3);
}
else if (py_V3 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract done " << V3 << '\n';
{
py_V5 = PyList_GET_ITEM(storage_V5, 0);
{Py_XINCREF(py_V5);}
assert(py_V5->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V5))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V5, (py_V5->ob_refcnt));
V5 = (CudaNdarray*)py_V5;
//std::cerr << "c_extract " << V5 << '\n';
if (V5->nd != 2)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2",
V5->nd);
V5 = NULL;
{
__failure = 6;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_6;};
}
//std::cerr << "c_extract " << V5 << " nd check passed\n";
if (CudaNdarray_HOST_DIMS(V5)[0] != 1)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i",
CudaNdarray_HOST_DIMS(V5)[0], 0);
V5 = NULL;
{
__failure = 6;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_6;};
}
//std::cerr << "c_extract " << V5 << "dim check 0 passed\n";
//std::cerr << "c_extract " << V5 << "checking bcast 0 <" << V5->str<< ">\n";
//std::cerr << "c_extract " << V5->str[0] << "\n";
if (CudaNdarray_HOST_STRIDES(V5)[0])
{
//std::cerr << "c_extract bad stride detected...\n";
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i",
CudaNdarray_HOST_STRIDES(V5)[0], 0);
V5 = NULL;
{
__failure = 6;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_6;};
}
//std::cerr << "c_extract " << V5 << "bcast check 0 passed\n";
if (CudaNdarray_HOST_DIMS(V5)[1] != 1)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i",
CudaNdarray_HOST_DIMS(V5)[1], 1);
V5 = NULL;
{
__failure = 6;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_6;};
}
//std::cerr << "c_extract " << V5 << "dim check 1 passed\n";
//std::cerr << "c_extract " << V5 << "checking bcast 1 <" << V5->str<< ">\n";
//std::cerr << "c_extract " << V5->str[1] << "\n";
if (CudaNdarray_HOST_STRIDES(V5)[1])
{
//std::cerr << "c_extract bad stride detected...\n";
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i",
CudaNdarray_HOST_STRIDES(V5)[1], 1);
V5 = NULL;
{
__failure = 6;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_6;};
}
//std::cerr << "c_extract " << V5 << "bcast check 1 passed\n";
assert(V5);
Py_INCREF(py_V5);
}
else if (py_V5 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V5 = NULL;
{
__failure = 6;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_6;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V5, (py_V5->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V5 = NULL;
{
__failure = 6;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_6;};
}
//std::cerr << "c_extract done " << V5 << '\n';
{
py_V7 = PyList_GET_ITEM(storage_V7, 0);
{Py_XINCREF(py_V7);}
assert(py_V7->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V7))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V7, (py_V7->ob_refcnt));
V7 = (CudaNdarray*)py_V7;
//std::cerr << "c_extract " << V7 << '\n';
if (V7->nd != 2)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2",
V7->nd);
V7 = NULL;
{
__failure = 8;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_8;};
}
//std::cerr << "c_extract " << V7 << " nd check passed\n";
if (CudaNdarray_HOST_DIMS(V7)[1] != 1)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i",
CudaNdarray_HOST_DIMS(V7)[1], 1);
V7 = NULL;
{
__failure = 8;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_8;};
}
//std::cerr << "c_extract " << V7 << "dim check 1 passed\n";
//std::cerr << "c_extract " << V7 << "checking bcast 1 <" << V7->str<< ">\n";
//std::cerr << "c_extract " << V7->str[1] << "\n";
if (CudaNdarray_HOST_STRIDES(V7)[1])
{
//std::cerr << "c_extract bad stride detected...\n";
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i",
CudaNdarray_HOST_STRIDES(V7)[1], 1);
V7 = NULL;
{
__failure = 8;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_8;};
}
//std::cerr << "c_extract " << V7 << "bcast check 1 passed\n";
assert(V7);
Py_INCREF(py_V7);
}
else if (py_V7 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V7 = NULL;
{
__failure = 8;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_8;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V7, (py_V7->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V7 = NULL;
{
__failure = 8;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_8;};
}
//std::cerr << "c_extract done " << V7 << '\n';
{
py_V9 = PyList_GET_ITEM(storage_V9, 0);
{Py_XINCREF(py_V9);}
assert(py_V9->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V9))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V9, (py_V9->ob_refcnt));
V9 = (CudaNdarray*)py_V9;
//std::cerr << "c_extract " << V9 << '\n';
if (V9->nd != 2)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2",
V9->nd);
V9 = NULL;
{
__failure = 10;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_10;};
}
//std::cerr << "c_extract " << V9 << " nd check passed\n";
assert(V9);
Py_INCREF(py_V9);
}
else if (py_V9 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V9 = NULL;
{
__failure = 10;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_10;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V9, (py_V9->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V9 = NULL;
{
__failure = 10;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_10;};
}
//std::cerr << "c_extract done " << V9 << '\n';
{
py_V11 = PyList_GET_ITEM(storage_V11, 0);
{Py_XINCREF(py_V11);}
assert(py_V11->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V11))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V11, (py_V11->ob_refcnt));
V11 = (CudaNdarray*)py_V11;
//std::cerr << "c_extract " << V11 << '\n';
if (V11->nd != 2)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2",
V11->nd);
V11 = NULL;
{
__failure = 12;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_12;};
}
//std::cerr << "c_extract " << V11 << " nd check passed\n";
assert(V11);
Py_INCREF(py_V11);
}
else if (py_V11 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V11 = NULL;
{
__failure = 12;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_12;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V11, (py_V11->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V11 = NULL;
{
__failure = 12;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_12;};
}
//std::cerr << "c_extract done " << V11 << '\n';
{
py_V13 = PyList_GET_ITEM(storage_V13, 0);
{Py_XINCREF(py_V13);}
assert(py_V13->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V13))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V13, (py_V13->ob_refcnt));
V13 = (CudaNdarray*)py_V13;
//std::cerr << "c_extract " << V13 << '\n';
if (V13->nd != 2)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2",
V13->nd);
V13 = NULL;
{
__failure = 14;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_14;};
}
//std::cerr << "c_extract " << V13 << " nd check passed\n";
if (CudaNdarray_HOST_DIMS(V13)[0] != 1)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i",
CudaNdarray_HOST_DIMS(V13)[0], 0);
V13 = NULL;
{
__failure = 14;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_14;};
}
//std::cerr << "c_extract " << V13 << "dim check 0 passed\n";
//std::cerr << "c_extract " << V13 << "checking bcast 0 <" << V13->str<< ">\n";
//std::cerr << "c_extract " << V13->str[0] << "\n";
if (CudaNdarray_HOST_STRIDES(V13)[0])
{
//std::cerr << "c_extract bad stride detected...\n";
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i",
CudaNdarray_HOST_STRIDES(V13)[0], 0);
V13 = NULL;
{
__failure = 14;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_14;};
}
//std::cerr << "c_extract " << V13 << "bcast check 0 passed\n";
if (CudaNdarray_HOST_DIMS(V13)[1] != 1)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i",
CudaNdarray_HOST_DIMS(V13)[1], 1);
V13 = NULL;
{
__failure = 14;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_14;};
}
//std::cerr << "c_extract " << V13 << "dim check 1 passed\n";
//std::cerr << "c_extract " << V13 << "checking bcast 1 <" << V13->str<< ">\n";
//std::cerr << "c_extract " << V13->str[1] << "\n";
if (CudaNdarray_HOST_STRIDES(V13)[1])
{
//std::cerr << "c_extract bad stride detected...\n";
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i",
CudaNdarray_HOST_STRIDES(V13)[1], 1);
V13 = NULL;
{
__failure = 14;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_14;};
}
//std::cerr << "c_extract " << V13 << "bcast check 1 passed\n";
assert(V13);
Py_INCREF(py_V13);
}
else if (py_V13 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V13 = NULL;
{
__failure = 14;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_14;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V13, (py_V13->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V13 = NULL;
{
__failure = 14;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_14;};
}
//std::cerr << "c_extract done " << V13 << '\n';
{
py_V15 = PyList_GET_ITEM(storage_V15, 0);
{Py_XINCREF(py_V15);}
assert(py_V15->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V15))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V15, (py_V15->ob_refcnt));
V15 = (CudaNdarray*)py_V15;
//std::cerr << "c_extract " << V15 << '\n';
if (V15->nd != 2)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2",
V15->nd);
V15 = NULL;
{
__failure = 16;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_16;};
}
//std::cerr << "c_extract " << V15 << " nd check passed\n";
if (CudaNdarray_HOST_DIMS(V15)[0] != 1)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i",
CudaNdarray_HOST_DIMS(V15)[0], 0);
V15 = NULL;
{
__failure = 16;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_16;};
}
//std::cerr << "c_extract " << V15 << "dim check 0 passed\n";
//std::cerr << "c_extract " << V15 << "checking bcast 0 <" << V15->str<< ">\n";
//std::cerr << "c_extract " << V15->str[0] << "\n";
if (CudaNdarray_HOST_STRIDES(V15)[0])
{
//std::cerr << "c_extract bad stride detected...\n";
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i",
CudaNdarray_HOST_STRIDES(V15)[0], 0);
V15 = NULL;
{
__failure = 16;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_16;};
}
//std::cerr << "c_extract " << V15 << "bcast check 0 passed\n";
if (CudaNdarray_HOST_DIMS(V15)[1] != 1)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i",
CudaNdarray_HOST_DIMS(V15)[1], 1);
V15 = NULL;
{
__failure = 16;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_16;};
}
//std::cerr << "c_extract " << V15 << "dim check 1 passed\n";
//std::cerr << "c_extract " << V15 << "checking bcast 1 <" << V15->str<< ">\n";
//std::cerr << "c_extract " << V15->str[1] << "\n";
if (CudaNdarray_HOST_STRIDES(V15)[1])
{
//std::cerr << "c_extract bad stride detected...\n";
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i",
CudaNdarray_HOST_STRIDES(V15)[1], 1);
V15 = NULL;
{
__failure = 16;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_16;};
}
//std::cerr << "c_extract " << V15 << "bcast check 1 passed\n";
assert(V15);
Py_INCREF(py_V15);
}
else if (py_V15 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V15 = NULL;
{
__failure = 16;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_16;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V15, (py_V15->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V15 = NULL;
{
__failure = 16;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_16;};
}
//std::cerr << "c_extract done " << V15 << '\n';
{
py_V17 = PyList_GET_ITEM(storage_V17, 0);
{Py_XINCREF(py_V17);}
assert(py_V17->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V17))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V17, (py_V17->ob_refcnt));
V17 = (CudaNdarray*)py_V17;
//std::cerr << "c_extract " << V17 << '\n';
if (V17->nd != 2)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2",
V17->nd);
V17 = NULL;
{
__failure = 18;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_18;};
}
//std::cerr << "c_extract " << V17 << " nd check passed\n";
assert(V17);
Py_INCREF(py_V17);
}
else if (py_V17 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V17 = NULL;
{
__failure = 18;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_18;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V17, (py_V17->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V17 = NULL;
{
__failure = 18;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_18;};
}
//std::cerr << "c_extract done " << V17 << '\n';
{
py_V19 = PyList_GET_ITEM(storage_V19, 0);
{Py_XINCREF(py_V19);}
assert(py_V19->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V19))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V19, (py_V19->ob_refcnt));
V19 = (CudaNdarray*)py_V19;
//std::cerr << "c_extract " << V19 << '\n';
if (V19->nd != 2)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2",
V19->nd);
V19 = NULL;
{
__failure = 20;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_20;};
}
//std::cerr << "c_extract " << V19 << " nd check passed\n";
assert(V19);
Py_INCREF(py_V19);
}
else if (py_V19 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V19 = NULL;
{
__failure = 20;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_20;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V19, (py_V19->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V19 = NULL;
{
__failure = 20;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_20;};
}
//std::cerr << "c_extract done " << V19 << '\n';
{
py_V21 = PyList_GET_ITEM(storage_V21, 0);
{Py_XINCREF(py_V21);}
assert(py_V21->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V21))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V21, (py_V21->ob_refcnt));
V21 = (CudaNdarray*)py_V21;
//std::cerr << "c_extract " << V21 << '\n';
if (V21->nd != 2)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2",
V21->nd);
V21 = NULL;
{
__failure = 22;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_22;};
}
//std::cerr << "c_extract " << V21 << " nd check passed\n";
assert(V21);
Py_INCREF(py_V21);
}
else if (py_V21 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V21 = NULL;
{
__failure = 22;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_22;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V21, (py_V21->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V21 = NULL;
{
__failure = 22;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_22;};
}
//std::cerr << "c_extract done " << V21 << '\n';
{
py_V23 = PyList_GET_ITEM(storage_V23, 0);
{Py_XINCREF(py_V23);}
assert(py_V23->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V23))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V23, (py_V23->ob_refcnt));
V23 = (CudaNdarray*)py_V23;
//std::cerr << "c_extract " << V23 << '\n';
if (V23->nd != 2)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2",
V23->nd);
V23 = NULL;
{
__failure = 24;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_24;};
}
//std::cerr << "c_extract " << V23 << " nd check passed\n";
if (CudaNdarray_HOST_DIMS(V23)[0] != 1)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i",
CudaNdarray_HOST_DIMS(V23)[0], 0);
V23 = NULL;
{
__failure = 24;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_24;};
}
//std::cerr << "c_extract " << V23 << "dim check 0 passed\n";
//std::cerr << "c_extract " << V23 << "checking bcast 0 <" << V23->str<< ">\n";
//std::cerr << "c_extract " << V23->str[0] << "\n";
if (CudaNdarray_HOST_STRIDES(V23)[0])
{
//std::cerr << "c_extract bad stride detected...\n";
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i",
CudaNdarray_HOST_STRIDES(V23)[0], 0);
V23 = NULL;
{
__failure = 24;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_24;};
}
//std::cerr << "c_extract " << V23 << "bcast check 0 passed\n";
if (CudaNdarray_HOST_DIMS(V23)[1] != 1)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i",
CudaNdarray_HOST_DIMS(V23)[1], 1);
V23 = NULL;
{
__failure = 24;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_24;};
}
//std::cerr << "c_extract " << V23 << "dim check 1 passed\n";
//std::cerr << "c_extract " << V23 << "checking bcast 1 <" << V23->str<< ">\n";
//std::cerr << "c_extract " << V23->str[1] << "\n";
if (CudaNdarray_HOST_STRIDES(V23)[1])
{
//std::cerr << "c_extract bad stride detected...\n";
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i",
CudaNdarray_HOST_STRIDES(V23)[1], 1);
V23 = NULL;
{
__failure = 24;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_24;};
}
//std::cerr << "c_extract " << V23 << "bcast check 1 passed\n";
assert(V23);
Py_INCREF(py_V23);
}
else if (py_V23 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V23 = NULL;
{
__failure = 24;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_24;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V23, (py_V23->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V23 = NULL;
{
__failure = 24;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_24;};
}
//std::cerr << "c_extract done " << V23 << '\n';
{
// Op class GpuElemwise
//std::cerr << "C_CODE Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))} START\n";
//standard elemwise size checks
int dims[2] = {1,1};
int broadcasts_V3[2] = {0, 0};
int broadcasts_V5[2] = {1, 1};
int broadcasts_V7[2] = {0, 1};
int broadcasts_V9[2] = {0, 0};
int broadcasts_V11[2] = {0, 0};
int broadcasts_V13[2] = {1, 1};
int broadcasts_V15[2] = {1, 1};
int broadcasts_V17[2] = {0, 0};
int broadcasts_V19[2] = {0, 0};
int broadcasts_V21[2] = {0, 0};
int broadcasts_V23[2] = {1, 1};
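        // The broadcasts_* tables mirror each input's broadcastable pattern
        // (1 = that axis is declared broadcastable, i.e. always length 1): the
        // scalar-like (1,1) inputs get {1, 1} and the column input gets {0, 1}.
        // In the checks below an input axis only has to match dims[i] when its
        // broadcasts flag is 0.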
//std::cerr << "C_CODE Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))} checking input V3\n";
if (2 != V3->nd)
{
PyErr_Format(PyExc_TypeError,
"need 2 dims, not %i", V3->nd);
{
__failure = 25;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_25;};
}
for (int i = 0; i< 2; ++i)
{
dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V3)[i] : dims[i];
if ((!(broadcasts_V3[i] &&
CudaNdarray_HOST_DIMS(V3)[i] == 1)) &&
(dims[i] != CudaNdarray_HOST_DIMS(V3)[i]))
{
//std::cerr << "C_CODE Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))} checking input V3 failed\n";
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Input dimension mis-match. Input"
" 0 (indices start at 0) has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V3)[i],
dims[i]
);
{
__failure = 25;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_25;};
}
}
//std::cerr << "C_CODE Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))} checking input V5\n";
if (2 != V5->nd)
{
PyErr_Format(PyExc_TypeError,
"need 2 dims, not %i", V5->nd);
{
__failure = 25;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_25;};
}
for (int i = 0; i< 2; ++i)
{
dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V5)[i] : dims[i];
if ((!(broadcasts_V5[i] &&
CudaNdarray_HOST_DIMS(V5)[i] == 1)) &&
(dims[i] != CudaNdarray_HOST_DIMS(V5)[i]))
{
//std::cerr << "C_CODE Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))} checking input V5 failed\n";
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Input dimension mis-match. Input"
" 1 (indices start at 0) has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V5)[i],
dims[i]
);
{
__failure = 25;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_25;};
}
}
//std::cerr << "C_CODE Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))} checking input V7\n";
if (2 != V7->nd)
{
PyErr_Format(PyExc_TypeError,
"need 2 dims, not %i", V7->nd);
{
__failure = 25;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_25;};
}
for (int i = 0; i< 2; ++i)
{
dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V7)[i] : dims[i];
if ((!(broadcasts_V7[i] &&
CudaNdarray_HOST_DIMS(V7)[i] == 1)) &&
(dims[i] != CudaNdarray_HOST_DIMS(V7)[i]))
{
//std::cerr << "C_CODE Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))} checking input V7 failed\n";
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Input dimension mis-match. Input"
" 2 (indices start at 0) has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V7)[i],
dims[i]
);
{
__failure = 25;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_25;};
}
}
//std::cerr << "C_CODE Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))} checking input V9\n";
if (2 != V9->nd)
{
PyErr_Format(PyExc_TypeError,
"need 2 dims, not %i", V9->nd);
{
__failure = 25;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_25;};
}
for (int i = 0; i< 2; ++i)
{
dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V9)[i] : dims[i];
if ((!(broadcasts_V9[i] &&
CudaNdarray_HOST_DIMS(V9)[i] == 1)) &&
(dims[i] != CudaNdarray_HOST_DIMS(V9)[i]))
{
//std::cerr << "C_CODE Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))} checking input V9 failed\n";
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Input dimension mis-match. Input"
" 3 (indices start at 0) has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V9)[i],
dims[i]
);
{
__failure = 25;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_25;};
}
}
//std::cerr << "C_CODE Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))} checking input V11\n";
if (2 != V11->nd)
{
PyErr_Format(PyExc_TypeError,
"need 2 dims, not %i", V11->nd);
{
__failure = 25;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_25;};
}
for (int i = 0; i< 2; ++i)
{
dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V11)[i] : dims[i];
if ((!(broadcasts_V11[i] &&
CudaNdarray_HOST_DIMS(V11)[i] == 1)) &&
(dims[i] != CudaNdarray_HOST_DIMS(V11)[i]))
{
//std::cerr << "C_CODE Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))} checking input V11 failed\n";
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Input dimension mis-match. Input"
" 4 (indices start at 0) has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V11)[i],
dims[i]
);
{
__failure = 25;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_25;};
}
}
//std::cerr << "C_CODE Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))} checking input V13\n";
if (2 != V13->nd)
{
PyErr_Format(PyExc_TypeError,
"need 2 dims, not %i", V13->nd);
{
__failure = 25;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_25;};
}
for (int i = 0; i< 2; ++i)
{
dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V13)[i] : dims[i];
if ((!(broadcasts_V13[i] &&
CudaNdarray_HOST_DIMS(V13)[i] == 1)) &&
(dims[i] != CudaNdarray_HOST_DIMS(V13)[i]))
{
//std::cerr << "C_CODE Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))} checking input V13 failed\n";
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Input dimension mis-match. Input"
" 5 (indices start at 0) has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V13)[i],
dims[i]
);
{
__failure = 25;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_25;};
}
}
//std::cerr << "C_CODE Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))} checking input V15\n";
if (2 != V15->nd)
{
PyErr_Format(PyExc_TypeError,
"need 2 dims, not %i", V15->nd);
{
__failure = 25;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_25;};
}
for (int i = 0; i< 2; ++i)
{
dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V15)[i] : dims[i];
if ((!(broadcasts_V15[i] &&
CudaNdarray_HOST_DIMS(V15)[i] == 1)) &&
(dims[i] != CudaNdarray_HOST_DIMS(V15)[i]))
{
//std::cerr << "C_CODE Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))} checking input V15 failed\n";
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Input dimension mis-match. Input"
" 6 (indices start at 0) has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V15)[i],
dims[i]
);
{
__failure = 25;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_25;};
}
}
//std::cerr << "C_CODE Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))} checking input V17\n";
if (2 != V17->nd)
{
PyErr_Format(PyExc_TypeError,
"need 2 dims, not %i", V17->nd);
{
__failure = 25;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_25;};
}
for (int i = 0; i< 2; ++i)
{
dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V17)[i] : dims[i];
if ((!(broadcasts_V17[i] &&
CudaNdarray_HOST_DIMS(V17)[i] == 1)) &&
(dims[i] != CudaNdarray_HOST_DIMS(V17)[i]))
{
//std::cerr << "C_CODE Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))} checking input V17 failed\n";
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Input dimension mis-match. Input"
" 7 (indices start at 0) has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V17)[i],
dims[i]
);
{
__failure = 25;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_25;};
}
}
//std::cerr << "C_CODE Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))} checking input V19\n";
if (2 != V19->nd)
{
PyErr_Format(PyExc_TypeError,
"need 2 dims, not %i", V19->nd);
{
__failure = 25;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_25;};
}
for (int i = 0; i< 2; ++i)
{
dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V19)[i] : dims[i];
if ((!(broadcasts_V19[i] &&
CudaNdarray_HOST_DIMS(V19)[i] == 1)) &&
(dims[i] != CudaNdarray_HOST_DIMS(V19)[i]))
{
//std::cerr << "C_CODE Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))} checking input V19 failed\n";
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Input dimension mis-match. Input"
" 8 (indices start at 0) has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V19)[i],
dims[i]
);
{
__failure = 25;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_25;};
}
}
//std::cerr << "C_CODE Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))} checking input V21\n";
if (2 != V21->nd)
{
PyErr_Format(PyExc_TypeError,
"need 2 dims, not %i", V21->nd);
{
__failure = 25;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_25;};
}
for (int i = 0; i< 2; ++i)
{
dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V21)[i] : dims[i];
if ((!(broadcasts_V21[i] &&
CudaNdarray_HOST_DIMS(V21)[i] == 1)) &&
(dims[i] != CudaNdarray_HOST_DIMS(V21)[i]))
{
//std::cerr << "C_CODE Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))} checking input V21 failed\n";
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Input dimension mis-match. Input"
" 9 (indices start at 0) has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V21)[i],
dims[i]
);
{
__failure = 25;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_25;};
}
}
//std::cerr << "C_CODE Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))} checking input V23\n";
if (2 != V23->nd)
{
PyErr_Format(PyExc_TypeError,
"need 2 dims, not %i", V23->nd);
{
__failure = 25;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_25;};
}
for (int i = 0; i< 2; ++i)
{
dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V23)[i] : dims[i];
if ((!(broadcasts_V23[i] &&
CudaNdarray_HOST_DIMS(V23)[i] == 1)) &&
(dims[i] != CudaNdarray_HOST_DIMS(V23)[i]))
{
//std::cerr << "C_CODE Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))} checking input V23 failed\n";
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Input dimension mis-match. Input"
" 10 (indices start at 0) has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V23)[i],
dims[i]
);
{
__failure = 25;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_25;};
}
}
Py_XDECREF(V1);
V1 = V3;
Py_INCREF(V1);
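        // Inplace note: this node's destroy_map is {0: [0]}, so output 0 reuses
        // input 0's storage; V1 simply aliases V3 instead of allocating a fresh
        // CudaNdarray, and the loop below only verifies that the reused buffer
        // already has the broadcast output shape.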
for (int i = 0; (i< 2) && (V1); ++i) {
if (dims[i] != CudaNdarray_HOST_DIMS(V1)[i])
{
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Output dimension mis-match. Output"
" 0 (indices start at 0), working inplace"
" on input 0, has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V1)[i],
dims[i]
);
Py_DECREF(V1);
V1 = NULL;
{
__failure = 25;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_25;};
}
}
//std::cerr << "ELEMWISE NEW V1 nd" << V1->nd << "\n";
//std::cerr << "ELEMWISE NEW V1 data" << V1->devdata << "\n";
{
//new block so that failure gotos don't skip over variable initialization
//std::cerr << "calling callkernel\n";
if (callkernel_node_8da615e4463fd0663090428e75863d87_0(1, 0, dims
, CudaNdarray_DEV_DATA(V3), CudaNdarray_HOST_STRIDES(V3)
, CudaNdarray_DEV_DATA(V5), CudaNdarray_HOST_STRIDES(V5)
, CudaNdarray_DEV_DATA(V7), CudaNdarray_HOST_STRIDES(V7)
, CudaNdarray_DEV_DATA(V9), CudaNdarray_HOST_STRIDES(V9)
, CudaNdarray_DEV_DATA(V11), CudaNdarray_HOST_STRIDES(V11)
, CudaNdarray_DEV_DATA(V13), CudaNdarray_HOST_STRIDES(V13)
, CudaNdarray_DEV_DATA(V15), CudaNdarray_HOST_STRIDES(V15)
, CudaNdarray_DEV_DATA(V17), CudaNdarray_HOST_STRIDES(V17)
, CudaNdarray_DEV_DATA(V19), CudaNdarray_HOST_STRIDES(V19)
, CudaNdarray_DEV_DATA(V21), CudaNdarray_HOST_STRIDES(V21)
, CudaNdarray_DEV_DATA(V23), CudaNdarray_HOST_STRIDES(V23)
, CudaNdarray_DEV_DATA(V1), CudaNdarray_HOST_STRIDES(V1)
))
{
// error
Py_DECREF(V1);
V1 = NULL;
{
__failure = 25;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_25;};
}
else // no error
{
}
}
//std::cerr << "C_CODE Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))} END\n";
__label_25:
double __DUMMY_25;
}
__label_24:
//std::cerr << "cleanup " << py_V23 << " " << V23 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V23, (py_V23->ob_refcnt));
if (V23)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V23, (V23->ob_refcnt));
Py_XDECREF(V23);
}
//std::cerr << "cleanup done" << py_V23 << "\n";
{Py_XDECREF(py_V23);}
double __DUMMY_24;
}
__label_22:
//std::cerr << "cleanup " << py_V21 << " " << V21 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V21, (py_V21->ob_refcnt));
if (V21)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V21, (V21->ob_refcnt));
Py_XDECREF(V21);
}
//std::cerr << "cleanup done" << py_V21 << "\n";
{Py_XDECREF(py_V21);}
double __DUMMY_22;
}
__label_20:
//std::cerr << "cleanup " << py_V19 << " " << V19 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V19, (py_V19->ob_refcnt));
if (V19)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V19, (V19->ob_refcnt));
Py_XDECREF(V19);
}
//std::cerr << "cleanup done" << py_V19 << "\n";
{Py_XDECREF(py_V19);}
double __DUMMY_20;
}
__label_18:
//std::cerr << "cleanup " << py_V17 << " " << V17 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V17, (py_V17->ob_refcnt));
if (V17)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V17, (V17->ob_refcnt));
Py_XDECREF(V17);
}
//std::cerr << "cleanup done" << py_V17 << "\n";
{Py_XDECREF(py_V17);}
double __DUMMY_18;
}
__label_16:
//std::cerr << "cleanup " << py_V15 << " " << V15 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V15, (py_V15->ob_refcnt));
if (V15)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V15, (V15->ob_refcnt));
Py_XDECREF(V15);
}
//std::cerr << "cleanup done" << py_V15 << "\n";
{Py_XDECREF(py_V15);}
double __DUMMY_16;
}
__label_14:
//std::cerr << "cleanup " << py_V13 << " " << V13 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V13, (py_V13->ob_refcnt));
if (V13)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V13, (V13->ob_refcnt));
Py_XDECREF(V13);
}
//std::cerr << "cleanup done" << py_V13 << "\n";
{Py_XDECREF(py_V13);}
double __DUMMY_14;
}
__label_12:
//std::cerr << "cleanup " << py_V11 << " " << V11 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V11, (py_V11->ob_refcnt));
if (V11)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V11, (V11->ob_refcnt));
Py_XDECREF(V11);
}
//std::cerr << "cleanup done" << py_V11 << "\n";
{Py_XDECREF(py_V11);}
double __DUMMY_12;
}
__label_10:
//std::cerr << "cleanup " << py_V9 << " " << V9 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V9, (py_V9->ob_refcnt));
if (V9)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V9, (V9->ob_refcnt));
Py_XDECREF(V9);
}
//std::cerr << "cleanup done" << py_V9 << "\n";
{Py_XDECREF(py_V9);}
double __DUMMY_10;
}
__label_8:
//std::cerr << "cleanup " << py_V7 << " " << V7 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V7, (py_V7->ob_refcnt));
if (V7)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V7, (V7->ob_refcnt));
Py_XDECREF(V7);
}
//std::cerr << "cleanup done" << py_V7 << "\n";
{Py_XDECREF(py_V7);}
double __DUMMY_8;
}
__label_6:
//std::cerr << "cleanup " << py_V5 << " " << V5 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V5, (py_V5->ob_refcnt));
if (V5)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V5, (V5->ob_refcnt));
Py_XDECREF(V5);
}
//std::cerr << "cleanup done" << py_V5 << "\n";
{Py_XDECREF(py_V5);}
double __DUMMY_6;
}
__label_4:
//std::cerr << "cleanup " << py_V3 << " " << V3 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt));
if (V3)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V3, (V3->ob_refcnt));
Py_XDECREF(V3);
}
//std::cerr << "cleanup done" << py_V3 << "\n";
{Py_XDECREF(py_V3);}
double __DUMMY_4;
}
__label_2:
if (!__failure) {
//std::cerr << "sync\n";
if (NULL == V1) {
// failure: sync None to storage
Py_XDECREF(py_V1);
py_V1 = Py_None;
Py_INCREF(py_V1);
}
else
{
if (py_V1 != (PyObject*)V1)
{
Py_XDECREF(py_V1);
py_V1 = (PyObject*)V1;
Py_INCREF(py_V1);
}
assert(py_V1->ob_refcnt);
}
PyObject* old = PyList_GET_ITEM(storage_V1, 0);
{Py_XINCREF(py_V1);}
PyList_SET_ITEM(storage_V1, 0, py_V1);
{Py_XDECREF(old);}
}
//std::cerr << "cleanup " << py_V1 << " " << V1 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt));
if (V1)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V1, (V1->ob_refcnt));
Py_XDECREF(V1);
}
//std::cerr << "cleanup done" << py_V1 << "\n";
{Py_XDECREF(py_V1);}
double __DUMMY_2;
}
if (__failure) {
// When there is a failure, this code puts the exception
// in __ERROR.
PyObject* err_type = NULL;
PyObject* err_msg = NULL;
PyObject* err_traceback = NULL;
PyErr_Fetch(&err_type, &err_msg, &err_traceback);
if (!err_type) {err_type = Py_None;Py_INCREF(Py_None);}
if (!err_msg) {err_msg = Py_None; Py_INCREF(Py_None);}
if (!err_traceback) {err_traceback = Py_None; Py_INCREF(Py_None);}
PyObject* old_err_type = PyList_GET_ITEM(__ERROR, 0);
PyObject* old_err_msg = PyList_GET_ITEM(__ERROR, 1);
PyObject* old_err_traceback = PyList_GET_ITEM(__ERROR, 2);
PyList_SET_ITEM(__ERROR, 0, err_type);
PyList_SET_ITEM(__ERROR, 1, err_msg);
PyList_SET_ITEM(__ERROR, 2, err_traceback);
{Py_XDECREF(old_err_type);}
{Py_XDECREF(old_err_msg);}
{Py_XDECREF(old_err_traceback);}
}
// The failure code is returned to index what code block failed.
return __failure;
}
};
}
static int __struct_compiled_op_8da615e4463fd0663090428e75863d87_executor(__struct_compiled_op_8da615e4463fd0663090428e75863d87* self) {
return self->run();
}
static void __struct_compiled_op_8da615e4463fd0663090428e75863d87_destructor(void* executor, void* self) {
delete ((__struct_compiled_op_8da615e4463fd0663090428e75863d87*)self);
}
//////////////////////
//// Functions
//////////////////////
static PyObject * instantiate(PyObject * self, PyObject *argtuple) {
assert(PyTuple_Check(argtuple));
if (13 != PyTuple_Size(argtuple)){
PyErr_Format(PyExc_TypeError, "Wrong number of arguments, expected 13, got %i", (int)PyTuple_Size(argtuple));
return NULL;
}
__struct_compiled_op_8da615e4463fd0663090428e75863d87* struct_ptr = new __struct_compiled_op_8da615e4463fd0663090428e75863d87();
if (struct_ptr->init( PyTuple_GET_ITEM(argtuple, 0),PyTuple_GET_ITEM(argtuple, 1),PyTuple_GET_ITEM(argtuple, 2),PyTuple_GET_ITEM(argtuple, 3),PyTuple_GET_ITEM(argtuple, 4),PyTuple_GET_ITEM(argtuple, 5),PyTuple_GET_ITEM(argtuple, 6),PyTuple_GET_ITEM(argtuple, 7),PyTuple_GET_ITEM(argtuple, 8),PyTuple_GET_ITEM(argtuple, 9),PyTuple_GET_ITEM(argtuple, 10),PyTuple_GET_ITEM(argtuple, 11),PyTuple_GET_ITEM(argtuple, 12) ) != 0) {
delete struct_ptr;
return NULL;
}
PyObject* thunk = PyCObject_FromVoidPtrAndDesc((void*)(&__struct_compiled_op_8da615e4463fd0663090428e75863d87_executor), struct_ptr, __struct_compiled_op_8da615e4463fd0663090428e75863d87_destructor);
return thunk; }
//////////////////////
//// Module init
//////////////////////
static PyMethodDef MyMethods[] = {
{"instantiate", instantiate, METH_VARARGS, "undocumented"} ,
{NULL, NULL, 0, NULL}
};
PyMODINIT_FUNC init8da615e4463fd0663090428e75863d87(void){
(void) Py_InitModule("8da615e4463fd0663090428e75863d87", MyMethods);
}
| b3415a4048a3aa0643fa9da00289b997bc08c4d3.cu |
#include <Python.h>
#include <iostream>
#include "theano_mod_helper.h"
#include "cuda_ndarray.cuh"
//////////////////////
//// Support Code
//////////////////////
#define INTDIV_POW2(a, b) (a >> b)
#define INTMOD_POW2(a, b) (a & ((1<<b)-1))
// GpuElemwise{Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))}}[(0, 0)]
// node.op.destroy_map={0: [0]}
// Input 0 CudaNdarrayType(float32, matrix)
// Input 1 CudaNdarrayType(float32, (True, True))
// Input 2 CudaNdarrayType(float32, col)
// Input 3 CudaNdarrayType(float32, matrix)
// Input 4 CudaNdarrayType(float32, matrix)
// Input 5 CudaNdarrayType(float32, (True, True))
// Input 6 CudaNdarrayType(float32, (True, True))
// Input 7 CudaNdarrayType(float32, matrix)
// Input 8 CudaNdarrayType(float32, matrix)
// Input 9 CudaNdarrayType(float32, matrix)
// Input 10 CudaNdarrayType(float32, (True, True))
// Output 0 CudaNdarrayType(float32, matrix)
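// Per output element, the fused scalar graph above evaluates (in float32):
//   o0 = i0 * ( (i1*i2*i3*i4)/(i5*i6*i7) + (i2*i8*i4)/(i5*i6*i9) ) * (i10 - i4)
// with i1, i5, i6 and i10 broadcast from 1x1 arrays and i2 (a column) broadcast
// along its second axis. The kernels below are specializations of this same
// expression for a collapsed 1-D layout, a general strided 2-D layout, and a
// fully C-contiguous layout.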
static __global__ void kernel_Composite_node_8da615e4463fd0663090428e75863d87_0_1(unsigned int numEls
, const int dim0
, const float * i0_data, int i0_str_0
, const float * i1_data, int i1_str_0
, const float * i2_data, int i2_str_0
, const float * i3_data, int i3_str_0
, const float * i4_data, int i4_str_0
, const float * i5_data, int i5_str_0
, const float * i6_data, int i6_str_0
, const float * i7_data, int i7_str_0
, const float * i8_data, int i8_str_0
, const float * i9_data, int i9_str_0
, const float * i10_data, int i10_str_0
, float * o0_data, int o0_str_0
)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int numThreads = blockDim.x * gridDim.x;
const float ii_i1_value = i1_data[0];
const float ii_i5_value = i5_data[0];
const float ii_i6_value = i6_data[0];
const float ii_i10_value = i10_data[0];
for (int i = idx; i < numEls; i += numThreads) {
int ii = i;
const float * ii_i0_data = i0_data;
const float * ii_i2_data = i2_data;
const float * ii_i3_data = i3_data;
const float * ii_i4_data = i4_data;
const float * ii_i7_data = i7_data;
const float * ii_i8_data = i8_data;
const float * ii_i9_data = i9_data;
float * ii_o0_data = o0_data;
int pos0 = ii;
ii_i0_data += pos0 * i0_str_0;
ii_i2_data += pos0 * i2_str_0;
ii_i3_data += pos0 * i3_str_0;
ii_i4_data += pos0 * i4_str_0;
ii_i7_data += pos0 * i7_str_0;
ii_i8_data += pos0 * i8_str_0;
ii_i9_data += pos0 * i9_str_0;
ii_o0_data += pos0 * o0_str_0;
npy_float32 o0_i;
{
npy_float32 V_DUMMY_ID__tmp1;
V_DUMMY_ID__tmp1 = ii_i10_value - ii_i4_data[0];
npy_float32 V_DUMMY_ID__tmp2;
V_DUMMY_ID__tmp2 = ii_i5_value * ii_i6_value * ii_i9_data[0];
npy_float32 V_DUMMY_ID__tmp3;
V_DUMMY_ID__tmp3 = ii_i2_data[0] * ii_i8_data[0] * ii_i4_data[0];
npy_float32 V_DUMMY_ID__tmp4;
V_DUMMY_ID__tmp4 = ii_i5_value * ii_i6_value * ii_i7_data[0];
npy_float32 V_DUMMY_ID__tmp5;
V_DUMMY_ID__tmp5 = ii_i1_value * ii_i2_data[0] * ii_i3_data[0] * ii_i4_data[0];
npy_float32 V_DUMMY_ID__tmp6;
V_DUMMY_ID__tmp6 = V_DUMMY_ID__tmp3 / V_DUMMY_ID__tmp2;
npy_float32 V_DUMMY_ID__tmp7;
V_DUMMY_ID__tmp7 = V_DUMMY_ID__tmp5 / V_DUMMY_ID__tmp4;
npy_float32 V_DUMMY_ID__tmp8;
V_DUMMY_ID__tmp8 = V_DUMMY_ID__tmp7 + V_DUMMY_ID__tmp6;
o0_i = ii_i0_data[0] * V_DUMMY_ID__tmp8 * V_DUMMY_ID__tmp1;
}
ii_o0_data[0] = o0_i;
}
}
// GpuElemwise{Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))}}[(0, 0)]
// node.op.destroy_map={0: [0]}
// Input 0 CudaNdarrayType(float32, matrix)
// Input 1 CudaNdarrayType(float32, (True, True))
// Input 2 CudaNdarrayType(float32, col)
// Input 3 CudaNdarrayType(float32, matrix)
// Input 4 CudaNdarrayType(float32, matrix)
// Input 5 CudaNdarrayType(float32, (True, True))
// Input 6 CudaNdarrayType(float32, (True, True))
// Input 7 CudaNdarrayType(float32, matrix)
// Input 8 CudaNdarrayType(float32, matrix)
// Input 9 CudaNdarrayType(float32, matrix)
// Input 10 CudaNdarrayType(float32, (True, True))
// Output 0 CudaNdarrayType(float32, matrix)
static __global__ void kernel_Composite_node_8da615e4463fd0663090428e75863d87_0_2(unsigned int numEls
, const int dim0, const int dim1
, const float * i0_data, int i0_str_0, int i0_str_1
, const float * i1_data, int i1_str_0, int i1_str_1
, const float * i2_data, int i2_str_0, int i2_str_1
, const float * i3_data, int i3_str_0, int i3_str_1
, const float * i4_data, int i4_str_0, int i4_str_1
, const float * i5_data, int i5_str_0, int i5_str_1
, const float * i6_data, int i6_str_0, int i6_str_1
, const float * i7_data, int i7_str_0, int i7_str_1
, const float * i8_data, int i8_str_0, int i8_str_1
, const float * i9_data, int i9_str_0, int i9_str_1
, const float * i10_data, int i10_str_0, int i10_str_1
, float * o0_data, int o0_str_0, int o0_str_1
)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int numThreads = blockDim.x * gridDim.x;
const float ii_i1_value = i1_data[0];
const float ii_i5_value = i5_data[0];
const float ii_i6_value = i6_data[0];
const float ii_i10_value = i10_data[0];
for (int i = idx; i < numEls; i += numThreads) {
int ii = i;
const float * ii_i0_data = i0_data;
const float * ii_i2_data = i2_data;
const float * ii_i3_data = i3_data;
const float * ii_i4_data = i4_data;
const float * ii_i7_data = i7_data;
const float * ii_i8_data = i8_data;
const float * ii_i9_data = i9_data;
float * ii_o0_data = o0_data;
int pos1 = ii % dim1;
ii = ii / dim1;
ii_i0_data += pos1 * i0_str_1;
ii_i2_data += pos1 * i2_str_1;
ii_i3_data += pos1 * i3_str_1;
ii_i4_data += pos1 * i4_str_1;
ii_i7_data += pos1 * i7_str_1;
ii_i8_data += pos1 * i8_str_1;
ii_i9_data += pos1 * i9_str_1;
ii_o0_data += pos1 * o0_str_1;
int pos0 = ii;
ii_i0_data += pos0 * i0_str_0;
ii_i2_data += pos0 * i2_str_0;
ii_i3_data += pos0 * i3_str_0;
ii_i4_data += pos0 * i4_str_0;
ii_i7_data += pos0 * i7_str_0;
ii_i8_data += pos0 * i8_str_0;
ii_i9_data += pos0 * i9_str_0;
ii_o0_data += pos0 * o0_str_0;
npy_float32 o0_i;
{
npy_float32 V_DUMMY_ID__tmp1;
V_DUMMY_ID__tmp1 = ii_i10_value - ii_i4_data[0];
npy_float32 V_DUMMY_ID__tmp2;
V_DUMMY_ID__tmp2 = ii_i5_value * ii_i6_value * ii_i9_data[0];
npy_float32 V_DUMMY_ID__tmp3;
V_DUMMY_ID__tmp3 = ii_i2_data[0] * ii_i8_data[0] * ii_i4_data[0];
npy_float32 V_DUMMY_ID__tmp4;
V_DUMMY_ID__tmp4 = ii_i5_value * ii_i6_value * ii_i7_data[0];
npy_float32 V_DUMMY_ID__tmp5;
V_DUMMY_ID__tmp5 = ii_i1_value * ii_i2_data[0] * ii_i3_data[0] * ii_i4_data[0];
npy_float32 V_DUMMY_ID__tmp6;
V_DUMMY_ID__tmp6 = V_DUMMY_ID__tmp3 / V_DUMMY_ID__tmp2;
npy_float32 V_DUMMY_ID__tmp7;
V_DUMMY_ID__tmp7 = V_DUMMY_ID__tmp5 / V_DUMMY_ID__tmp4;
npy_float32 V_DUMMY_ID__tmp8;
V_DUMMY_ID__tmp8 = V_DUMMY_ID__tmp7 + V_DUMMY_ID__tmp6;
o0_i = ii_i0_data[0] * V_DUMMY_ID__tmp8 * V_DUMMY_ID__tmp1;
}
ii_o0_data[0] = o0_i;
}
}
// GpuElemwise{Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))}}[(0, 0)]
// node.op.destroy_map={0: [0]}
// Input 0 CudaNdarrayType(float32, matrix)
// Input 1 CudaNdarrayType(float32, (True, True))
// Input 2 CudaNdarrayType(float32, col)
// Input 3 CudaNdarrayType(float32, matrix)
// Input 4 CudaNdarrayType(float32, matrix)
// Input 5 CudaNdarrayType(float32, (True, True))
// Input 6 CudaNdarrayType(float32, (True, True))
// Input 7 CudaNdarrayType(float32, matrix)
// Input 8 CudaNdarrayType(float32, matrix)
// Input 9 CudaNdarrayType(float32, matrix)
// Input 10 CudaNdarrayType(float32, (True, True))
// Output 0 CudaNdarrayType(float32, matrix)
static __global__ void kernel_Composite_node_8da615e4463fd0663090428e75863d87_0_Ccontiguous (unsigned int numEls
, const float * i0_data
, const float * i1_data
, const float * i2_data
, const float * i3_data
, const float * i4_data
, const float * i5_data
, const float * i6_data
, const float * i7_data
, const float * i8_data
, const float * i9_data
, const float * i10_data
, float * o0_data
)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int numThreads = blockDim.x * gridDim.x;
const float ii_i1_value = i1_data[0];
const float ii_i5_value = i5_data[0];
const float ii_i6_value = i6_data[0];
const float ii_i10_value = i10_data[0];
for (int i = idx; i < numEls; i += numThreads) {
npy_float32 o0_i;
{
npy_float32 V_DUMMY_ID__tmp1;
V_DUMMY_ID__tmp1 = ii_i10_value - i4_data[i];
npy_float32 V_DUMMY_ID__tmp2;
V_DUMMY_ID__tmp2 = ii_i5_value * ii_i6_value * i9_data[i];
npy_float32 V_DUMMY_ID__tmp3;
V_DUMMY_ID__tmp3 = i2_data[i] * i8_data[i] * i4_data[i];
npy_float32 V_DUMMY_ID__tmp4;
V_DUMMY_ID__tmp4 = ii_i5_value * ii_i6_value * i7_data[i];
npy_float32 V_DUMMY_ID__tmp5;
V_DUMMY_ID__tmp5 = ii_i1_value * i2_data[i] * i3_data[i] * i4_data[i];
npy_float32 V_DUMMY_ID__tmp6;
V_DUMMY_ID__tmp6 = V_DUMMY_ID__tmp3 / V_DUMMY_ID__tmp2;
npy_float32 V_DUMMY_ID__tmp7;
V_DUMMY_ID__tmp7 = V_DUMMY_ID__tmp5 / V_DUMMY_ID__tmp4;
npy_float32 V_DUMMY_ID__tmp8;
V_DUMMY_ID__tmp8 = V_DUMMY_ID__tmp7 + V_DUMMY_ID__tmp6;
o0_i = i0_data[i] * V_DUMMY_ID__tmp8 * V_DUMMY_ID__tmp1;
}
o0_data[i] = o0_i;
}
}
static void can_collapse_node_8da615e4463fd0663090428e75863d87_0(int nd, const int * dims, const int * strides, int collapse[])
{
//can we collapse dims[i] and dims[i-1]?
for(int i=nd-1;i>0;i--){
if(strides[i]*dims[i]==strides[i-1]){//dims[i-1] and dims[i] are laid out contiguously, so the two axes can be merged
collapse[i]=1;
}else collapse[i]=0;
}
}
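// Illustrative example (not part of the generated graph): for a C-contiguous
// 4x5 array, dims = {4, 5} and strides = {5, 1} in float elements (matching the
// pointer arithmetic in the kernels above), so strides[1]*dims[1] == strides[0]
// and collapse[1] is set to 1; the two axes can then be folded into one
// contiguous axis of 20 elements, which lets callkernel_node_... below dispatch
// a cheaper, lower-rank kernel.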
static int callkernel_node_8da615e4463fd0663090428e75863d87_0(unsigned int numEls, const int d,
const int * dims,
const float * i0_data, const int * i0_str, const float * i1_data, const int * i1_str, const float * i2_data, const int * i2_str, const float * i3_data, const int * i3_str, const float * i4_data, const int * i4_str, const float * i5_data, const int * i5_str, const float * i6_data, const int * i6_str, const float * i7_data, const int * i7_str, const float * i8_data, const int * i8_str, const float * i9_data, const int * i9_str, const float * i10_data, const int * i10_str,
float * o0_data, const int * o0_str)
{
numEls = dims[0]*dims[1]*1;
int local_dims[2];
int local_str[11][2];
int local_ostr[1][2];
int nd_collapse = 2;
for(int i=0;i<2;i++){//init new dim
local_dims[i]=dims[i];
}
for(int i=0;i<2;i++){//init new strides
local_str[0][i]=i0_str[i];
}
for(int i=0;i<2;i++){//init new strides
local_str[1][i]=i1_str[i];
}
for(int i=0;i<2;i++){//init new strides
local_str[2][i]=i2_str[i];
}
for(int i=0;i<2;i++){//init new strides
local_str[3][i]=i3_str[i];
}
for(int i=0;i<2;i++){//init new strides
local_str[4][i]=i4_str[i];
}
for(int i=0;i<2;i++){//init new strides
local_str[5][i]=i5_str[i];
}
for(int i=0;i<2;i++){//init new strides
local_str[6][i]=i6_str[i];
}
for(int i=0;i<2;i++){//init new strides
local_str[7][i]=i7_str[i];
}
for(int i=0;i<2;i++){//init new strides
local_str[8][i]=i8_str[i];
}
for(int i=0;i<2;i++){//init new strides
local_str[9][i]=i9_str[i];
}
for(int i=0;i<2;i++){//init new strides
local_str[10][i]=i10_str[i];
}
for(int i=0;i<2;i++){//init new strides
local_ostr[0][i]=o0_str[i];
}
for(int id=0;id<nd_collapse;id++){
bool all_broadcast=true;
for(int input_id=0;input_id<11;input_id++){
if(local_str[input_id][id]!=0 || local_dims[id]!=1) all_broadcast= false;
}
for(int input_id=0;input_id<1;input_id++){
if(local_ostr[input_id][id]!=0 || local_dims[id]!=1) all_broadcast= false;
}
if(all_broadcast){
for(int j=id+1;j<nd_collapse;j++)//remove dims i from the array
local_dims[j-1]=local_dims[j];
for(int input_id=0;input_id<11;input_id++){
for(int j=id+1;j<nd_collapse;j++){//remove dims i from the array
local_str[input_id][j-1]=local_str[input_id][j];
}
}
for(int output_id=0;output_id<1;output_id++){
for(int j=id+1;j<nd_collapse;j++){//remove dims i from the array
local_ostr[output_id][j-1]=local_ostr[output_id][j];
}
}
nd_collapse--; id--;
}
}
int nd_collapse_[2] = {1,1};
int nd_collapse_0[2] = {1,1};
can_collapse_node_8da615e4463fd0663090428e75863d87_0(nd_collapse, local_dims, local_str[0], nd_collapse_0);
for(int i=0;i<nd_collapse;i++){
if(nd_collapse_0[i]==0)
nd_collapse_[i]=0;
}
int nd_collapse_2[2] = {1,1};
can_collapse_node_8da615e4463fd0663090428e75863d87_0(nd_collapse, local_dims, local_str[2], nd_collapse_2);
for(int i=0;i<nd_collapse;i++){
if(nd_collapse_2[i]==0)
nd_collapse_[i]=0;
}
int nd_collapse_3[2] = {1,1};
can_collapse_node_8da615e4463fd0663090428e75863d87_0(nd_collapse, local_dims, local_str[3], nd_collapse_3);
for(int i=0;i<nd_collapse;i++){
if(nd_collapse_3[i]==0)
nd_collapse_[i]=0;
}
int nd_collapse_4[2] = {1,1};
can_collapse_node_8da615e4463fd0663090428e75863d87_0(nd_collapse, local_dims, local_str[4], nd_collapse_4);
for(int i=0;i<nd_collapse;i++){
if(nd_collapse_4[i]==0)
nd_collapse_[i]=0;
}
int nd_collapse_7[2] = {1,1};
can_collapse_node_8da615e4463fd0663090428e75863d87_0(nd_collapse, local_dims, local_str[7], nd_collapse_7);
for(int i=0;i<nd_collapse;i++){
if(nd_collapse_7[i]==0)
nd_collapse_[i]=0;
}
int nd_collapse_8[2] = {1,1};
can_collapse_node_8da615e4463fd0663090428e75863d87_0(nd_collapse, local_dims, local_str[8], nd_collapse_8);
for(int i=0;i<nd_collapse;i++){
if(nd_collapse_8[i]==0)
nd_collapse_[i]=0;
}
int nd_collapse_9[2] = {1,1};
can_collapse_node_8da615e4463fd0663090428e75863d87_0(nd_collapse, local_dims, local_str[9], nd_collapse_9);
for(int i=0;i<nd_collapse;i++){
if(nd_collapse_9[i]==0)
nd_collapse_[i]=0;
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_str[0][i-1]=local_str[0][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_str[0][j-1]=local_str[0][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_str[1][i-1]=local_str[1][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_str[1][j-1]=local_str[1][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_str[2][i-1]=local_str[2][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_str[2][j-1]=local_str[2][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_str[3][i-1]=local_str[3][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_str[3][j-1]=local_str[3][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_str[4][i-1]=local_str[4][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_str[4][j-1]=local_str[4][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_str[5][i-1]=local_str[5][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_str[5][j-1]=local_str[5][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_str[6][i-1]=local_str[6][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_str[6][j-1]=local_str[6][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_str[7][i-1]=local_str[7][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_str[7][j-1]=local_str[7][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_str[8][i-1]=local_str[8][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_str[8][j-1]=local_str[8][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_str[9][i-1]=local_str[9][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_str[9][j-1]=local_str[9][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_str[10][i-1]=local_str[10][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_str[10][j-1]=local_str[10][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_ostr[0][i-1]=local_ostr[0][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_ostr[0][j-1]=local_ostr[0][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_dims[i-1]*=local_dims[i];//set new dims
for(int j=i+1;j<nd_collapse;j++)//remove dims i from the array
local_dims[j-1]=local_dims[j];
}
}
for(int i=1, end=nd_collapse;i<end;i++){
if(nd_collapse_[i]==1)nd_collapse--;
}
if(nd_collapse == 1
&& local_str[0][nd_collapse-1]==1 && local_str[2][nd_collapse-1]==1 && local_str[3][nd_collapse-1]==1 && local_str[4][nd_collapse-1]==1 && local_str[7][nd_collapse-1]==1 && local_str[8][nd_collapse-1]==1 && local_str[9][nd_collapse-1]==1 && local_ostr[0][nd_collapse-1]==1
){nd_collapse=0;}
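// At this point nd_collapse == 0 means the single remaining axis is unit-stride
// for every non-broadcast input and for the output, so the switch below can
// fall back to the _Ccontiguous kernel and skip per-axis index arithmetic.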
if(numEls==0) return 0;
switch (nd_collapse==0?0:min(2,nd_collapse)) {
case 0: {
//first use at least a full warp
int threads_per_block = std::min(numEls, (unsigned int)32); //WARP SIZE
//next start adding multiprocessors
int n_blocks = std::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS
// next start adding more warps per multiprocessor
if (threads_per_block * n_blocks < numEls)
threads_per_block = std::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK);
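            // Worked example of this launch heuristic (illustrative numbers only):
            // for numEls = 10000, threads_per_block starts at 32 (one warp),
            // n_blocks = min(10000/32 + 1, 30) = 30, and since 32*30 < 10000 the
            // block size is raised to min(10000/30, NUM_VECTOR_OP_THREADS_PER_BLOCK);
            // any elements still left over are covered by the grid-stride loop
            // inside the kernel.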
kernel_Composite_node_8da615e4463fd0663090428e75863d87_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, i0_data, i1_data, i2_data, i3_data, i4_data, i5_data, i6_data, i7_data, i8_data, i9_data, i10_data, o0_data);
//std::cerr << "calling callkernel returned\n";
CNDA_THREAD_SYNC;
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n",
"GpuElemwise node_8da615e4463fd0663090428e75863d87_0 Composite", cudaGetErrorString(err),
n_blocks, threads_per_block,
"kernel_Composite_node_8da615e4463fd0663090428e75863d87_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, i0_data, i1_data, i2_data, i3_data, i4_data, i5_data, i6_data, i7_data, i8_data, i9_data, i10_data, o0_data)");
return -1;
}
return 0;
} break;
case 1: {
//first use at least a full warp
int threads_per_block = std::min(numEls, (unsigned int)32); //WARP SIZE
//next start adding multiprocessors
int n_blocks = std::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS
// next start adding more warps per multiprocessor
if (threads_per_block * n_blocks < numEls)
threads_per_block = std::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK);
kernel_Composite_node_8da615e4463fd0663090428e75863d87_0_1<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], i0_data, local_str[0][0], i1_data, local_str[1][0], i2_data, local_str[2][0], i3_data, local_str[3][0], i4_data, local_str[4][0], i5_data, local_str[5][0], i6_data, local_str[6][0], i7_data, local_str[7][0], i8_data, local_str[8][0], i9_data, local_str[9][0], i10_data, local_str[10][0], o0_data, local_ostr[0][0]);
CNDA_THREAD_SYNC;
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n",
"GpuElemwise node_8da615e4463fd0663090428e75863d87_0 Composite", cudaGetErrorString(err),
n_blocks, threads_per_block,
"kernel_Composite_node_8da615e4463fd0663090428e75863d87_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], i0_data, local_str[0][0], i1_data, local_str[1][0], i2_data, local_str[2][0], i3_data, local_str[3][0], i4_data, local_str[4][0], i5_data, local_str[5][0], i6_data, local_str[6][0], i7_data, local_str[7][0], i8_data, local_str[8][0], i9_data, local_str[9][0], i10_data, local_str[10][0], o0_data, local_ostr[0][0])");
return -1;
}
return 0;
} break;
case 2: {
//first use at least a full warp
int threads_per_block = std::min(numEls, (unsigned int)32); //WARP SIZE
//next start adding multiprocessors
int n_blocks = std::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS
// next start adding more warps per multiprocessor
if (threads_per_block * n_blocks < numEls)
threads_per_block = std::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK);
kernel_Composite_node_8da615e4463fd0663090428e75863d87_0_2<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], local_dims[1], i0_data, local_str[0][0], local_str[0][1], i1_data, local_str[1][0], local_str[1][1], i2_data, local_str[2][0], local_str[2][1], i3_data, local_str[3][0], local_str[3][1], i4_data, local_str[4][0], local_str[4][1], i5_data, local_str[5][0], local_str[5][1], i6_data, local_str[6][0], local_str[6][1], i7_data, local_str[7][0], local_str[7][1], i8_data, local_str[8][0], local_str[8][1], i9_data, local_str[9][0], local_str[9][1], i10_data, local_str[10][0], local_str[10][1], o0_data, local_ostr[0][0], local_ostr[0][1]);
CNDA_THREAD_SYNC;
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n",
"GpuElemwise node_8da615e4463fd0663090428e75863d87_0 Composite", cudaGetErrorString(err),
n_blocks, threads_per_block,
"kernel_Composite_node_8da615e4463fd0663090428e75863d87_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], local_dims[1], i0_data, local_str[0][0], local_str[0][1], i1_data, local_str[1][0], local_str[1][1], i2_data, local_str[2][0], local_str[2][1], i3_data, local_str[3][0], local_str[3][1], i4_data, local_str[4][0], local_str[4][1], i5_data, local_str[5][0], local_str[5][1], i6_data, local_str[6][0], local_str[6][1], i7_data, local_str[7][0], local_str[7][1], i8_data, local_str[8][0], local_str[8][1], i9_data, local_str[9][0], local_str[9][1], i10_data, local_str[10][0], local_str[10][1], o0_data, local_ostr[0][0], local_ostr[0][1])");
return -1;
}
return 0;
} break;
}
return -2;
}
namespace {
struct __struct_compiled_op_8da615e4463fd0663090428e75863d87 {
PyObject* __ERROR;
PyObject* storage_V3;
PyObject* storage_V5;
PyObject* storage_V7;
PyObject* storage_V9;
PyObject* storage_V11;
PyObject* storage_V13;
PyObject* storage_V15;
PyObject* storage_V17;
PyObject* storage_V19;
PyObject* storage_V21;
PyObject* storage_V23;
PyObject* storage_V1;
__struct_compiled_op_8da615e4463fd0663090428e75863d87() {
// This is only somewhat safe because we:
// 1) Are not a virtual class
// 2) Do not use any virtual classes in the members
// 3) Deal with mostly POD and pointers
// If this changes, we would have to revise this, but for
// now I am tired of chasing segfaults because
// initialization code had an error and some pointer has
// a junk value.
memset(this, 0, sizeof(*this));
}
~__struct_compiled_op_8da615e4463fd0663090428e75863d87(void) {
cleanup();
}
int init(PyObject* __ERROR, PyObject* storage_V3, PyObject* storage_V5, PyObject* storage_V7, PyObject* storage_V9, PyObject* storage_V11, PyObject* storage_V13, PyObject* storage_V15, PyObject* storage_V17, PyObject* storage_V19, PyObject* storage_V21, PyObject* storage_V23, PyObject* storage_V1) {
Py_XINCREF(storage_V3);
Py_XINCREF(storage_V5);
Py_XINCREF(storage_V7);
Py_XINCREF(storage_V9);
Py_XINCREF(storage_V11);
Py_XINCREF(storage_V13);
Py_XINCREF(storage_V15);
Py_XINCREF(storage_V17);
Py_XINCREF(storage_V19);
Py_XINCREF(storage_V21);
Py_XINCREF(storage_V23);
Py_XINCREF(storage_V1);
this->storage_V3 = storage_V3;
this->storage_V5 = storage_V5;
this->storage_V7 = storage_V7;
this->storage_V9 = storage_V9;
this->storage_V11 = storage_V11;
this->storage_V13 = storage_V13;
this->storage_V15 = storage_V15;
this->storage_V17 = storage_V17;
this->storage_V19 = storage_V19;
this->storage_V21 = storage_V21;
this->storage_V23 = storage_V23;
this->storage_V1 = storage_V1;
this->__ERROR = __ERROR;
return 0;
}
void cleanup(void) {
__label_1:
double __DUMMY_1;
__label_3:
double __DUMMY_3;
__label_5:
double __DUMMY_5;
__label_7:
double __DUMMY_7;
__label_9:
double __DUMMY_9;
__label_11:
double __DUMMY_11;
__label_13:
double __DUMMY_13;
__label_15:
double __DUMMY_15;
__label_17:
double __DUMMY_17;
__label_19:
double __DUMMY_19;
__label_21:
double __DUMMY_21;
__label_23:
double __DUMMY_23;
__label_26:
double __DUMMY_26;
Py_XDECREF(this->storage_V3);
Py_XDECREF(this->storage_V5);
Py_XDECREF(this->storage_V7);
Py_XDECREF(this->storage_V9);
Py_XDECREF(this->storage_V11);
Py_XDECREF(this->storage_V13);
Py_XDECREF(this->storage_V15);
Py_XDECREF(this->storage_V17);
Py_XDECREF(this->storage_V19);
Py_XDECREF(this->storage_V21);
Py_XDECREF(this->storage_V23);
Py_XDECREF(this->storage_V1);
}
int run(void) {
int __failure = 0;
PyObject* py_V1;
CudaNdarray * V1;
PyObject* py_V3;
CudaNdarray * V3;
PyObject* py_V5;
CudaNdarray * V5;
PyObject* py_V7;
CudaNdarray * V7;
PyObject* py_V9;
CudaNdarray * V9;
PyObject* py_V11;
CudaNdarray * V11;
PyObject* py_V13;
CudaNdarray * V13;
PyObject* py_V15;
CudaNdarray * V15;
PyObject* py_V17;
CudaNdarray * V17;
PyObject* py_V19;
CudaNdarray * V19;
PyObject* py_V21;
CudaNdarray * V21;
PyObject* py_V23;
CudaNdarray * V23;
{
py_V1 = PyList_GET_ITEM(storage_V1, 0);
{Py_XINCREF(py_V1);}
if (py_V1 == Py_None)
{
V1 = NULL;
}
else
{
assert(py_V1->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V1))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt));
V1 = (CudaNdarray*)py_V1;
//std::cerr << "c_extract " << V1 << '\n';
if (V1->nd != 2)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2",
V1->nd);
V1 = NULL;
{
__failure = 2;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_2;};
}
//std::cerr << "c_extract " << V1 << " nd check passed\n";
assert(V1);
Py_INCREF(py_V1);
}
else if (py_V1 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V1 = NULL;
{
__failure = 2;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_2;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V1 = NULL;
{
__failure = 2;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_2;};
}
//std::cerr << "c_extract done " << V1 << '\n';
}
{
py_V3 = PyList_GET_ITEM(storage_V3, 0);
{Py_XINCREF(py_V3);}
assert(py_V3->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V3))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt));
V3 = (CudaNdarray*)py_V3;
//std::cerr << "c_extract " << V3 << '\n';
if (V3->nd != 2)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2",
V3->nd);
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract " << V3 << " nd check passed\n";
assert(V3);
Py_INCREF(py_V3);
}
else if (py_V3 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract done " << V3 << '\n';
{
py_V5 = PyList_GET_ITEM(storage_V5, 0);
{Py_XINCREF(py_V5);}
assert(py_V5->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V5))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V5, (py_V5->ob_refcnt));
V5 = (CudaNdarray*)py_V5;
//std::cerr << "c_extract " << V5 << '\n';
if (V5->nd != 2)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2",
V5->nd);
V5 = NULL;
{
__failure = 6;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_6;};
}
//std::cerr << "c_extract " << V5 << " nd check passed\n";
if (CudaNdarray_HOST_DIMS(V5)[0] != 1)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i",
CudaNdarray_HOST_DIMS(V5)[0], 0);
V5 = NULL;
{
__failure = 6;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_6;};
}
//std::cerr << "c_extract " << V5 << "dim check 0 passed\n";
//std::cerr << "c_extract " << V5 << "checking bcast 0 <" << V5->str<< ">\n";
//std::cerr << "c_extract " << V5->str[0] << "\n";
if (CudaNdarray_HOST_STRIDES(V5)[0])
{
//std::cerr << "c_extract bad stride detected...\n";
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i",
CudaNdarray_HOST_STRIDES(V5)[0], 0);
V5 = NULL;
{
__failure = 6;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_6;};
}
//std::cerr << "c_extract " << V5 << "bcast check 0 passed\n";
if (CudaNdarray_HOST_DIMS(V5)[1] != 1)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i",
CudaNdarray_HOST_DIMS(V5)[1], 1);
V5 = NULL;
{
__failure = 6;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_6;};
}
//std::cerr << "c_extract " << V5 << "dim check 1 passed\n";
//std::cerr << "c_extract " << V5 << "checking bcast 1 <" << V5->str<< ">\n";
//std::cerr << "c_extract " << V5->str[1] << "\n";
if (CudaNdarray_HOST_STRIDES(V5)[1])
{
//std::cerr << "c_extract bad stride detected...\n";
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i",
CudaNdarray_HOST_STRIDES(V5)[1], 1);
V5 = NULL;
{
__failure = 6;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_6;};
}
//std::cerr << "c_extract " << V5 << "bcast check 1 passed\n";
assert(V5);
Py_INCREF(py_V5);
}
else if (py_V5 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V5 = NULL;
{
__failure = 6;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_6;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V5, (py_V5->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V5 = NULL;
{
__failure = 6;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_6;};
}
//std::cerr << "c_extract done " << V5 << '\n';
{
py_V7 = PyList_GET_ITEM(storage_V7, 0);
{Py_XINCREF(py_V7);}
assert(py_V7->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V7))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V7, (py_V7->ob_refcnt));
V7 = (CudaNdarray*)py_V7;
//std::cerr << "c_extract " << V7 << '\n';
if (V7->nd != 2)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2",
V7->nd);
V7 = NULL;
{
__failure = 8;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_8;};
}
//std::cerr << "c_extract " << V7 << " nd check passed\n";
if (CudaNdarray_HOST_DIMS(V7)[1] != 1)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i",
CudaNdarray_HOST_DIMS(V7)[1], 1);
V7 = NULL;
{
__failure = 8;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_8;};
}
//std::cerr << "c_extract " << V7 << "dim check 1 passed\n";
//std::cerr << "c_extract " << V7 << "checking bcast 1 <" << V7->str<< ">\n";
//std::cerr << "c_extract " << V7->str[1] << "\n";
if (CudaNdarray_HOST_STRIDES(V7)[1])
{
//std::cerr << "c_extract bad stride detected...\n";
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i",
CudaNdarray_HOST_STRIDES(V7)[1], 1);
V7 = NULL;
{
__failure = 8;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_8;};
}
//std::cerr << "c_extract " << V7 << "bcast check 1 passed\n";
assert(V7);
Py_INCREF(py_V7);
}
else if (py_V7 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V7 = NULL;
{
__failure = 8;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_8;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V7, (py_V7->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V7 = NULL;
{
__failure = 8;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_8;};
}
//std::cerr << "c_extract done " << V7 << '\n';
{
py_V9 = PyList_GET_ITEM(storage_V9, 0);
{Py_XINCREF(py_V9);}
assert(py_V9->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V9))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V9, (py_V9->ob_refcnt));
V9 = (CudaNdarray*)py_V9;
//std::cerr << "c_extract " << V9 << '\n';
if (V9->nd != 2)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2",
V9->nd);
V9 = NULL;
{
__failure = 10;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_10;};
}
//std::cerr << "c_extract " << V9 << " nd check passed\n";
assert(V9);
Py_INCREF(py_V9);
}
else if (py_V9 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V9 = NULL;
{
__failure = 10;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_10;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V9, (py_V9->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V9 = NULL;
{
__failure = 10;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_10;};
}
//std::cerr << "c_extract done " << V9 << '\n';
{
py_V11 = PyList_GET_ITEM(storage_V11, 0);
{Py_XINCREF(py_V11);}
assert(py_V11->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V11))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V11, (py_V11->ob_refcnt));
V11 = (CudaNdarray*)py_V11;
//std::cerr << "c_extract " << V11 << '\n';
if (V11->nd != 2)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2",
V11->nd);
V11 = NULL;
{
__failure = 12;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_12;};
}
//std::cerr << "c_extract " << V11 << " nd check passed\n";
assert(V11);
Py_INCREF(py_V11);
}
else if (py_V11 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V11 = NULL;
{
__failure = 12;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_12;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V11, (py_V11->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V11 = NULL;
{
__failure = 12;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_12;};
}
//std::cerr << "c_extract done " << V11 << '\n';
{
py_V13 = PyList_GET_ITEM(storage_V13, 0);
{Py_XINCREF(py_V13);}
assert(py_V13->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V13))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V13, (py_V13->ob_refcnt));
V13 = (CudaNdarray*)py_V13;
//std::cerr << "c_extract " << V13 << '\n';
if (V13->nd != 2)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2",
V13->nd);
V13 = NULL;
{
__failure = 14;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_14;};
}
//std::cerr << "c_extract " << V13 << " nd check passed\n";
if (CudaNdarray_HOST_DIMS(V13)[0] != 1)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i",
CudaNdarray_HOST_DIMS(V13)[0], 0);
V13 = NULL;
{
__failure = 14;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_14;};
}
//std::cerr << "c_extract " << V13 << "dim check 0 passed\n";
//std::cerr << "c_extract " << V13 << "checking bcast 0 <" << V13->str<< ">\n";
//std::cerr << "c_extract " << V13->str[0] << "\n";
if (CudaNdarray_HOST_STRIDES(V13)[0])
{
//std::cerr << "c_extract bad stride detected...\n";
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i",
CudaNdarray_HOST_STRIDES(V13)[0], 0);
V13 = NULL;
{
__failure = 14;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_14;};
}
//std::cerr << "c_extract " << V13 << "bcast check 0 passed\n";
if (CudaNdarray_HOST_DIMS(V13)[1] != 1)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i",
CudaNdarray_HOST_DIMS(V13)[1], 1);
V13 = NULL;
{
__failure = 14;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_14;};
}
//std::cerr << "c_extract " << V13 << "dim check 1 passed\n";
//std::cerr << "c_extract " << V13 << "checking bcast 1 <" << V13->str<< ">\n";
//std::cerr << "c_extract " << V13->str[1] << "\n";
if (CudaNdarray_HOST_STRIDES(V13)[1])
{
//std::cerr << "c_extract bad stride detected...\n";
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i",
CudaNdarray_HOST_STRIDES(V13)[1], 1);
V13 = NULL;
{
__failure = 14;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_14;};
}
//std::cerr << "c_extract " << V13 << "bcast check 1 passed\n";
assert(V13);
Py_INCREF(py_V13);
}
else if (py_V13 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V13 = NULL;
{
__failure = 14;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_14;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V13, (py_V13->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V13 = NULL;
{
__failure = 14;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_14;};
}
//std::cerr << "c_extract done " << V13 << '\n';
{
py_V15 = PyList_GET_ITEM(storage_V15, 0);
{Py_XINCREF(py_V15);}
assert(py_V15->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V15))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V15, (py_V15->ob_refcnt));
V15 = (CudaNdarray*)py_V15;
//std::cerr << "c_extract " << V15 << '\n';
if (V15->nd != 2)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2",
V15->nd);
V15 = NULL;
{
__failure = 16;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_16;};
}
//std::cerr << "c_extract " << V15 << " nd check passed\n";
if (CudaNdarray_HOST_DIMS(V15)[0] != 1)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i",
CudaNdarray_HOST_DIMS(V15)[0], 0);
V15 = NULL;
{
__failure = 16;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_16;};
}
//std::cerr << "c_extract " << V15 << "dim check 0 passed\n";
//std::cerr << "c_extract " << V15 << "checking bcast 0 <" << V15->str<< ">\n";
//std::cerr << "c_extract " << V15->str[0] << "\n";
if (CudaNdarray_HOST_STRIDES(V15)[0])
{
//std::cerr << "c_extract bad stride detected...\n";
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i",
CudaNdarray_HOST_STRIDES(V15)[0], 0);
V15 = NULL;
{
__failure = 16;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_16;};
}
//std::cerr << "c_extract " << V15 << "bcast check 0 passed\n";
if (CudaNdarray_HOST_DIMS(V15)[1] != 1)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i",
CudaNdarray_HOST_DIMS(V15)[1], 1);
V15 = NULL;
{
__failure = 16;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_16;};
}
//std::cerr << "c_extract " << V15 << "dim check 1 passed\n";
//std::cerr << "c_extract " << V15 << "checking bcast 1 <" << V15->str<< ">\n";
//std::cerr << "c_extract " << V15->str[1] << "\n";
if (CudaNdarray_HOST_STRIDES(V15)[1])
{
//std::cerr << "c_extract bad stride detected...\n";
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i",
CudaNdarray_HOST_STRIDES(V15)[1], 1);
V15 = NULL;
{
__failure = 16;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_16;};
}
//std::cerr << "c_extract " << V15 << "bcast check 1 passed\n";
assert(V15);
Py_INCREF(py_V15);
}
else if (py_V15 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V15 = NULL;
{
__failure = 16;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_16;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V15, (py_V15->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V15 = NULL;
{
__failure = 16;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_16;};
}
//std::cerr << "c_extract done " << V15 << '\n';
{
py_V17 = PyList_GET_ITEM(storage_V17, 0);
{Py_XINCREF(py_V17);}
assert(py_V17->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V17))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V17, (py_V17->ob_refcnt));
V17 = (CudaNdarray*)py_V17;
//std::cerr << "c_extract " << V17 << '\n';
if (V17->nd != 2)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2",
V17->nd);
V17 = NULL;
{
__failure = 18;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_18;};
}
//std::cerr << "c_extract " << V17 << " nd check passed\n";
assert(V17);
Py_INCREF(py_V17);
}
else if (py_V17 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V17 = NULL;
{
__failure = 18;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_18;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V17, (py_V17->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V17 = NULL;
{
__failure = 18;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_18;};
}
//std::cerr << "c_extract done " << V17 << '\n';
{
py_V19 = PyList_GET_ITEM(storage_V19, 0);
{Py_XINCREF(py_V19);}
assert(py_V19->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V19))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V19, (py_V19->ob_refcnt));
V19 = (CudaNdarray*)py_V19;
//std::cerr << "c_extract " << V19 << '\n';
if (V19->nd != 2)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2",
V19->nd);
V19 = NULL;
{
__failure = 20;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_20;};
}
//std::cerr << "c_extract " << V19 << " nd check passed\n";
assert(V19);
Py_INCREF(py_V19);
}
else if (py_V19 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V19 = NULL;
{
__failure = 20;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_20;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V19, (py_V19->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V19 = NULL;
{
__failure = 20;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_20;};
}
//std::cerr << "c_extract done " << V19 << '\n';
{
py_V21 = PyList_GET_ITEM(storage_V21, 0);
{Py_XINCREF(py_V21);}
assert(py_V21->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V21))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V21, (py_V21->ob_refcnt));
V21 = (CudaNdarray*)py_V21;
//std::cerr << "c_extract " << V21 << '\n';
if (V21->nd != 2)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2",
V21->nd);
V21 = NULL;
{
__failure = 22;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_22;};
}
//std::cerr << "c_extract " << V21 << " nd check passed\n";
assert(V21);
Py_INCREF(py_V21);
}
else if (py_V21 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V21 = NULL;
{
__failure = 22;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_22;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V21, (py_V21->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V21 = NULL;
{
__failure = 22;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_22;};
}
//std::cerr << "c_extract done " << V21 << '\n';
{
py_V23 = PyList_GET_ITEM(storage_V23, 0);
{Py_XINCREF(py_V23);}
assert(py_V23->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V23))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V23, (py_V23->ob_refcnt));
V23 = (CudaNdarray*)py_V23;
//std::cerr << "c_extract " << V23 << '\n';
if (V23->nd != 2)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2",
V23->nd);
V23 = NULL;
{
__failure = 24;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_24;};
}
//std::cerr << "c_extract " << V23 << " nd check passed\n";
if (CudaNdarray_HOST_DIMS(V23)[0] != 1)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i",
CudaNdarray_HOST_DIMS(V23)[0], 0);
V23 = NULL;
{
__failure = 24;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_24;};
}
//std::cerr << "c_extract " << V23 << "dim check 0 passed\n";
//std::cerr << "c_extract " << V23 << "checking bcast 0 <" << V23->str<< ">\n";
//std::cerr << "c_extract " << V23->str[0] << "\n";
if (CudaNdarray_HOST_STRIDES(V23)[0])
{
//std::cerr << "c_extract bad stride detected...\n";
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i",
CudaNdarray_HOST_STRIDES(V23)[0], 0);
V23 = NULL;
{
__failure = 24;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_24;};
}
//std::cerr << "c_extract " << V23 << "bcast check 0 passed\n";
if (CudaNdarray_HOST_DIMS(V23)[1] != 1)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i",
CudaNdarray_HOST_DIMS(V23)[1], 1);
V23 = NULL;
{
__failure = 24;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_24;};
}
//std::cerr << "c_extract " << V23 << "dim check 1 passed\n";
//std::cerr << "c_extract " << V23 << "checking bcast 1 <" << V23->str<< ">\n";
//std::cerr << "c_extract " << V23->str[1] << "\n";
if (CudaNdarray_HOST_STRIDES(V23)[1])
{
//std::cerr << "c_extract bad stride detected...\n";
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i",
CudaNdarray_HOST_STRIDES(V23)[1], 1);
V23 = NULL;
{
__failure = 24;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_24;};
}
//std::cerr << "c_extract " << V23 << "bcast check 1 passed\n";
assert(V23);
Py_INCREF(py_V23);
}
else if (py_V23 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V23 = NULL;
{
__failure = 24;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_24;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V23, (py_V23->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V23 = NULL;
{
__failure = 24;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_24;};
}
//std::cerr << "c_extract done " << V23 << '\n';
{
// Op class GpuElemwise
//std::cerr << "C_CODE Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))} START\n";
//standard elemwise size checks
int dims[2] = {1,1};
int broadcasts_V3[2] = {0, 0};
int broadcasts_V5[2] = {1, 1};
int broadcasts_V7[2] = {0, 1};
int broadcasts_V9[2] = {0, 0};
int broadcasts_V11[2] = {0, 0};
int broadcasts_V13[2] = {1, 1};
int broadcasts_V15[2] = {1, 1};
int broadcasts_V17[2] = {0, 0};
int broadcasts_V19[2] = {0, 0};
int broadcasts_V21[2] = {0, 0};
int broadcasts_V23[2] = {1, 1};
//std::cerr << "C_CODE Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))} checking input V3\n";
if (2 != V3->nd)
{
PyErr_Format(PyExc_TypeError,
"need 2 dims, not %i", V3->nd);
{
__failure = 25;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_25;};
}
for (int i = 0; i< 2; ++i)
{
dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V3)[i] : dims[i];
if ((!(broadcasts_V3[i] &&
CudaNdarray_HOST_DIMS(V3)[i] == 1)) &&
(dims[i] != CudaNdarray_HOST_DIMS(V3)[i]))
{
//std::cerr << "C_CODE Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))} checking input V3 failed\n";
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Input dimension mis-match. Input"
" 0 (indices start at 0) has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V3)[i],
dims[i]
);
{
__failure = 25;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_25;};
}
}
//std::cerr << "C_CODE Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))} checking input V5\n";
if (2 != V5->nd)
{
PyErr_Format(PyExc_TypeError,
"need 2 dims, not %i", V5->nd);
{
__failure = 25;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_25;};
}
for (int i = 0; i< 2; ++i)
{
dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V5)[i] : dims[i];
if ((!(broadcasts_V5[i] &&
CudaNdarray_HOST_DIMS(V5)[i] == 1)) &&
(dims[i] != CudaNdarray_HOST_DIMS(V5)[i]))
{
//std::cerr << "C_CODE Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))} checking input V5 failed\n";
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Input dimension mis-match. Input"
" 1 (indices start at 0) has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V5)[i],
dims[i]
);
{
__failure = 25;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_25;};
}
}
//std::cerr << "C_CODE Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))} checking input V7\n";
if (2 != V7->nd)
{
PyErr_Format(PyExc_TypeError,
"need 2 dims, not %i", V7->nd);
{
__failure = 25;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_25;};
}
for (int i = 0; i< 2; ++i)
{
dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V7)[i] : dims[i];
if ((!(broadcasts_V7[i] &&
CudaNdarray_HOST_DIMS(V7)[i] == 1)) &&
(dims[i] != CudaNdarray_HOST_DIMS(V7)[i]))
{
//std::cerr << "C_CODE Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))} checking input V7 failed\n";
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Input dimension mis-match. Input"
" 2 (indices start at 0) has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V7)[i],
dims[i]
);
{
__failure = 25;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_25;};
}
}
//std::cerr << "C_CODE Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))} checking input V9\n";
if (2 != V9->nd)
{
PyErr_Format(PyExc_TypeError,
"need 2 dims, not %i", V9->nd);
{
__failure = 25;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_25;};
}
for (int i = 0; i< 2; ++i)
{
dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V9)[i] : dims[i];
if ((!(broadcasts_V9[i] &&
CudaNdarray_HOST_DIMS(V9)[i] == 1)) &&
(dims[i] != CudaNdarray_HOST_DIMS(V9)[i]))
{
//std::cerr << "C_CODE Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))} checking input V9 failed\n";
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Input dimension mis-match. Input"
" 3 (indices start at 0) has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V9)[i],
dims[i]
);
{
__failure = 25;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_25;};
}
}
//std::cerr << "C_CODE Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))} checking input V11\n";
if (2 != V11->nd)
{
PyErr_Format(PyExc_TypeError,
"need 2 dims, not %i", V11->nd);
{
__failure = 25;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_25;};
}
for (int i = 0; i< 2; ++i)
{
dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V11)[i] : dims[i];
if ((!(broadcasts_V11[i] &&
CudaNdarray_HOST_DIMS(V11)[i] == 1)) &&
(dims[i] != CudaNdarray_HOST_DIMS(V11)[i]))
{
//std::cerr << "C_CODE Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))} checking input V11 failed\n";
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Input dimension mis-match. Input"
" 4 (indices start at 0) has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V11)[i],
dims[i]
);
{
__failure = 25;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_25;};
}
}
//std::cerr << "C_CODE Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))} checking input V13\n";
if (2 != V13->nd)
{
PyErr_Format(PyExc_TypeError,
"need 2 dims, not %i", V13->nd);
{
__failure = 25;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_25;};
}
for (int i = 0; i< 2; ++i)
{
dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V13)[i] : dims[i];
if ((!(broadcasts_V13[i] &&
CudaNdarray_HOST_DIMS(V13)[i] == 1)) &&
(dims[i] != CudaNdarray_HOST_DIMS(V13)[i]))
{
//std::cerr << "C_CODE Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))} checking input V13 failed\n";
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Input dimension mis-match. Input"
" 5 (indices start at 0) has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V13)[i],
dims[i]
);
{
__failure = 25;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_25;};
}
}
//std::cerr << "C_CODE Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))} checking input V15\n";
if (2 != V15->nd)
{
PyErr_Format(PyExc_TypeError,
"need 2 dims, not %i", V15->nd);
{
__failure = 25;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_25;};
}
for (int i = 0; i< 2; ++i)
{
dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V15)[i] : dims[i];
if ((!(broadcasts_V15[i] &&
CudaNdarray_HOST_DIMS(V15)[i] == 1)) &&
(dims[i] != CudaNdarray_HOST_DIMS(V15)[i]))
{
//std::cerr << "C_CODE Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))} checking input V15 failed\n";
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Input dimension mis-match. Input"
" 6 (indices start at 0) has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V15)[i],
dims[i]
);
{
__failure = 25;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_25;};
}
}
//std::cerr << "C_CODE Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))} checking input V17\n";
if (2 != V17->nd)
{
PyErr_Format(PyExc_TypeError,
"need 2 dims, not %i", V17->nd);
{
__failure = 25;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_25;};
}
for (int i = 0; i< 2; ++i)
{
dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V17)[i] : dims[i];
if ((!(broadcasts_V17[i] &&
CudaNdarray_HOST_DIMS(V17)[i] == 1)) &&
(dims[i] != CudaNdarray_HOST_DIMS(V17)[i]))
{
//std::cerr << "C_CODE Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))} checking input V17 failed\n";
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Input dimension mis-match. Input"
" 7 (indices start at 0) has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V17)[i],
dims[i]
);
{
__failure = 25;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_25;};
}
}
//std::cerr << "C_CODE Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))} checking input V19\n";
if (2 != V19->nd)
{
PyErr_Format(PyExc_TypeError,
"need 2 dims, not %i", V19->nd);
{
__failure = 25;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_25;};
}
for (int i = 0; i< 2; ++i)
{
dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V19)[i] : dims[i];
if ((!(broadcasts_V19[i] &&
CudaNdarray_HOST_DIMS(V19)[i] == 1)) &&
(dims[i] != CudaNdarray_HOST_DIMS(V19)[i]))
{
//std::cerr << "C_CODE Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))} checking input V19 failed\n";
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Input dimension mis-match. Input"
" 8 (indices start at 0) has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V19)[i],
dims[i]
);
{
__failure = 25;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_25;};
}
}
//std::cerr << "C_CODE Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))} checking input V21\n";
if (2 != V21->nd)
{
PyErr_Format(PyExc_TypeError,
"need 2 dims, not %i", V21->nd);
{
__failure = 25;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_25;};
}
for (int i = 0; i< 2; ++i)
{
dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V21)[i] : dims[i];
if ((!(broadcasts_V21[i] &&
CudaNdarray_HOST_DIMS(V21)[i] == 1)) &&
(dims[i] != CudaNdarray_HOST_DIMS(V21)[i]))
{
//std::cerr << "C_CODE Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))} checking input V21 failed\n";
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Input dimension mis-match. Input"
" 9 (indices start at 0) has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V21)[i],
dims[i]
);
{
__failure = 25;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_25;};
}
}
//std::cerr << "C_CODE Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))} checking input V23\n";
if (2 != V23->nd)
{
PyErr_Format(PyExc_TypeError,
"need 2 dims, not %i", V23->nd);
{
__failure = 25;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_25;};
}
for (int i = 0; i< 2; ++i)
{
dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V23)[i] : dims[i];
if ((!(broadcasts_V23[i] &&
CudaNdarray_HOST_DIMS(V23)[i] == 1)) &&
(dims[i] != CudaNdarray_HOST_DIMS(V23)[i]))
{
//std::cerr << "C_CODE Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))} checking input V23 failed\n";
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Input dimension mis-match. Input"
" 10 (indices start at 0) has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V23)[i],
dims[i]
);
{
__failure = 25;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_25;};
}
}
Py_XDECREF(V1);
V1 = V3;
Py_INCREF(V1);
for (int i = 0; (i< 2) && (V1); ++i) {
if (dims[i] != CudaNdarray_HOST_DIMS(V1)[i])
{
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Output dimension mis-match. Output"
" 0 (indices start at 0), working inplace"
" on input 0, has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V1)[i],
dims[i]
);
Py_DECREF(V1);
V1 = NULL;
{
__failure = 25;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_25;};
}
}
//std::cerr << "ELEMWISE NEW V1 nd" << V1->nd << "\n";
//std::cerr << "ELEMWISE NEW V1 data" << V1->devdata << "\n";
{
//new block so that failure gotos don't skip over variable initialization
//std::cerr << "calling callkernel\n";
if (callkernel_node_8da615e4463fd0663090428e75863d87_0(1, 0, dims
, CudaNdarray_DEV_DATA(V3), CudaNdarray_HOST_STRIDES(V3)
, CudaNdarray_DEV_DATA(V5), CudaNdarray_HOST_STRIDES(V5)
, CudaNdarray_DEV_DATA(V7), CudaNdarray_HOST_STRIDES(V7)
, CudaNdarray_DEV_DATA(V9), CudaNdarray_HOST_STRIDES(V9)
, CudaNdarray_DEV_DATA(V11), CudaNdarray_HOST_STRIDES(V11)
, CudaNdarray_DEV_DATA(V13), CudaNdarray_HOST_STRIDES(V13)
, CudaNdarray_DEV_DATA(V15), CudaNdarray_HOST_STRIDES(V15)
, CudaNdarray_DEV_DATA(V17), CudaNdarray_HOST_STRIDES(V17)
, CudaNdarray_DEV_DATA(V19), CudaNdarray_HOST_STRIDES(V19)
, CudaNdarray_DEV_DATA(V21), CudaNdarray_HOST_STRIDES(V21)
, CudaNdarray_DEV_DATA(V23), CudaNdarray_HOST_STRIDES(V23)
, CudaNdarray_DEV_DATA(V1), CudaNdarray_HOST_STRIDES(V1)
))
{
// error
Py_DECREF(V1);
V1 = NULL;
{
__failure = 25;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_25;};
}
else // no error
{
}
}
//std::cerr << "C_CODE Composite{(i0 * (((i1 * i2 * i3 * i4) / (i5 * i6 * i7)) + ((i2 * i8 * i4) / (i5 * i6 * i9))) * (i10 - i4))} END\n";
__label_25:
double __DUMMY_25;
}
__label_24:
//std::cerr << "cleanup " << py_V23 << " " << V23 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V23, (py_V23->ob_refcnt));
if (V23)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V23, (V23->ob_refcnt));
Py_XDECREF(V23);
}
//std::cerr << "cleanup done" << py_V23 << "\n";
{Py_XDECREF(py_V23);}
double __DUMMY_24;
}
__label_22:
//std::cerr << "cleanup " << py_V21 << " " << V21 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V21, (py_V21->ob_refcnt));
if (V21)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V21, (V21->ob_refcnt));
Py_XDECREF(V21);
}
//std::cerr << "cleanup done" << py_V21 << "\n";
{Py_XDECREF(py_V21);}
double __DUMMY_22;
}
__label_20:
//std::cerr << "cleanup " << py_V19 << " " << V19 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V19, (py_V19->ob_refcnt));
if (V19)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V19, (V19->ob_refcnt));
Py_XDECREF(V19);
}
//std::cerr << "cleanup done" << py_V19 << "\n";
{Py_XDECREF(py_V19);}
double __DUMMY_20;
}
__label_18:
//std::cerr << "cleanup " << py_V17 << " " << V17 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V17, (py_V17->ob_refcnt));
if (V17)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V17, (V17->ob_refcnt));
Py_XDECREF(V17);
}
//std::cerr << "cleanup done" << py_V17 << "\n";
{Py_XDECREF(py_V17);}
double __DUMMY_18;
}
__label_16:
//std::cerr << "cleanup " << py_V15 << " " << V15 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V15, (py_V15->ob_refcnt));
if (V15)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V15, (V15->ob_refcnt));
Py_XDECREF(V15);
}
//std::cerr << "cleanup done" << py_V15 << "\n";
{Py_XDECREF(py_V15);}
double __DUMMY_16;
}
__label_14:
//std::cerr << "cleanup " << py_V13 << " " << V13 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V13, (py_V13->ob_refcnt));
if (V13)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V13, (V13->ob_refcnt));
Py_XDECREF(V13);
}
//std::cerr << "cleanup done" << py_V13 << "\n";
{Py_XDECREF(py_V13);}
double __DUMMY_14;
}
__label_12:
//std::cerr << "cleanup " << py_V11 << " " << V11 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V11, (py_V11->ob_refcnt));
if (V11)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V11, (V11->ob_refcnt));
Py_XDECREF(V11);
}
//std::cerr << "cleanup done" << py_V11 << "\n";
{Py_XDECREF(py_V11);}
double __DUMMY_12;
}
__label_10:
//std::cerr << "cleanup " << py_V9 << " " << V9 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V9, (py_V9->ob_refcnt));
if (V9)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V9, (V9->ob_refcnt));
Py_XDECREF(V9);
}
//std::cerr << "cleanup done" << py_V9 << "\n";
{Py_XDECREF(py_V9);}
double __DUMMY_10;
}
__label_8:
//std::cerr << "cleanup " << py_V7 << " " << V7 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V7, (py_V7->ob_refcnt));
if (V7)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V7, (V7->ob_refcnt));
Py_XDECREF(V7);
}
//std::cerr << "cleanup done" << py_V7 << "\n";
{Py_XDECREF(py_V7);}
double __DUMMY_8;
}
__label_6:
//std::cerr << "cleanup " << py_V5 << " " << V5 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V5, (py_V5->ob_refcnt));
if (V5)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V5, (V5->ob_refcnt));
Py_XDECREF(V5);
}
//std::cerr << "cleanup done" << py_V5 << "\n";
{Py_XDECREF(py_V5);}
double __DUMMY_6;
}
__label_4:
//std::cerr << "cleanup " << py_V3 << " " << V3 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt));
if (V3)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V3, (V3->ob_refcnt));
Py_XDECREF(V3);
}
//std::cerr << "cleanup done" << py_V3 << "\n";
{Py_XDECREF(py_V3);}
double __DUMMY_4;
}
__label_2:
if (!__failure) {
//std::cerr << "sync\n";
if (NULL == V1) {
// failure: sync None to storage
Py_XDECREF(py_V1);
py_V1 = Py_None;
Py_INCREF(py_V1);
}
else
{
if (py_V1 != (PyObject*)V1)
{
Py_XDECREF(py_V1);
py_V1 = (PyObject*)V1;
Py_INCREF(py_V1);
}
assert(py_V1->ob_refcnt);
}
PyObject* old = PyList_GET_ITEM(storage_V1, 0);
{Py_XINCREF(py_V1);}
PyList_SET_ITEM(storage_V1, 0, py_V1);
{Py_XDECREF(old);}
}
//std::cerr << "cleanup " << py_V1 << " " << V1 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt));
if (V1)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V1, (V1->ob_refcnt));
Py_XDECREF(V1);
}
//std::cerr << "cleanup done" << py_V1 << "\n";
{Py_XDECREF(py_V1);}
double __DUMMY_2;
}
if (__failure) {
// When there is a failure, this code puts the exception
// in __ERROR.
PyObject* err_type = NULL;
PyObject* err_msg = NULL;
PyObject* err_traceback = NULL;
PyErr_Fetch(&err_type, &err_msg, &err_traceback);
if (!err_type) {err_type = Py_None;Py_INCREF(Py_None);}
if (!err_msg) {err_msg = Py_None; Py_INCREF(Py_None);}
if (!err_traceback) {err_traceback = Py_None; Py_INCREF(Py_None);}
PyObject* old_err_type = PyList_GET_ITEM(__ERROR, 0);
PyObject* old_err_msg = PyList_GET_ITEM(__ERROR, 1);
PyObject* old_err_traceback = PyList_GET_ITEM(__ERROR, 2);
PyList_SET_ITEM(__ERROR, 0, err_type);
PyList_SET_ITEM(__ERROR, 1, err_msg);
PyList_SET_ITEM(__ERROR, 2, err_traceback);
{Py_XDECREF(old_err_type);}
{Py_XDECREF(old_err_msg);}
{Py_XDECREF(old_err_traceback);}
}
// The failure code is returned to index what code block failed.
return __failure;
}
};
}
static int __struct_compiled_op_8da615e4463fd0663090428e75863d87_executor(__struct_compiled_op_8da615e4463fd0663090428e75863d87* self) {
return self->run();
}
static void __struct_compiled_op_8da615e4463fd0663090428e75863d87_destructor(void* executor, void* self) {
delete ((__struct_compiled_op_8da615e4463fd0663090428e75863d87*)self);
}
//////////////////////
//// Functions
//////////////////////
static PyObject * instantiate(PyObject * self, PyObject *argtuple) {
assert(PyTuple_Check(argtuple));
if (13 != PyTuple_Size(argtuple)){
PyErr_Format(PyExc_TypeError, "Wrong number of arguments, expected 13, got %i", (int)PyTuple_Size(argtuple));
return NULL;
}
__struct_compiled_op_8da615e4463fd0663090428e75863d87* struct_ptr = new __struct_compiled_op_8da615e4463fd0663090428e75863d87();
if (struct_ptr->init( PyTuple_GET_ITEM(argtuple, 0),PyTuple_GET_ITEM(argtuple, 1),PyTuple_GET_ITEM(argtuple, 2),PyTuple_GET_ITEM(argtuple, 3),PyTuple_GET_ITEM(argtuple, 4),PyTuple_GET_ITEM(argtuple, 5),PyTuple_GET_ITEM(argtuple, 6),PyTuple_GET_ITEM(argtuple, 7),PyTuple_GET_ITEM(argtuple, 8),PyTuple_GET_ITEM(argtuple, 9),PyTuple_GET_ITEM(argtuple, 10),PyTuple_GET_ITEM(argtuple, 11),PyTuple_GET_ITEM(argtuple, 12) ) != 0) {
delete struct_ptr;
return NULL;
}
PyObject* thunk = PyCObject_FromVoidPtrAndDesc((void*)(&__struct_compiled_op_8da615e4463fd0663090428e75863d87_executor), struct_ptr, __struct_compiled_op_8da615e4463fd0663090428e75863d87_destructor);
return thunk; }
//////////////////////
//// Module init
//////////////////////
static PyMethodDef MyMethods[] = {
{"instantiate", instantiate, METH_VARARGS, "undocumented"} ,
{NULL, NULL, 0, NULL}
};
PyMODINIT_FUNC init8da615e4463fd0663090428e75863d87(void){
(void) Py_InitModule("8da615e4463fd0663090428e75863d87", MyMethods);
}
|
d474cf44c317763a27d4d509c08fd782197b54d2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include "cube.cuh"
__global__ void cube_core(int *dev_a, int *dev_b){
int tid=blockIdx.x;
int tmp=*(dev_a+tid);
*(dev_b+tid)=tmp*tmp*tmp;
}
void cube(int result[], int n){
int a[n];
for(int i=0;i<n;i++){
a[i]=i;
}
int *dev_a=NULL;
int *dev_b=NULL;
 hipMalloc((void**)&dev_a,n*sizeof(int));
 hipMemset(dev_a,0,n*sizeof(int)); // clear the device buffer itself, not the host pointer
 hipMalloc((void**)&dev_b,n*sizeof(int));
 hipMemset(dev_b,0,n*sizeof(int));
 hipMemcpy(dev_a,a,n*sizeof(int),hipMemcpyHostToDevice);
 hipLaunchKernelGGL(( cube_core), dim3(n),dim3(1), 0, 0, dev_a,dev_b);
 hipMemcpy(result,dev_b,n*sizeof(int),hipMemcpyDeviceToHost);
 hipFree(dev_a);
 hipFree(dev_b);
} | d474cf44c317763a27d4d509c08fd782197b54d2.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include "cube.cuh"
__global__ void cube_core(int *dev_a, int *dev_b){
int tid=blockIdx.x;
int tmp=*(dev_a+tid);
*(dev_b+tid)=tmp*tmp*tmp;
}
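// Host wrapper: fills a[i] = i, copies it to the GPU, launches one block per element, and
// copies the cubed values back into result (which must hold at least n ints).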
void cube(int result[], int n){
int a[n];
for(int i=0;i<n;i++){
a[i]=i;
}
int *dev_a=NULL;
int *dev_b=NULL;
cudaMalloc((void**)&dev_a,n*sizeof(int));
cudaMemset(dev_a,0,n*sizeof(int)); // pass the device pointer itself, not its address
cudaMalloc((void**)&dev_b,n*sizeof(int));
cudaMemset(dev_b,0,n*sizeof(int));
cudaMemcpy(dev_a,a,n*sizeof(int),cudaMemcpyHostToDevice);
cube_core<<<n,1>>>(dev_a,dev_b);
cudaMemcpy(result,dev_b,n*sizeof(int),cudaMemcpyDeviceToHost);
cudaFree(dev_a); // release device buffers so repeated calls do not leak
cudaFree(dev_b);
} |
28782233d2e4a25f3a3fbbff46cbe29f6fad71f2.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Derived from the nVIDIA CUDA 8.0 samples by
*
* Eyal Rozenberg
*
* The derivation is specifically permitted in the nVIDIA CUDA Samples EULA
* and the deriver is the owner of this code according to the EULA.
*
* Use this reasonably. If you want to discuss licensing formalities, please
* contact the author.
*/
#include "../helper_string.h"
#include <cuda/runtime_api.hpp>
#include <stdio.h>
#include <assert.h>
#include <vector>
// CUDA runtime includes
#include <hip/hip_runtime_api.h>
int *pArgc = NULL;
char **pArgv = NULL;
#define MAX_DEVICES 8
#define PROCESSES_PER_DEVICE 1
#define DATA_BUF_SIZE 4096
#ifdef __linux
#include <unistd.h>
#include <sched.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <linux/version.h>
typedef struct ipcCUDA_st
{
int device;
pid_t pid;
hipIpcEventHandle_t eventHandle;
hipIpcMemHandle_t memHandle;
} ipcCUDA_t;
typedef struct ipcDevices_st
{
int count;
int ordinals[MAX_DEVICES];
} ipcDevices_t;
typedef struct ipcBarrier_st
{
int count;
bool sense;
bool allExit;
} ipcBarrier_t;
ipcBarrier_t *g_barrier = NULL;
bool g_procSense;
int g_processCount;
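// Sense-reversing barrier shared across the forked processes: the last arrival resets the
// count and flips the shared sense flag, releasing every process spinning on it.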
void procBarrier()
{
int newCount = __sync_add_and_fetch(&g_barrier->count, 1);
if (newCount == g_processCount)
{
g_barrier->count = 0;
g_barrier->sense = !g_procSense;
}
else
{
while (g_barrier->sense == g_procSense)
{
if (!g_barrier->allExit)
{
sched_yield();
}
else
{
exit(EXIT_FAILURE);
}
}
}
g_procSense = !g_procSense;
}
// CUDA Kernel
__global__ void simpleKernel(int *dst, int *src, int num)
{
// Dummy kernel
int idx = blockIdx.x * blockDim.x + threadIdx.x;
dst[idx] = src[idx] / num;
}
void getDeviceCount(ipcDevices_t *devices)
{
// We can't initialize CUDA before fork() so we need to spawn a new process
pid_t pid = fork();
if (0 == pid)
{
int i;
int count, uvaCount = 0;
int uvaOrdinals[MAX_DEVICES];
printf("\nChecking for multiple GPUs...\n");
count = cuda::device::count();
printf("CUDA-capable device count: %i\n", count);
printf("\nSearching for UVA capable devices...\n");
for (i = 0; i < count; i++)
{
auto prop = cuda::device::get(i).properties();
if (prop.unifiedAddressing)
{
uvaOrdinals[uvaCount] = i;
printf("> GPU%d = \"%15s\" IS capable of UVA\n", i, prop.name);
uvaCount += 1;
}
if (prop.computeMode != hipComputeModeDefault)
{
printf("> GPU device must be in Compute Mode Default to run\n");
printf("> Please use nvidia-smi to change the Compute Mode to Default\n");
exit(EXIT_SUCCESS);
}
}
devices->ordinals[0] = uvaOrdinals[0];
if (uvaCount < 2)
{
devices->count = uvaCount;
exit(EXIT_SUCCESS);
}
// Check possibility for peer accesses, relevant to our tests
printf("\nChecking GPU(s) for support of peer to peer memory access...\n");
devices->count = 1;
bool canAccessPeer_0i, canAccessPeer_i0;
auto device_0 = cuda::device::get(0);
for (i = 1; i < uvaCount; i++)
{
auto device_i = cuda::device::get(i);
canAccessPeer_0i = cuda::device::peer_to_peer::can_access(device_0, device_i);
canAccessPeer_i0 = cuda::device::peer_to_peer::can_access(device_i, device_0);
if (canAccessPeer_0i and canAccessPeer_i0)
{
devices->ordinals[devices->count] = uvaOrdinals[i];
printf("> Two-way peer access between GPU%d and GPU%d: YES\n", devices->ordinals[0], devices->ordinals[devices->count]);
devices->count += 1;
}
}
exit(EXIT_SUCCESS);
}
else
{
int status;
waitpid(pid, &status, 0);
assert(!status);
}
}
inline bool IsAppBuiltAs64()
{
return sizeof(void*) == 8;
}
void runTestMultiKernel(ipcCUDA_t *s_mem, int index)
{
/*
* a) Process 0 loads a reference buffer into GPU0 memory
* b) Other processes launch a kernel on the GPU0 memory using P2P
* c) Process 0 checks the resulting buffer
*/
// reference buffer in host memory (do in all processes for rand() consistency)
int h_refData[DATA_BUF_SIZE];
for (int i = 0; i < DATA_BUF_SIZE; i++)
{
h_refData[i] = rand();
}
auto device = cuda::device::get(s_mem[index].device).make_current();
if (index == 0)
{
printf("\nLaunching kernels...\n");
// host memory buffer for checking results
int h_results[DATA_BUF_SIZE * MAX_DEVICES * PROCESSES_PER_DEVICE];
std::vector<cuda::event_t> events;
events.reserve(MAX_DEVICES * PROCESSES_PER_DEVICE - 1);
int* d_ptr = reinterpret_cast<int*>(
device.memory().allocate(DATA_BUF_SIZE * g_processCount * sizeof(int)).start
);
s_mem[0].memHandle = cuda::memory::ipc::export_((void *) d_ptr);
cuda::memory::copy((void *) d_ptr, (void *) h_refData, DATA_BUF_SIZE * sizeof(int));
// b.1: wait until all event handles are created in other processes
procBarrier();
for (int i = 1; i < g_processCount; i++)
{
events.push_back(cuda::event::ipc::import(device, s_mem[i].eventHandle));
}
// b.2: wait until all kernels launched and events recorded
procBarrier();
for (int i = 1; i < g_processCount; i++)
{
cuda::synchronize(events[i-1]);
}
//-------------------------------------------
// b.3
procBarrier();
cuda::memory::copy(h_results, d_ptr + DATA_BUF_SIZE, DATA_BUF_SIZE * (g_processCount - 1) * sizeof(int));
cuda::memory::device::free(d_ptr);
printf("Checking test results...\n");
for (int n = 1; n < g_processCount; n++)
{
for (int i = 0; i < DATA_BUF_SIZE; i++)
{
if (h_refData[i]/(n + 1) != h_results[(n-1) * DATA_BUF_SIZE + i])
{
fprintf(stderr, "Data check error at index %d in process %d!: %i, %i\n",i,
n, h_refData[i], h_results[(n-1) * DATA_BUF_SIZE + i]);
g_barrier->allExit = true;
exit(EXIT_FAILURE);
}
}
}
}
else
{
auto current_device = cuda::device::current::get();
auto event = cuda::event::create(
current_device,
cuda::event::sync_by_blocking,
cuda::event::dont_record_timings,
cuda::event::interprocess);
s_mem[index].eventHandle = cuda::event::ipc::export_(event);
// b.1: wait until proc 0 initializes device memory
procBarrier();
{
cuda::memory::ipc::imported_t<int> d_ptr(s_mem[0].memHandle);
printf("> Process %3d: Run kernel on GPU%d, taking source data from and writing results to process %d, GPU%d...\n",
index, s_mem[index].device, 0, s_mem[0].device);
const dim3 threads(512, 1);
const dim3 blocks(DATA_BUF_SIZE / threads.x, 1);
cuda::launch(
simpleKernel,
{ blocks, threads },
d_ptr.get() + index *DATA_BUF_SIZE, d_ptr.get(), index + 1
);
event.record();
// b.2
procBarrier();
} // imported memory handle is closed
// b.3: wait till all the events are used up by proc g_processCount - 1
procBarrier();
// the event is destroyed here
}
}
#endif
int main(int argc, char **argv)
{
pArgc = &argc;
pArgv = argv;
#if CUDART_VERSION >= 4010 && defined(__linux)
if (!IsAppBuiltAs64())
{
printf("%s is only supported on 64-bit Linux OS and the application must be built as a 64-bit target. Test is being waived.\n", argv[0]);
exit(EXIT_WAIVED);
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
printf("%s is only supported with Linux OS kernel version 2.6.18 and higher. Test is being waived.\n", argv[0]);
exit(EXIT_WAIVED);
#endif
ipcDevices_t *s_devices = (ipcDevices_t *) mmap(NULL, sizeof(*s_devices),
PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, 0, 0);
assert(MAP_FAILED != s_devices);
// We can't initialize CUDA before fork() so we need to spawn a new process
getDeviceCount(s_devices);
if (s_devices->count < 1)
{
printf("One or more (SM 2.0) class GPUs are required for %s.\n", argv[0]);
printf("Waiving test.\n");
exit(EXIT_SUCCESS);
}
// initialize our process and barrier data
// if there is more than one device, 1 process per device
if (s_devices->count > 1)
{
g_processCount = PROCESSES_PER_DEVICE * s_devices->count;
}
else
{
g_processCount = 2; // two processes per single device
}
g_barrier = (ipcBarrier_t *) mmap(NULL, sizeof(*g_barrier),
PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, 0, 0);
assert(MAP_FAILED != g_barrier);
memset((void *) g_barrier, 0, sizeof(*g_barrier));
// set local barrier sense flag
g_procSense = 0;
// shared memory for CUDA memory and event handles
ipcCUDA_t *s_mem = (ipcCUDA_t *) mmap(NULL, g_processCount * sizeof(*s_mem),
PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, 0, 0);
assert(MAP_FAILED != s_mem);
// initialize shared memory
memset((void *) s_mem, 0, g_processCount * sizeof(*s_mem));
printf("\nSpawning processes and assigning GPUs...\n");
// index = 0,.., g_processCount - 1
int index = 0;
// spawn "g_processCount - 1" additional processes
for (int i = 1; i < g_processCount; i++)
{
int pid = fork();
if (!pid)
{
index = i;
break;
}
else
{
s_mem[i].pid = pid;
}
}
// distribute UVA capable devices among processes (1 device per PROCESSES_PER_DEVICE processes)
// if there is only one device, have 1 extra process
if (s_devices->count > 1)
{
s_mem[index].device = s_devices->ordinals[ index / PROCESSES_PER_DEVICE ];
}
else
{
s_mem[0].device = s_mem[1].device = s_devices->ordinals[ 0 ];
}
printf("> Process %3d -> GPU%d\n", index, s_mem[index].device);
// launch our test
runTestMultiKernel(s_mem, index);
// Cleanup and shutdown
if (index == 0)
{
// wait for processes to complete
for (int i = 1; i < g_processCount; i++)
{
int status;
waitpid(s_mem[i].pid, &status, 0);
assert(WIFEXITED(status));
}
printf("\nShutting down...\n");
for (int i = 0; i < s_devices->count; i++)
{
cuda::device::get(s_devices->ordinals[i]).synchronize();
}
printf("SUCCESS\n");
exit(EXIT_SUCCESS);
}
#else // Using CUDA 4.0 and older or non Linux OS
printf("simpleIPC requires CUDA 4.1 and Linux to build and run, waiving testing\n\n");
exit(EXIT_WAIVED);
#endif
}
| 28782233d2e4a25f3a3fbbff46cbe29f6fad71f2.cu | /**
* Derived from the nVIDIA CUDA 8.0 samples by
*
* Eyal Rozenberg
*
* The derivation is specifically permitted in the nVIDIA CUDA Samples EULA
* and the deriver is the owner of this code according to the EULA.
*
* Use this reasonably. If you want to discuss licensing formalities, please
* contact the author.
*/
#include "../helper_string.h"
#include <cuda/runtime_api.hpp>
#include <stdio.h>
#include <assert.h>
#include <vector>
// CUDA runtime includes
#include <cuda_runtime_api.h>
int *pArgc = NULL;
char **pArgv = NULL;
#define MAX_DEVICES 8
#define PROCESSES_PER_DEVICE 1
#define DATA_BUF_SIZE 4096
#ifdef __linux
#include <unistd.h>
#include <sched.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <linux/version.h>
typedef struct ipcCUDA_st
{
int device;
pid_t pid;
cudaIpcEventHandle_t eventHandle;
cudaIpcMemHandle_t memHandle;
} ipcCUDA_t;
typedef struct ipcDevices_st
{
int count;
int ordinals[MAX_DEVICES];
} ipcDevices_t;
typedef struct ipcBarrier_st
{
int count;
bool sense;
bool allExit;
} ipcBarrier_t;
ipcBarrier_t *g_barrier = NULL;
bool g_procSense;
int g_processCount;
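// Sense-reversing barrier shared across the forked processes: the last arrival resets the
// count and flips the shared sense flag, releasing every process spinning on it.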
void procBarrier()
{
int newCount = __sync_add_and_fetch(&g_barrier->count, 1);
if (newCount == g_processCount)
{
g_barrier->count = 0;
g_barrier->sense = !g_procSense;
}
else
{
while (g_barrier->sense == g_procSense)
{
if (!g_barrier->allExit)
{
sched_yield();
}
else
{
exit(EXIT_FAILURE);
}
}
}
g_procSense = !g_procSense;
}
// CUDA Kernel
__global__ void simpleKernel(int *dst, int *src, int num)
{
// Dummy kernel
int idx = blockIdx.x * blockDim.x + threadIdx.x;
dst[idx] = src[idx] / num;
}
void getDeviceCount(ipcDevices_t *devices)
{
// We can't initialize CUDA before fork() so we need to spawn a new process
pid_t pid = fork();
if (0 == pid)
{
int i;
int count, uvaCount = 0;
int uvaOrdinals[MAX_DEVICES];
printf("\nChecking for multiple GPUs...\n");
count = cuda::device::count();
printf("CUDA-capable device count: %i\n", count);
printf("\nSearching for UVA capable devices...\n");
for (i = 0; i < count; i++)
{
auto prop = cuda::device::get(i).properties();
if (prop.unifiedAddressing)
{
uvaOrdinals[uvaCount] = i;
printf("> GPU%d = \"%15s\" IS capable of UVA\n", i, prop.name);
uvaCount += 1;
}
if (prop.computeMode != cudaComputeModeDefault)
{
printf("> GPU device must be in Compute Mode Default to run\n");
printf("> Please use nvidia-smi to change the Compute Mode to Default\n");
exit(EXIT_SUCCESS);
}
}
devices->ordinals[0] = uvaOrdinals[0];
if (uvaCount < 2)
{
devices->count = uvaCount;
exit(EXIT_SUCCESS);
}
// Check possibility for peer accesses, relevant to our tests
printf("\nChecking GPU(s) for support of peer to peer memory access...\n");
devices->count = 1;
bool canAccessPeer_0i, canAccessPeer_i0;
auto device_0 = cuda::device::get(0);
for (i = 1; i < uvaCount; i++)
{
auto device_i = cuda::device::get(i);
canAccessPeer_0i = cuda::device::peer_to_peer::can_access(device_0, device_i);
canAccessPeer_i0 = cuda::device::peer_to_peer::can_access(device_i, device_0);
if (canAccessPeer_0i and canAccessPeer_i0)
{
devices->ordinals[devices->count] = uvaOrdinals[i];
printf("> Two-way peer access between GPU%d and GPU%d: YES\n", devices->ordinals[0], devices->ordinals[devices->count]);
devices->count += 1;
}
}
exit(EXIT_SUCCESS);
}
else
{
int status;
waitpid(pid, &status, 0);
assert(!status);
}
}
inline bool IsAppBuiltAs64()
{
return sizeof(void*) == 8;
}
void runTestMultiKernel(ipcCUDA_t *s_mem, int index)
{
/*
* a) Process 0 loads a reference buffer into GPU0 memory
* b) Other processes launch a kernel on the GPU0 memory using P2P
* c) Process 0 checks the resulting buffer
*/
// reference buffer in host memory (do in all processes for rand() consistency)
int h_refData[DATA_BUF_SIZE];
for (int i = 0; i < DATA_BUF_SIZE; i++)
{
h_refData[i] = rand();
}
auto device = cuda::device::get(s_mem[index].device).make_current();
if (index == 0)
{
printf("\nLaunching kernels...\n");
// host memory buffer for checking results
int h_results[DATA_BUF_SIZE * MAX_DEVICES * PROCESSES_PER_DEVICE];
std::vector<cuda::event_t> events;
events.reserve(MAX_DEVICES * PROCESSES_PER_DEVICE - 1);
int* d_ptr = reinterpret_cast<int*>(
device.memory().allocate(DATA_BUF_SIZE * g_processCount * sizeof(int)).start
);
s_mem[0].memHandle = cuda::memory::ipc::export_((void *) d_ptr);
cuda::memory::copy((void *) d_ptr, (void *) h_refData, DATA_BUF_SIZE * sizeof(int));
// b.1: wait until all event handles are created in other processes
procBarrier();
for (int i = 1; i < g_processCount; i++)
{
events.push_back(cuda::event::ipc::import(device, s_mem[i].eventHandle));
}
// b.2: wait until all kernels launched and events recorded
procBarrier();
for (int i = 1; i < g_processCount; i++)
{
cuda::synchronize(events[i-1]);
}
//-------------------------------------------
// b.3
procBarrier();
cuda::memory::copy(h_results, d_ptr + DATA_BUF_SIZE, DATA_BUF_SIZE * (g_processCount - 1) * sizeof(int));
cuda::memory::device::free(d_ptr);
printf("Checking test results...\n");
for (int n = 1; n < g_processCount; n++)
{
for (int i = 0; i < DATA_BUF_SIZE; i++)
{
if (h_refData[i]/(n + 1) != h_results[(n-1) * DATA_BUF_SIZE + i])
{
fprintf(stderr, "Data check error at index %d in process %d!: %i, %i\n",i,
n, h_refData[i], h_results[(n-1) * DATA_BUF_SIZE + i]);
g_barrier->allExit = true;
exit(EXIT_FAILURE);
}
}
}
}
else
{
auto current_device = cuda::device::current::get();
auto event = cuda::event::create(
current_device,
cuda::event::sync_by_blocking,
cuda::event::dont_record_timings,
cuda::event::interprocess);
s_mem[index].eventHandle = cuda::event::ipc::export_(event);
// b.1: wait until proc 0 initializes device memory
procBarrier();
{
cuda::memory::ipc::imported_t<int> d_ptr(s_mem[0].memHandle);
printf("> Process %3d: Run kernel on GPU%d, taking source data from and writing results to process %d, GPU%d...\n",
index, s_mem[index].device, 0, s_mem[0].device);
const dim3 threads(512, 1);
const dim3 blocks(DATA_BUF_SIZE / threads.x, 1);
cuda::launch(
simpleKernel,
{ blocks, threads },
d_ptr.get() + index *DATA_BUF_SIZE, d_ptr.get(), index + 1
);
event.record();
// b.2
procBarrier();
} // imported memory handle is closed
// b.3: wait till all the events are used up by proc g_processCount - 1
procBarrier();
// the event is destroyed here
}
}
#endif
int main(int argc, char **argv)
{
pArgc = &argc;
pArgv = argv;
#if CUDART_VERSION >= 4010 && defined(__linux)
if (!IsAppBuiltAs64())
{
printf("%s is only supported on 64-bit Linux OS and the application must be built as a 64-bit target. Test is being waived.\n", argv[0]);
exit(EXIT_WAIVED);
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
printf("%s is only supported with Linux OS kernel version 2.6.18 and higher. Test is being waived.\n", argv[0]);
exit(EXIT_WAIVED);
#endif
ipcDevices_t *s_devices = (ipcDevices_t *) mmap(NULL, sizeof(*s_devices),
PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, 0, 0);
assert(MAP_FAILED != s_devices);
// We can't initialize CUDA before fork() so we need to spawn a new process
getDeviceCount(s_devices);
if (s_devices->count < 1)
{
printf("One or more (SM 2.0) class GPUs are required for %s.\n", argv[0]);
printf("Waiving test.\n");
exit(EXIT_SUCCESS);
}
// initialize our process and barrier data
// if there is more than one device, 1 process per device
if (s_devices->count > 1)
{
g_processCount = PROCESSES_PER_DEVICE * s_devices->count;
}
else
{
g_processCount = 2; // two processes per single device
}
g_barrier = (ipcBarrier_t *) mmap(NULL, sizeof(*g_barrier),
PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, 0, 0);
assert(MAP_FAILED != g_barrier);
memset((void *) g_barrier, 0, sizeof(*g_barrier));
// set local barrier sense flag
g_procSense = 0;
// shared memory for CUDA memory and event handles
ipcCUDA_t *s_mem = (ipcCUDA_t *) mmap(NULL, g_processCount * sizeof(*s_mem),
PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, 0, 0);
assert(MAP_FAILED != s_mem);
// initialize shared memory
memset((void *) s_mem, 0, g_processCount * sizeof(*s_mem));
printf("\nSpawning processes and assigning GPUs...\n");
// index = 0,.., g_processCount - 1
int index = 0;
// spawn "g_processCount - 1" additional processes
for (int i = 1; i < g_processCount; i++)
{
int pid = fork();
if (!pid)
{
index = i;
break;
}
else
{
s_mem[i].pid = pid;
}
}
// distribute UVA capable devices among processes (1 device per PROCESSES_PER_DEVICE processes)
// if there is only one device, have 1 extra process
if (s_devices->count > 1)
{
s_mem[index].device = s_devices->ordinals[ index / PROCESSES_PER_DEVICE ];
}
else
{
s_mem[0].device = s_mem[1].device = s_devices->ordinals[ 0 ];
}
printf("> Process %3d -> GPU%d\n", index, s_mem[index].device);
// launch our test
runTestMultiKernel(s_mem, index);
// Cleanup and shutdown
if (index == 0)
{
// wait for processes to complete
for (int i = 1; i < g_processCount; i++)
{
int status;
waitpid(s_mem[i].pid, &status, 0);
assert(WIFEXITED(status));
}
printf("\nShutting down...\n");
for (int i = 0; i < s_devices->count; i++)
{
cuda::device::get(s_devices->ordinals[i]).synchronize();
}
printf("SUCCESS\n");
exit(EXIT_SUCCESS);
}
#else // Using CUDA 4.0 and older or non Linux OS
printf("simpleIPC requires CUDA 4.1 and Linux to build and run, waiving testing\n\n");
exit(EXIT_WAIVED);
#endif
}
|
44b25d4181cef0e4d23de1dfc351b78cc6eee82c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector arithmetic ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector comparison ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar comparison ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
// Calculate the arc sine of the input argument.
extern "C"
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
// Calculate the arc tangent of the input argument.
extern "C"
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
// Calculate the cube root of the input argument.
extern "C"
// Calculate ceiling of the input argument.
extern "C"
// Calculate the cosine of the input argument.
extern "C"
// Calculate the hyperbolic cosine of the input argument.
extern "C"
// Calculate the cosine of the input argument × π.
extern "C"
// Calculate the complementary error function of the input argument.
extern "C"
// Calculate the inverse complementary error function of the input argument.
extern "C"
// Calculate the scaled complementary error function of the input argument.
extern "C"
// Calculate the error function of the input argument.
extern "C"
// Calculate the inverse error function of the input argument.
extern "C"
// Calculate the base 10 exponential of the input argument.
extern "C"
// Calculate the base 2 exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
// Calculate the absolute value of its argument.
extern "C"
// Calculate the largest integer less than or equal to x.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
// Calculate the base 10 logarithm of the input argument.
extern "C"
// Calculate the value of l o g e ( 1 + x ) .
extern "C"
// Calculate the base 2 logarithm of the input argument.
extern "C"
// Calculate the floating point representation of the exponent of the input argument.
extern "C"
// Calculate the natural logarithm of the input argument.
extern "C"
// Calculate the standard normal cumulative distribution function.
extern "C"
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
// Calculate reciprocal cube root function.
extern "C"
// Round input to nearest integer value in floating-point.
extern "C"
// Round to nearest integer value in floating-point.
extern "C"
// Calculate the reciprocal of the square root of the input argument.
extern "C"
// Calculate the sine of the input argument.
extern "C"
// Calculate the hyperbolic sine of the input argument.
extern "C"
// Calculate the sine of the input argument × π.
extern "C"
// Calculate the square root of the input argument.
extern "C"
// Calculate the tangent of the input argument.
extern "C"
// Calculate the hyperbolic tangent of the input argument.
extern "C"
// Calculate the gamma function of the input argument.
extern "C"
// Truncate input argument to the integral part.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
// Compute the positive difference between x and y.
extern "C"
// Divide two floating point values.
extern "C"
// Determine the maximum numeric value of the arguments.
extern "C"
// Determine the minimum numeric value of the arguments.
extern "C"
// Calculate the floating-point remainder of x / y.
extern "C"
// Calculate the square root of the sum of squares of two arguments.
extern "C"
// Return next representable double-precision floating-point value after the argument.
extern "C"
// Calculate the value of first argument to the power of second argument.
extern "C"
// Compute double-precision floating-point remainder.
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//WARNING : device_sum size should be gridDim.x
__global__ void vec_scalarSub (int n, double *result, double x, double *y)
{
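// Flatten the 2D launch grid into a linear element index and compute result[id] = x - y[id].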
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
int id = idy * gridDim.x * blockDim.x + idx;
if (id < n)
{
result[id] = x - y[id];
}
} | 44b25d4181cef0e4d23de1dfc351b78cc6eee82c.cu | #include "includes.h"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector arithmetic ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector comparison ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar comparison ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
// Calculate the arc sine of the input argument.
extern "C"
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
// Calculate the arc tangent of the input argument.
extern "C"
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
// Calculate the cube root of the input argument.
extern "C"
// Calculate ceiling of the input argument.
extern "C"
// Calculate the cosine of the input argument.
extern "C"
// Calculate the hyperbolic cosine of the input argument.
extern "C"
// Calculate the cosine of the input argument × π.
extern "C"
// Calculate the complementary error function of the input argument.
extern "C"
// Calculate the inverse complementary error function of the input argument.
extern "C"
// Calculate the scaled complementary error function of the input argument.
extern "C"
// Calculate the error function of the input argument.
extern "C"
// Calculate the inverse error function of the input argument.
extern "C"
// Calculate the base 10 exponential of the input argument.
extern "C"
// Calculate the base 2 exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
// Calculate the absolute value of its argument.
extern "C"
// Calculate the largest integer less than or equal to x.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
// Calculate the base 10 logarithm of the input argument.
extern "C"
// Calculate the value of l o g e ( 1 + x ) .
extern "C"
// Calculate the base 2 logarithm of the input argument.
extern "C"
// Calculate the floating point representation of the exponent of the input argument.
extern "C"
// Calculate the natural logarithm of the input argument.
extern "C"
// Calculate the standard normal cumulative distribution function.
extern "C"
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
// Calculate reciprocal cube root function.
extern "C"
// Round input to nearest integer value in floating-point.
extern "C"
// Round to nearest integer value in floating-point.
extern "C"
// Calculate the reciprocal of the square root of the input argument.
extern "C"
// Calculate the sine of the input argument.
extern "C"
// Calculate the hyperbolic sine of the input argument.
extern "C"
// Calculate the sine of the input argument × π.
extern "C"
// Calculate the square root of the input argument.
extern "C"
// Calculate the tangent of the input argument.
extern "C"
// Calculate the hyperbolic tangent of the input argument.
extern "C"
// Calculate the gamma function of the input argument.
extern "C"
// Truncate input argument to the integral part.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
// Compute the positive difference between x and y.
extern "C"
// Divide two floating point values.
extern "C"
// Determine the maximum numeric value of the arguments.
extern "C"
// Determine the minimum numeric value of the arguments.
extern "C"
// Calculate the floating-point remainder of x / y.
extern "C"
// Calculate the square root of the sum of squares of two arguments.
extern "C"
// Return next representable single-precision doubleing-point value afer argument.
extern "C"
// Calculate the value of first argument to the power of second argument.
extern "C"
// Compute double-precision floating-point remainder.
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//WARNING : device_sum size should be gridDim.x
__global__ void vec_scalarSub (int n, double *result, double x, double *y)
{
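// Flatten the 2D launch grid into a linear element index and compute result[id] = x - y[id].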
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
int id = idy * gridDim.x * blockDim.x + idx;
if (id < n)
{
result[id] = x - y[id];
}
} |
d038238c69a742756b624828ce2d1fe28bf1766d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cassert>
#include <iostream>
#include "CUDACore/cudastdAlgorithm.h"
#include "CUDACore/launch.h"
__global__ void testBinaryFind() {
int data[] = {1, 1, 2, 3, 3, 3, 3, 4, 4, 4, 5, 5, 6};
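// lower_bound/upper_bound bracket the run of 4s, so their distance is the number of occurrences.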
auto lower = cuda_std::lower_bound(data, data + 13, 4);
auto upper = cuda_std::upper_bound(data, data + 12, 4);
assert(3 == upper - lower);
// classic binary search, returning a value only if it is present
constexpr int data2[] = {1, 2, 4, 6, 9, 10};
assert(data2 + 2 == cuda_std::binary_find(data2, data2 + 6, 4));
assert(data2 + 6 == cuda_std::binary_find(data2, data2 + 6, 5));
}
void wrapper() { cms::cuda::launch(testBinaryFind, {32, 64}); }
int main() { wrapper(); }
| d038238c69a742756b624828ce2d1fe28bf1766d.cu | #include <cassert>
#include <iostream>
#include "CUDACore/cudastdAlgorithm.h"
#include "CUDACore/launch.h"
__global__ void testBinaryFind() {
int data[] = {1, 1, 2, 3, 3, 3, 3, 4, 4, 4, 5, 5, 6};
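// lower_bound/upper_bound bracket the run of 4s, so their distance is the number of occurrences.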
auto lower = cuda_std::lower_bound(data, data + 13, 4);
auto upper = cuda_std::upper_bound(data, data + 12, 4);
assert(3 == upper - lower);
// classic binary search, returning a value only if it is present
constexpr int data2[] = {1, 2, 4, 6, 9, 10};
assert(data2 + 2 == cuda_std::binary_find(data2, data2 + 6, 4));
assert(data2 + 6 == cuda_std::binary_find(data2, data2 + 6, 5));
}
void wrapper() { cms::cuda::launch(testBinaryFind, {32, 64}); }
int main() { wrapper(); }
|
f2c8823b30f87bf481fcd7062156c02d0d5563d1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define MAKE_NAME(prefix, fun, T) prefix ## _ ## fun ## _ ## T
#define MAP_FUN_1(fun, T) \
extern "C" \
__global__ void MAKE_NAME(map, fun, T) (int length,\
T *out, int outStride,\
const T *in, int inStride) {\
for(int i = threadIdx.x + blockIdx.x * blockDim.x; i < length; i += blockDim.x * gridDim.x) {\
out[i * outStride] = fun(in[i * inStride]);\
}\
}
#define MAP_FUN_2(fun, T) \
extern "C" \
__global__ void MAKE_NAME(map2, fun, T) (int length,\
T *out, int outStride,\
const T *a, int aStride,\
const T *b, int bStride) {\
for(int col = threadIdx.x + blockIdx.x * blockDim.x; col < length; col += blockDim.x * gridDim.x) {\
out[col * outStride] = fun(a[col * aStride], b[col * bStride]);\
}\
}\
\
extern "C" \
__global__ void MAKE_NAME(map2_v_s, fun, T) (int length,\
T *out, int outStride,\
const T *a, int aStride,\
const T b) {\
for(int col = threadIdx.x + blockIdx.x * blockDim.x; col < length; col += blockDim.x * gridDim.x) {\
out[col * outStride] = fun(a[col * aStride], b);\
}\
}\
\
extern "C" \
__global__ void MAKE_NAME(map2_s_v, fun, T) (int length,\
T *out, int outStride,\
const T a,\
const T *b, int bStride) {\
for(int col = threadIdx.x + blockIdx.x * blockDim.x; col < length; col += blockDim.x * gridDim.x) {\
out[col * outStride] = fun(a, b[col * bStride]);\
}\
}\
static __inline__ __device__ double shfl_down(double var, int delta, int width=warpSize)
{
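// __shfl_down only moves 32-bit values, so split the double into hi/lo words, shuffle each, and reassemble.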
int hi, lo;
asm volatile( "mov.b64 { %0, %1 }, %2;" : "=r"(lo), "=r"(hi) : "d"(var) );
hi = __shfl_down( hi, delta, width );
lo = __shfl_down( lo, delta, width );
return __hiloint2double( hi, lo );
}
static __inline__ __device__ int shfl_down(int var, int delta, int width=warpSize)
{
return __shfl_down(var, delta, width);
}
static __inline__ __device__ unsigned int shfl_down(unsigned int var, int delta, int width=warpSize)
{
int x = __shfl_down(*(int*)&var, delta, width);
return *(unsigned int*)(&x);
}
static __inline__ __device__ float shfl_down(float var, int delta, int width=warpSize)
{
return __shfl_down(var, delta, width);
}
#define laneId (threadIdx.x & 0x1f)
#define REDUCE_FUN(fun, T, identity) \
/* Each column gets 1 block of threads. TODO currently blocksize must be 1 warp*/\
extern "C" \
__global__ void MAKE_NAME(reduce, fun, T) (int length,\
T *out,\
const T *in, int inStride) {\
\
T sum = identity;\
for(int col = threadIdx.x + blockIdx.x * blockDim.x; col < length; col += blockDim.x * gridDim.x) {\
sum = fun(sum, in[col * inStride]);\
}\
\
__syncthreads();\
for (int i = 1; i < blockDim.x; i *= 2) {\
T x = shfl_down(sum, i);\
sum = fun(sum, x);\
}\
\
if(laneId == 0) {\
out[blockIdx.x] = sum;\
}\
}
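// function_decls.cuh (not shown here) presumably instantiates these macros for concrete math
// functions; e.g. MAP_FUN_1(sqrt, double) would generate a kernel named map_sqrt_double that
// applies sqrt element-wise with the given input/output strides.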
#include "function_decls.cuh"
| f2c8823b30f87bf481fcd7062156c02d0d5563d1.cu | #include <stdio.h>
#define MAKE_NAME(prefix, fun, T) prefix ## _ ## fun ## _ ## T
#define MAP_FUN_1(fun, T) \
extern "C" \
__global__ void MAKE_NAME(map, fun, T) (int length,\
T *out, int outStride,\
const T *in, int inStride) {\
for(int i = threadIdx.x + blockIdx.x * blockDim.x; i < length; i += blockDim.x * gridDim.x) {\
out[i * outStride] = fun(in[i * inStride]);\
}\
}
#define MAP_FUN_2(fun, T) \
extern "C" \
__global__ void MAKE_NAME(map2, fun, T) (int length,\
T *out, int outStride,\
const T *a, int aStride,\
const T *b, int bStride) {\
for(int col = threadIdx.x + blockIdx.x * blockDim.x; col < length; col += blockDim.x * gridDim.x) {\
out[col * outStride] = fun(a[col * aStride], b[col * bStride]);\
}\
}\
\
extern "C" \
__global__ void MAKE_NAME(map2_v_s, fun, T) (int length,\
T *out, int outStride,\
const T *a, int aStride,\
const T b) {\
for(int col = threadIdx.x + blockIdx.x * blockDim.x; col < length; col += blockDim.x * gridDim.x) {\
out[col * outStride] = fun(a[col * aStride], b);\
}\
}\
\
extern "C" \
__global__ void MAKE_NAME(map2_s_v, fun, T) (int length,\
T *out, int outStride,\
const T a,\
const T *b, int bStride) {\
for(int col = threadIdx.x + blockIdx.x * blockDim.x; col < length; col += blockDim.x * gridDim.x) {\
out[col * outStride] = fun(a, b[col * bStride]);\
}\
}\
static __inline__ __device__ double shfl_down(double var, int delta, int width=warpSize)
{
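// __shfl_down only moves 32-bit values, so split the double into hi/lo words, shuffle each, and reassemble.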
int hi, lo;
asm volatile( "mov.b64 { %0, %1 }, %2;" : "=r"(lo), "=r"(hi) : "d"(var) );
hi = __shfl_down( hi, delta, width );
lo = __shfl_down( lo, delta, width );
return __hiloint2double( hi, lo );
}
static __inline__ __device__ int shfl_down(int var, int delta, int width=warpSize)
{
return __shfl_down(var, delta, width);
}
static __inline__ __device__ unsigned int shfl_down(unsigned int var, int delta, int width=warpSize)
{
int x = __shfl_down(*(int*)&var, delta, width);
return *(unsigned int*)(&x);
}
static __inline__ __device__ float shfl_down(float var, int delta, int width=warpSize)
{
return __shfl_down(var, delta, width);
}
#define laneId (threadIdx.x & 0x1f)
#define REDUCE_FUN(fun, T, identity) \
/* Each column gets 1 block of threads. TODO currently blocksize must be 1 warp*/\
extern "C" \
__global__ void MAKE_NAME(reduce, fun, T) (int length,\
T *out,\
const T *in, int inStride) {\
\
T sum = identity;\
for(int col = threadIdx.x + blockIdx.x * blockDim.x; col < length; col += blockDim.x * gridDim.x) {\
sum = fun(sum, in[col * inStride]);\
}\
\
__syncthreads();\
for (int i = 1; i < blockDim.x; i *= 2) {\
T x = shfl_down(sum, i);\
sum = fun(sum, x);\
}\
\
if(laneId == 0) {\
out[blockIdx.x] = sum;\
}\
}
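// function_decls.cuh (not shown here) presumably instantiates these macros for concrete math
// functions; e.g. MAP_FUN_1(sqrt, double) would generate a kernel named map_sqrt_double that
// applies sqrt element-wise with the given input/output strides.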
#include "function_decls.cuh"
|
7380a4cc26b3334c37c78ea2874821f759b76a0f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <THH/THH.h>
// CUDA: grid stride looping
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
// Use 1024 threads per block, which requires cuda sm_2x or above
const int CUDA_NUM_THREADS = 1024;
// CUDA: number of blocks for threads.
inline int GET_BLOCKS(const int N) {
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
__global__ void LRNFillScale(const int nthreads, const float* in,
const int num, const int channels, const int height,
const int width, const int size, const float alpha_over_size,
const float k, float* scale) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local offset
int w = index % width;
int h = (index / width) % height;
int n = index / width / height;
int offset = (n * channels * height + h) * width + w;
int step = height * width;
in += offset;
scale += offset;
int head = 0;
int pre_pad = (size - 1) / 2;
int post_pad = size - pre_pad - 1;
float accum_scale = 0;
// fill the scale at [n, :, h, w]
// accumulate values
while (head < post_pad) {
accum_scale += in[head * step] * in[head * step];
++head;
}
// until we reach size, nothing needs to be subtracted
while (head < size) {
accum_scale += in[head * step] * in[head * step];
scale[(head - post_pad) * step] = k + accum_scale * alpha_over_size;
++head;
}
// both add and subtract
while (head < channels) {
accum_scale += in[head * step] * in[head * step];
accum_scale -= in[(head - size) * step] * in[(head - size) * step];
scale[(head - post_pad) * step] = k + accum_scale * alpha_over_size;
++head;
}
// subtract only
while (head < channels + post_pad) {
accum_scale -= in[(head - size) * step] * in[(head - size) * step];
scale[(head - post_pad) * step] = k + accum_scale * alpha_over_size;
++head;
}
}
}
__global__ void LRNComputeOutput(const int nthreads, const float* in,
const float* scale, const float negative_beta, float* out) {
CUDA_KERNEL_LOOP(index, nthreads) {
out[index] = in[index] * pow(scale[index], negative_beta);
}
}
__global__ void LRNComputeDiff(const int nthreads, const float* bottom_data,
const float* top_data, const float* scale, const float* top_diff,
const int num, const int channels, const int height,
const int width, const int size, const float negative_beta,
const float cache_ratio,
float* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local offset
int w = index % width;
int h = (index / width) % height;
int n = index / width / height;
int offset = (n * channels * height + h) * width + w;
int step = height * width;
bottom_data += offset;
top_data += offset;
scale += offset;
top_diff += offset;
bottom_diff += offset;
int head = 0;
int pre_pad = size - (size + 1) / 2;
int post_pad = size - pre_pad - 1;
float accum_ratio = 0;
// accumulate values
while (head < post_pad) {
accum_ratio += top_diff[head * step] * top_data[head * step] /
scale[head * step];
++head;
}
// until we reach size, nothing needs to be subtracted
while (head < size) {
accum_ratio += top_diff[head * step] * top_data[head * step] /
scale[head * step];
bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step]
* pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio *
bottom_data[(head - post_pad) * step] * accum_ratio;
++head;
}
// both add and subtract
while (head < channels) {
accum_ratio += top_diff[head * step] * top_data[head * step] /
scale[head * step];
accum_ratio -= top_diff[(head - size) * step] *
top_data[(head - size) * step] / scale[(head - size) * step];
bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step]
* pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio *
bottom_data[(head - post_pad) * step] * accum_ratio;
++head;
}
// subtract only
while (head < channels + post_pad) {
accum_ratio -= top_diff[(head - size) * step] *
top_data[(head - size) * step] / scale[(head - size) * step];
bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step]
* pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio *
bottom_data[(head - post_pad) * step] * accum_ratio;
++head;
}
}
}
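// Host entry point for cross-channel LRN: fills the per-element scale tensor
// (k + alpha/local_size * windowed sum of squares) and computes output = input * scale^(-beta).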
extern "C"
void LRNforward(THCState* state, THCudaTensor* input, THCudaTensor* output,
THCudaTensor* scale, int local_size, float alpha, float beta, float k)
{
THCudaTensor_resizeAs(state, output, input);
THCudaTensor_resizeAs(state, scale, input);
int batchSize;
int nInputPlane;
int imsize_h;
int imsize_w;
if (input->nDimension == 3) {
batchSize = 1;
nInputPlane = input->size[0];
imsize_h = input->size[1];
imsize_w = input->size[2];
}
else
{
batchSize = input->size[0];
nInputPlane = input->size[1];
imsize_h = input->size[2];
imsize_w = input->size[3];
}
input = THCudaTensor_newContiguous(state, input);
int n_threads = batchSize * imsize_h * imsize_w;
hipLaunchKernelGGL(( LRNFillScale), dim3(GET_BLOCKS(n_threads)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state),
n_threads, THCudaTensor_data(state, input), batchSize, nInputPlane, imsize_h, imsize_w, local_size,
alpha / local_size, k, THCudaTensor_data(state, scale));
n_threads *= nInputPlane;
hipLaunchKernelGGL(( LRNComputeOutput), dim3(GET_BLOCKS(n_threads)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state),
n_threads, THCudaTensor_data(state, input), THCudaTensor_data(state, scale), -beta, THCudaTensor_data(state, output));
THCudaTensor_free(state, input);
}
extern "C"
void LRNbackward(THCState* state, THCudaTensor* input, THCudaTensor* output,
THCudaTensor* gradOutput, THCudaTensor* gradInput, THCudaTensor* scale,
int local_size, float alpha, float beta, float k)
{
THCudaTensor_resizeAs(state, gradInput, input);
int batchSize;
int nInputPlane;
int imsize_h;
int imsize_w;
if (input->nDimension == 3) {
batchSize = 1;
nInputPlane = input->size[0];
imsize_h = input->size[1];
imsize_w = input->size[2];
}
else
{
batchSize = input->size[0];
nInputPlane = input->size[1];
imsize_h = input->size[2];
imsize_w = input->size[3];
}
input = THCudaTensor_newContiguous(state, input);
gradOutput = THCudaTensor_newContiguous(state, gradOutput);
int n_threads = batchSize * imsize_h * imsize_w;
hipLaunchKernelGGL(( LRNComputeDiff), dim3(GET_BLOCKS(n_threads)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state),
n_threads, THCudaTensor_data(state, input), THCudaTensor_data(state, output),
THCudaTensor_data(state, scale), THCudaTensor_data(state, gradOutput), batchSize, nInputPlane, imsize_h, imsize_w,
local_size, -beta, float(2. * alpha * beta / local_size),
THCudaTensor_data(state, gradInput));
THCudaTensor_free(state, input);
THCudaTensor_free(state, gradOutput);
}
| 7380a4cc26b3334c37c78ea2874821f759b76a0f.cu | #include <THC/THC.h>
// CUDA: grid stride looping
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
// Use 1024 threads per block, which requires cuda sm_2x or above
const int CUDA_NUM_THREADS = 1024;
// CUDA: number of blocks for threads.
inline int GET_BLOCKS(const int N) {
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
__global__ void LRNFillScale(const int nthreads, const float* in,
const int num, const int channels, const int height,
const int width, const int size, const float alpha_over_size,
const float k, float* scale) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local offset
int w = index % width;
int h = (index / width) % height;
int n = index / width / height;
int offset = (n * channels * height + h) * width + w;
int step = height * width;
in += offset;
scale += offset;
int head = 0;
int pre_pad = (size - 1) / 2;
int post_pad = size - pre_pad - 1;
float accum_scale = 0;
// fill the scale at [n, :, h, w]
// accumulate values
while (head < post_pad) {
accum_scale += in[head * step] * in[head * step];
++head;
}
// until we reach size, nothing needs to be subtracted
while (head < size) {
accum_scale += in[head * step] * in[head * step];
scale[(head - post_pad) * step] = k + accum_scale * alpha_over_size;
++head;
}
// both add and subtract
while (head < channels) {
accum_scale += in[head * step] * in[head * step];
accum_scale -= in[(head - size) * step] * in[(head - size) * step];
scale[(head - post_pad) * step] = k + accum_scale * alpha_over_size;
++head;
}
// subtract only
while (head < channels + post_pad) {
accum_scale -= in[(head - size) * step] * in[(head - size) * step];
scale[(head - post_pad) * step] = k + accum_scale * alpha_over_size;
++head;
}
}
}
__global__ void LRNComputeOutput(const int nthreads, const float* in,
const float* scale, const float negative_beta, float* out) {
CUDA_KERNEL_LOOP(index, nthreads) {
out[index] = in[index] * pow(scale[index], negative_beta);
}
}
__global__ void LRNComputeDiff(const int nthreads, const float* bottom_data,
const float* top_data, const float* scale, const float* top_diff,
const int num, const int channels, const int height,
const int width, const int size, const float negative_beta,
const float cache_ratio,
float* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local offset
int w = index % width;
int h = (index / width) % height;
int n = index / width / height;
int offset = (n * channels * height + h) * width + w;
int step = height * width;
bottom_data += offset;
top_data += offset;
scale += offset;
top_diff += offset;
bottom_diff += offset;
int head = 0;
int pre_pad = size - (size + 1) / 2;
int post_pad = size - pre_pad - 1;
float accum_ratio = 0;
// accumulate values
while (head < post_pad) {
accum_ratio += top_diff[head * step] * top_data[head * step] /
scale[head * step];
++head;
}
// until we reach size, nothing needs to be subtracted
while (head < size) {
accum_ratio += top_diff[head * step] * top_data[head * step] /
scale[head * step];
bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step]
* pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio *
bottom_data[(head - post_pad) * step] * accum_ratio;
++head;
}
// both add and subtract
while (head < channels) {
accum_ratio += top_diff[head * step] * top_data[head * step] /
scale[head * step];
accum_ratio -= top_diff[(head - size) * step] *
top_data[(head - size) * step] / scale[(head - size) * step];
bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step]
* pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio *
bottom_data[(head - post_pad) * step] * accum_ratio;
++head;
}
// subtract only
while (head < channels + post_pad) {
accum_ratio -= top_diff[(head - size) * step] *
top_data[(head - size) * step] / scale[(head - size) * step];
bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step]
* pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio *
bottom_data[(head - post_pad) * step] * accum_ratio;
++head;
}
}
}
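// Host entry point for cross-channel LRN: fills the per-element scale tensor
// (k + alpha/local_size * windowed sum of squares) and computes output = input * scale^(-beta).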
extern "C"
void LRNforward(THCState* state, THCudaTensor* input, THCudaTensor* output,
THCudaTensor* scale, int local_size, float alpha, float beta, float k)
{
THCudaTensor_resizeAs(state, output, input);
THCudaTensor_resizeAs(state, scale, input);
int batchSize;
int nInputPlane;
int imsize_h;
int imsize_w;
if (input->nDimension == 3) {
batchSize = 1;
nInputPlane = input->size[0];
imsize_h = input->size[1];
imsize_w = input->size[2];
}
else
{
batchSize = input->size[0];
nInputPlane = input->size[1];
imsize_h = input->size[2];
imsize_w = input->size[3];
}
input = THCudaTensor_newContiguous(state, input);
int n_threads = batchSize * imsize_h * imsize_w;
LRNFillScale<<<GET_BLOCKS(n_threads), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>(
n_threads, THCudaTensor_data(state, input), batchSize, nInputPlane, imsize_h, imsize_w, local_size,
alpha / local_size, k, THCudaTensor_data(state, scale));
n_threads *= nInputPlane;
LRNComputeOutput<<<GET_BLOCKS(n_threads), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>(
n_threads, THCudaTensor_data(state, input), THCudaTensor_data(state, scale), -beta, THCudaTensor_data(state, output));
THCudaTensor_free(state, input);
}
extern "C"
void LRNbackward(THCState* state, THCudaTensor* input, THCudaTensor* output,
THCudaTensor* gradOutput, THCudaTensor* gradInput, THCudaTensor* scale,
int local_size, float alpha, float beta, float k)
{
THCudaTensor_resizeAs(state, gradInput, input);
int batchSize;
int nInputPlane;
int imsize_h;
int imsize_w;
if (input->nDimension == 3) {
batchSize = 1;
nInputPlane = input->size[0];
imsize_h = input->size[1];
imsize_w = input->size[2];
}
else
{
batchSize = input->size[0];
nInputPlane = input->size[1];
imsize_h = input->size[2];
imsize_w = input->size[3];
}
input = THCudaTensor_newContiguous(state, input);
gradOutput = THCudaTensor_newContiguous(state, gradOutput);
int n_threads = batchSize * imsize_h * imsize_w;
LRNComputeDiff<<<GET_BLOCKS(n_threads), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>(
n_threads, THCudaTensor_data(state, input), THCudaTensor_data(state, output),
THCudaTensor_data(state, scale), THCudaTensor_data(state, gradOutput), batchSize, nInputPlane, imsize_h, imsize_w,
local_size, -beta, float(2. * alpha * beta / local_size),
THCudaTensor_data(state, gradInput));
THCudaTensor_free(state, input);
THCudaTensor_free(state, gradOutput);
}
|
f4a379e8ca1b3b88b0ad12acc4f8c2721705db74.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vec_cos.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
double *result = NULL;
hipMalloc(&result, XSIZE*YSIZE*sizeof(double)); // allocate element count * element size, not just the element count
double *x = NULL;
hipMalloc(&x, XSIZE*YSIZE*sizeof(double));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
vec_cos), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
vec_cos), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
vec_cos), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x);
}
hipDeviceSynchronize(); // wait for the timed launches to finish so the measurement covers execution, not just launch overhead
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
hipFree(result); // free the per-configuration buffers before the next iteration reallocates them
hipFree(x);
}
}} | f4a379e8ca1b3b88b0ad12acc4f8c2721705db74.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vec_cos.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
double *result = NULL;
cudaMalloc(&result, XSIZE*YSIZE*sizeof(double)); // allocate element count * element size, not just the element count
double *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE*sizeof(double));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
vec_cos<<<gridBlock,threadBlock>>>(n,result,x);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
vec_cos<<<gridBlock,threadBlock>>>(n,result,x);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
vec_cos<<<gridBlock,threadBlock>>>(n,result,x);
}
cudaDeviceSynchronize(); // wait for the timed launches to finish so the measurement covers execution, not just launch overhead
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
cudaFree(result); // free the per-configuration buffers before the next iteration reallocates them
cudaFree(x);
}
}} |
211d3507968bd391def4f4c4128dd5bcfcf98080.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************/
/* CUDA based TLED Solver */
/* (c) 2008-2010 Karsten Noe */
/* The Alexandra Institute */
/* See our blog on cg.alexandra.dk */
/***************************************************************************/
#define BLOCKSIZE 128
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <GL/glew.h>
#include <GL/glut.h>
#include <hipfft.h>
#include <cutil.h>
#include <cuda_gl_interop.h>
#include <hip/hip_runtime.h>
#include "Visualization_kernels.cu"
#include "FEM_kernels.cu"
#include "TetrahedralMesh.h"
#include "Vector3D.h"
#define NUM_VBO 10
//this is a bit of a hack
//a maximum of NUM_VBO elements can be displayed
static GLuint vbo[NUM_VBO] = {0,0,0,0,0,0,0,0,0,0};
static GLuint normalVbo[NUM_VBO] = {0,0,0,0,0,0,0,0,0,0};
namespace TLED {
TriangleSurface* loadSurfaceOBJ(const char* filename)
{
//todo: do a pass to check how large a buffer is needed;
FILE * pFile;
int numTriangles = 0;
pFile = fopen(filename,"r");
if (!pFile) return NULL;
unsigned char c;
while (!feof(pFile))
{
fscanf (pFile, "%c", &c);
if ( c == 'f' || c == 'F')
numTriangles++;
}
Triangle* triangles = (Triangle*) malloc(numTriangles*sizeof(Triangle));
numTriangles = 0;
fclose (pFile);
pFile = fopen(filename,"r");
if (!pFile) return NULL;
while (!feof(pFile))
{
fscanf (pFile, "%c", &c);
float tmp;
switch (c)
{
case 'v':
case 'V':
fscanf (pFile, "%f %f %f\n", &(tmp), &(tmp), &(tmp));
break;
case 'f':
case 'F':
fscanf (pFile, " %i %i %i", &(triangles[numTriangles].x), &(triangles[numTriangles].y), &(triangles[numTriangles].z));
//printf (" %i %i %i\n", (triangles[numTriangles].x), (triangles[numTriangles].y), (triangles[numTriangles].z));
numTriangles++;
break;
default: break; //printf("Unknown tag '%c' found in OBJ file\n", c);
}
}
fclose (pFile);
TriangleSurface *surface = (TriangleSurface*) malloc(sizeof(TriangleSurface));
hipMalloc((void**)&(surface->faces), sizeof(Triangle) *numTriangles);
hipMemcpy((surface->faces), triangles, sizeof(Triangle) *numTriangles, hipMemcpyHostToDevice);
surface->numFaces = numTriangles;
printf("Number of triangles: %i\n", surface->numFaces );
free(triangles);
return surface;
}
////////////////////////////////////////////////////////////////////////////////
//! Create VBO
////////////////////////////////////////////////////////////////////////////////
void
createVBO(GLuint* vbo, int numTriangles)
{
// create buffer object
glGenBuffers( 1, vbo);
glBindBuffer( GL_ARRAY_BUFFER, *vbo);
// initialize buffer object
unsigned int size = numTriangles* 3 * sizeof(float3);
glBufferData( GL_ARRAY_BUFFER, size, NULL, GL_DYNAMIC_DRAW);
glBindBuffer( GL_ARRAY_BUFFER, 0);
// register buffer object with CUDA
CUDA_SAFE_CALL(hipGLRegisterBufferObject(*vbo));
// CUT_CHECK_ERROR_GL();
}
void drawCoordinates(void)
{
//Draw coordinate axes
glEnable(GL_BLEND);
glEnable(GL_DEPTH_TEST);
//glTranslatef(0,-8,0);
glBegin(GL_LINES);
glColor3f(1,0,0);
glVertex3f(0,0,0);
glVertex3f(2,0,0);
glColor3f(0,1,0);
glVertex3f(0,0,0);
glVertex3f(0,2,0);
glColor3f(0,0,1);
glVertex3f(0,0,0);
glVertex3f(0,0,2);
glEnd();
//glTranslatef(0,8,0);
glDisable(GL_BLEND);
glDisable(GL_DEPTH_TEST);
}
void drawTrianglesFromVBO(GLuint vbo, GLuint normalVbo, int numTriangles, float4 color)
{
float4 ambcolor;
ambcolor.x = 1.0f * color.x;
ambcolor.y = 1.0f * color.y;
ambcolor.z = 1.0f * color.z;
ambcolor.w = 0.1f * color.w;
glMaterialfv(GL_FRONT, GL_DIFFUSE, (GLfloat*)&color);
glMaterialfv(GL_FRONT, GL_AMBIENT, (GLfloat*)&ambcolor);
glShadeModel(GL_FLAT);
glEnable(GL_DEPTH_TEST);
glEnable(GL_AUTO_NORMAL);
glEnable(GL_NORMALIZE);
// render from the vbo
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glVertexPointer(3, GL_FLOAT, 0, 0);
glEnableClientState(GL_VERTEX_ARRAY);
glBindBufferARB(GL_ARRAY_BUFFER, normalVbo);
glNormalPointer(GL_FLOAT, sizeof(float)*3, 0);
glEnableClientState(GL_NORMAL_ARRAY);
glDrawArrays(GL_TRIANGLES, 0,numTriangles * 3);
glDisableClientState(GL_VERTEX_ARRAY);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glDisable(GL_DEPTH_TEST);
}
void cleanupDisplay(void) {
printf("Deleting VBOs \n");
for (int i = 0; i<NUM_VBO; i++ )
hipGLUnregisterBufferObject(vbo[i]);
CUT_CHECK_ERROR("hipGLUnregisterBufferObject failed");
glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
for (int i = 0; i<NUM_VBO; i++ )
glDeleteBuffersARB(1, &vbo[i]);
for (int i = 0; i<NUM_VBO; i++ )
glDeleteBuffersARB(1, &normalVbo[i]);
printf("Exiting...\n");
//exit(0);
}
void display(unsigned int object_number, TetrahedralMesh* mesh, TetrahedralTLEDState *state, TriangleSurface* surface) {
int gridSize = (int)ceil(((float)surface->numFaces)/BLOCKSIZE);
if (vbo[object_number] == 0)
createVBO(&(vbo[object_number]), surface->numFaces);
if (normalVbo[object_number] == 0)
createVBO(&(normalVbo[object_number]), surface->numFaces);
float3 *d_pos, *d_normal;
CUDA_SAFE_CALL(hipGLMapBufferObject__( (void**)&d_pos, vbo[object_number]));
CUDA_SAFE_CALL(hipGLMapBufferObject__( (void**)&d_normal, normalVbo[object_number]));
//extractSurfaceWithDisplacements_k<<<make_uint3(tetSize,1,1), make_uint3(BLOCKSIZE,1,1)>>>(dptr, mesh->tetrahedra, mesh->points, state->Ui_t);
hipLaunchKernelGGL(( updateSurfacePositionsFromDisplacements_k), dim3(make_uint3(gridSize,1,1)), dim3(make_uint3(BLOCKSIZE,1,1)), 0, 0, d_pos, d_normal, *surface, mesh->points, state->Ui_t);
CUT_CHECK_ERROR("Error extracting surface");
// unmap buffer object
CUDA_SAFE_CALL(hipGLUnmapBufferObject( vbo[object_number]));
CUDA_SAFE_CALL(hipGLUnmapBufferObject( normalVbo[object_number]));
float4 color;
switch (object_number)
{
case 0 : color = make_float4(1,0,0,0.5); break;
case 1 : color = make_float4(0,0.5,0.5,0.5); break;
case 2 : color = make_float4(0,0,1,0.5); break;
case 3 : color = make_float4(0.5,0.5,0,0.5); break;
case 4 : color = make_float4(1,0,0,1); break;
case 5 : color = make_float4(1,0,0,1); break;
case 6 : color = make_float4(1,0,0,1); break;
case 7 : color = make_float4(1,0,0,1); break;
case 8 : color = make_float4(1,0,0,1); break;
case 9 : color = make_float4(1,0,0,1); break;
}
drawTrianglesFromVBO(vbo[object_number], normalVbo[object_number], surface->numFaces, color);
// drawCoordinates();
}
Tetrahedron fixTetrahedronOrientation(Tetrahedron tet, Point *hpoints)
{
Tetrahedron res;
//the x,y,z and w points are called a,b,c and d
res = tet;
Vector3D a = crop_last_dim(hpoints[tet.x]);
Vector3D b = crop_last_dim(hpoints[tet.y]);
Vector3D c = crop_last_dim(hpoints[tet.z]);
Vector3D d = crop_last_dim(hpoints[tet.w]);
Vector3D ab = b-a;
Vector3D ac = c-a;
Vector3D ad = d-a;
Vector3D abxac = cross(ab,ac);
float projection = dot(abxac, ad);
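	// projection is the scalar triple product (ab x ac) . ad, i.e. six times the signed
	// volume of the tetrahedron; a negative sign means the vertex ordering is left-handed,
	// so two vertices are swapped below to give every element a consistent orientation.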
if (projection<0)
{
// printf("Switching a and b\n");
res.x = tet.y;
res.y = tet.x;
}
return res;
}
//must return smallest length encountered
float CPUPrecalculation(TetrahedralMesh *mesh, int blockSize, int& return_maxNumForces, float density, float smallestAllowedVolume, float smallestAllowedLength)
{
float totalSmallestLengthSquared = 9e9f;
double totalVolume = 0.0;
Tetrahedron *htetrahedra = (Tetrahedron*)malloc(sizeof(Tetrahedron) * mesh->numTetrahedra);
Point *hpoints = (Point*)malloc(sizeof(Point) * mesh->numPoints);
//copy data structure back and compact
hipMemcpy(hpoints, mesh->points, sizeof(Point) * mesh->numPoints, hipMemcpyDeviceToHost);
hipMemcpy(htetrahedra, mesh->tetrahedra, sizeof(Tetrahedron) * mesh->numTetrahedra, hipMemcpyDeviceToHost);
int tmpPointCount = mesh->numPoints;
float* mass = (float*)malloc(sizeof(float) * tmpPointCount);
memset(mass, 0, sizeof(float) * tmpPointCount);
for (int i = 0; i < mesh->numTetrahedra; i++)
{
if (htetrahedra[i].x >= 0)
{
htetrahedra[i] = fixTetrahedronOrientation(htetrahedra[i],hpoints);
}
}
int tmpTetCount = mesh->numTetrahedra;
int4* writeIndices = (int4*)malloc(sizeof(int4) * tmpTetCount);
mesh->numWriteIndices = tmpTetCount;
Tetrahedron* tets = (Tetrahedron*)malloc(sizeof(Tetrahedron) * tmpTetCount);
float* initialVolume = (float*)malloc(sizeof(float) * tmpTetCount);
int* numForces = (int*)malloc(sizeof(int) * tmpPointCount);// the number of force contributions (num elements) for each node
int maxNumForces = 0; // the maximum number of force contributions (num elements) for any node
for (int i = 0; i < tmpPointCount; i++)
{
numForces[i] = 0;
}
int counter = 0;
for (int i = 0; i < mesh->numTetrahedra; i++)
{
if (htetrahedra[i].x >= 0 && htetrahedra[i].y >= 0 && htetrahedra[i].z >= 0 && htetrahedra[i].w >= 0)
{
tets[counter].x = htetrahedra[i].x;
tets[counter].y = htetrahedra[i].y;
tets[counter].z = htetrahedra[i].z;
tets[counter].w = htetrahedra[i].w;
writeIndices[counter].x = numForces[htetrahedra[i].x]++;
if (writeIndices[counter].x+1 > maxNumForces)
maxNumForces = writeIndices[counter].x+1;
writeIndices[counter].y = numForces[htetrahedra[i].y]++;
if (writeIndices[counter].y+1 > maxNumForces)
maxNumForces = writeIndices[counter].y+1;
writeIndices[counter].z = numForces[htetrahedra[i].z]++;
if (writeIndices[counter].z+1 > maxNumForces)
maxNumForces = writeIndices[counter].z+1;
writeIndices[counter].w = numForces[htetrahedra[i].w]++;
if (writeIndices[counter].w+1 > maxNumForces)
maxNumForces = writeIndices[counter].w+1;
// calculate volume and smallest length
Vector3D a = crop_last_dim(hpoints[htetrahedra[i].x]);
Vector3D b = crop_last_dim(hpoints[htetrahedra[i].y]);
Vector3D c = crop_last_dim(hpoints[htetrahedra[i].z]);
Vector3D d = crop_last_dim(hpoints[htetrahedra[i].w]);
Vector3D ab = b-a; // these 3 are used for volume calculation
Vector3D ac = c-a;
Vector3D ad = d-a;
Vector3D bc = c-b;
Vector3D cd = d-c;
Vector3D bd = d-b;
float smallestLengthSquared = ab.squaredLength();
float sql = ac.squaredLength();
if (sql<smallestLengthSquared) smallestLengthSquared = sql;
sql = ad.squaredLength();
if (sql<smallestLengthSquared) smallestLengthSquared = sql;
sql = bc.squaredLength();
if (sql<smallestLengthSquared) smallestLengthSquared = sql;
sql = cd.squaredLength();
if (sql<smallestLengthSquared) smallestLengthSquared = sql;
sql = bd.squaredLength();
if (sql<smallestLengthSquared) smallestLengthSquared = sql;
if (smallestLengthSquared <smallestAllowedLength*smallestAllowedLength)
{
continue;
}
if (smallestLengthSquared<totalSmallestLengthSquared)
totalSmallestLengthSquared = smallestLengthSquared;
Vector3D cross_product = cross(ab,ac);
float cross_length = cross_product.length();
//Length of vector ad projected onto cross product normal
float projected_length = dot(ad, cross_product/cross_length);
float volume = (1.0f/6.0f) * projected_length*cross_length;
if (volume<smallestAllowedVolume)
{
continue;
}
totalVolume += volume;
/*
static float smallestvolume = 100000;
if (volume<smallestvolume)
{
smallestvolume=volume;
printf("smallest element volume: %g\n", smallestvolume);
}
static float largestvolume = 0;
if (volume>largestvolume)
{
largestvolume=volume;
printf("largest element volume: %g\n", largestvolume);
}
*/
// printf("volume: %g\n", volume);
initialVolume[counter] = volume;
// if (volume<0.1)
// printf("volume: %f \n",volume);
mass[htetrahedra[i].x] += volume * 0.25f * density;
mass[htetrahedra[i].y] += volume * 0.25f * density;
mass[htetrahedra[i].z] += volume * 0.25f * density;
mass[htetrahedra[i].w] += volume * 0.25f * density;
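			// Lumped (diagonal) mass matrix: each of the four nodes receives a quarter of the
			// element mass rho*V, which is what makes the fully explicit update possible later.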
counter++;
}
}
// these are the padded ones
for (int i = counter; i < tmpTetCount; i++)
{
tets[i].x = -1;
tets[i].y = -1;
tets[i].z = -1;
tets[i].w = -1;
}
printf("Total volume: %f\n", totalVolume);
mesh->numTetrahedra = tmpTetCount;
//copy points to a padded array
hipFree(mesh->points);
hipMalloc((void**)&(mesh->points), sizeof(Point) * tmpPointCount);
hipMemcpy(mesh->points, hpoints, sizeof(Point) * mesh->numPoints, hipMemcpyHostToDevice);
mesh->numPoints = tmpPointCount;
free(hpoints);
free(htetrahedra);
// free(pointMap);
free(numForces);
hipFree(mesh->tetrahedra);
// for (int i=0; i<mesh->numPoints; i++)
// {
// printf("Vertex %i: %f, %f, %f\n", i, (points[i].x), (points[i].y), (points[i].z));
// }
hipMalloc((void**)&(mesh->tetrahedra), sizeof(Tetrahedron) * mesh->numTetrahedra);
hipMalloc((void**)&(mesh->writeIndices), sizeof(int4) * mesh->numWriteIndices);
hipMalloc((void**)&(mesh->volume), sizeof(float) * mesh->numTetrahedra);
hipMalloc((void**)&(mesh->mass), sizeof(float) * mesh->numPoints);
hipMemcpy(mesh->tetrahedra, tets, sizeof(Tetrahedron) * mesh->numTetrahedra, hipMemcpyHostToDevice);
hipMemcpy(mesh->writeIndices, writeIndices, sizeof(int4) * mesh->numWriteIndices, hipMemcpyHostToDevice);
hipMemcpy(mesh->volume, initialVolume, sizeof(float) * mesh->numTetrahedra, hipMemcpyHostToDevice);
hipMemcpy(mesh->mass, mass, sizeof(float) * mesh->numPoints, hipMemcpyHostToDevice);
/*
for (int i = 0; i < tmpPointCount; i++)
{
if (mass[i] == 0)
{
printf("warning: point without mass detected\n");
}
}
*/
// for (int i = 0; i < mesh->numWriteIndices; i++)
// {
// printf("%i, %i, %i, %i \n", writeIndices[i].x, writeIndices[i].y, writeIndices[i].z, writeIndices[i].w );
// }
CUT_CHECK_ERROR("Error deleting");
free(tets);
free(initialVolume);
free(writeIndices);
free(mass);
return_maxNumForces = maxNumForces;
return sqrtf(totalSmallestLengthSquared);
}
TetrahedralMesh* allocAndCopyMesh(Tetrahedron* hTets, int numTetrahedra, Point* hPoints, int numVertices)
{
Point *dPoints;
Tetrahedron *dTets;
hipMalloc((void**)&dPoints, sizeof(Point) *numVertices);
hipMalloc((void**)&dTets, sizeof(Tetrahedron)*numTetrahedra);
hipMemcpy(dPoints, hPoints, sizeof(Point) *numVertices, hipMemcpyHostToDevice);
hipMemcpy(dTets, hTets, sizeof(Tetrahedron)*numTetrahedra , hipMemcpyHostToDevice);
TetrahedralMesh* mesh = (TetrahedralMesh *) malloc(sizeof(TetrahedralMesh));
mesh->points = dPoints;
mesh->numPoints = numVertices;
mesh->tetrahedra = dTets;
mesh->numTetrahedra = numTetrahedra;
printf("Number of points: %i\n", mesh->numPoints);
printf("Number of tetrahedra: %i\n", mesh->numTetrahedra );
return mesh;
}
TetrahedralMesh* loadMesh(const char* filename)
{
FILE * pFile;
pFile = fopen(filename,"r");
if (!pFile) return NULL;
int numVertices;
int numTetrahedra;
fscanf (pFile, "%i\n", &numVertices);
fscanf (pFile, "%i\n", &numTetrahedra);
Tetrahedron* hTets = (Tetrahedron*) malloc(numTetrahedra*sizeof(Tetrahedron));
Point* hPoints = (Point*) malloc(numVertices*sizeof(Point));
for (int i=0; i<numVertices && !feof(pFile); i++)
{
Point newPoint;
fscanf (pFile, "%f %f %f\n", &(newPoint.x), &(newPoint.y), &(newPoint.z));
//printf("New vertex at %f, %f, %f\n", (newPoint.x), (newPoint.y), (newPoint.z));
/* newPoint.x *= 0.001;
newPoint.y *= 0.001;
newPoint.z *= 0.001;
*/
hPoints[i] = newPoint;
}
for (int i=0; i<numTetrahedra && !feof(pFile); i++)
{
Tetrahedron newTet;
fscanf (pFile, "%i %i %i %i\n", &(newTet.x), &(newTet.y), &(newTet.z), &(newTet.w));
//printf("New tetrahedron: %i, %i, %i, %i\n", (newTet.x), (newTet.y), (newTet.z), (newTet.w));
hTets[i]=newTet;
}
fclose (pFile);
TetrahedralMesh* mesh = allocAndCopyMesh(hTets, numTetrahedra, hPoints, numVertices);
free(hPoints);
free(hTets);
return mesh;
}
void copyStateToHost(TetrahedralTLEDState* state, TetrahedralMesh* mesh, Point* hPoints){
Point *dPoints = state->Ui_t;
hipMemcpy(hPoints, dPoints, sizeof(Point) *mesh->numPoints, hipMemcpyDeviceToHost);
}
void copyStateToDevice(TetrahedralTLEDState* state, TetrahedralMesh* mesh, Point* hPoints){
Point *dPoints = state->Ui_t;
hipMemcpy(dPoints, hPoints, sizeof(Point) *mesh->numPoints, hipMemcpyHostToDevice);
}
void calculateGravityForces(TetrahedralMesh* mesh, TetrahedralTLEDState *state)
{
int pointSize = (int)ceil(((float)mesh->numPoints)/BLOCKSIZE);
hipLaunchKernelGGL(( calculateDrivingForces_k), dim3(make_uint3(pointSize,1,1)), dim3(make_uint3(BLOCKSIZE,1,1)), 0, 0, mesh->points, mesh->mass, state->externalForces, mesh->numPoints);
}
void applyFloorConstraint(TetrahedralMesh* mesh, TetrahedralTLEDState *state, float floorZPosition)
{
int pointSize = (int)ceil(((float)mesh->numPoints)/BLOCKSIZE);
hipLaunchKernelGGL(( applyGroundConstraint_k), dim3(make_uint3(pointSize,1,1)), dim3(make_uint3(BLOCKSIZE,1,1)), 0, 0, mesh->points, state->Ui_t, state->Ui_tminusdt, floorZPosition, mesh->numPoints);
}
void calculateInternalForces(TetrahedralMesh* mesh, TetrahedralTLEDState *state)
{
int tetSize = (int)ceil(((float)mesh->numTetrahedra)/BLOCKSIZE);
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float4>();
hipBindTexture( 0, Ui_t_1d_tex, state->Ui_t, channelDesc );
hipChannelFormatDesc channelDesc2 = hipCreateChannelDesc<float>();
hipBindTexture( 0, V0_1d_tex, mesh->volume, channelDesc2 );
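	// The current displacements and reference volumes are bound to 1D textures here,
	// presumably so calculateForces_k can fetch them through the texture cache; both
	// textures are unbound again immediately after the launch.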
hipLaunchKernelGGL(( calculateForces_k), dim3(make_uint3(tetSize,1,1)), dim3(make_uint3(BLOCKSIZE,1,1)), 0, 0, (Matrix4x3 *)state->shape_function_deriv, mesh->tetrahedra, state->Ui_t, mesh->volume, mesh->writeIndices, state->pointForces, state->maxNumForces, state->mu, state->lambda, mesh->numTetrahedra);
hipUnbindTexture( V0_1d_tex );
hipUnbindTexture( Ui_t_1d_tex );
}
void doTimeStep(TetrahedralMesh* mesh, TetrahedralTLEDState *state)
{
int pointSize = (int)ceil(((float)mesh->numPoints)/BLOCKSIZE);
calculateInternalForces(mesh, state);
hipLaunchKernelGGL(( updateDisplacements_k), dim3(make_uint3(pointSize,1,1)), dim3(make_uint3(BLOCKSIZE,1,1)), 0, 0, state->Ui_t, state->Ui_tminusdt, mesh->mass, state->externalForces, state->pointForces, state->maxNumForces, state->ABC, mesh->numPoints);
float4 *temp = state->Ui_t;
state->Ui_t = state->Ui_tminusdt;
state->Ui_tminusdt = temp;
}
void precompute(TetrahedralMesh* mesh, TetrahedralTLEDState *state,
float density, float smallestAllowedVolume, float smallestAllowedLength,
float mu, float lambda, float timeStepFactor, float damping)
{
float smallestLength = CPUPrecalculation(mesh, BLOCKSIZE, state->maxNumForces, density, smallestAllowedVolume, smallestAllowedLength);
float nu = lambda/(2.0f*(lambda+mu));
float E = mu*(3.0f*lambda+2.0f*mu)/(lambda+mu);
float c = sqrt((E*(1.0f-nu))/(density*(1.0f-nu)*(1.0f-2.0f*nu)));
// the factor is to account for changes in c during deformation
float timeStep = timeStepFactor*smallestLength/c;
//float timeStep = 0.0001f;
printf("precompute: number of tetrahedra :%i \n",mesh->numTetrahedra);
state->mu = mu;
state->lambda = lambda;
int tetSize = (int)ceil(((float)mesh->numTetrahedra)/BLOCKSIZE);
int pointSize = (int)ceil(((float)mesh->numPoints)/BLOCKSIZE);
hipMalloc((void**)&(state->ABC), sizeof(float4) * mesh->numPoints);
hipMalloc((void**)&(state->Ui_t), sizeof(float4) * mesh->numPoints);
hipMalloc((void**)&(state->Ui_tminusdt), sizeof(float4) * mesh->numPoints);
hipMalloc((void**)&(state->pointForces), state->maxNumForces * sizeof(float4) * mesh->numPoints);
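	// pointForces is a scatter buffer with maxNumForces slots per node: each tetrahedron
	// writes its four nodal force contributions into the slots recorded in writeIndices,
	// and updateDisplacements_k later sums the slots, which avoids the need for atomic
	// adds in the force kernel.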
hipMalloc((void**)&(state->externalForces), sizeof(float4) * mesh->numPoints);
hipError_t err = hipGetLastError();
if( err != hipSuccess ){
printf("\nCuda error detected in 'TLEDSolver::precompute': %s. Quitting.\n", hipGetErrorString(err) ); fflush(stdout);
exit(1);
}
hipMemset(state->pointForces, 0, sizeof(float4) * state->maxNumForces * mesh->numPoints);
hipMemset(state->externalForces, 0, sizeof(float4) * mesh->numPoints);
hipMemset(state->Ui_t, 0, sizeof(float4) * mesh->numPoints);
hipMemset(state->Ui_tminusdt, 0, sizeof(float4) * mesh->numPoints);
hipLaunchKernelGGL(( precalculateABC), dim3(make_uint3(pointSize,1,1)), dim3(make_uint3(BLOCKSIZE,1,1)), 0, 0, state->ABC, mesh->mass, timeStep, damping, mesh->numPoints);
hipMalloc((void**)&(state->shape_function_deriv), sizeof(ShapeFunctionDerivatives) * mesh->numTetrahedra);
hipLaunchKernelGGL(( precalculateShapeFunctionDerivatives_k), dim3(make_uint3(tetSize,1,1)), dim3(make_uint3(BLOCKSIZE,1,1)), 0, 0, state->shape_function_deriv,
mesh->tetrahedra, mesh->points, mesh->numTetrahedra);
err = hipGetLastError();
if( err != hipSuccess ){
printf("\nCuda error detected in 'TLEDSolver::precompute': %s. Quitting.\n", hipGetErrorString(err) ); fflush(stdout);
exit(1);
}
state->timeStep = timeStep;
}
}
| 211d3507968bd391def4f4c4128dd5bcfcf98080.cu | /***************************************************************************/
/* CUDA based TLED Solver */
/* {c} 2008-2010 Karsten Noe */
/* The Alexandra Institute */
/* See our blog on cg.alexandra.dk */
/***************************************************************************/
#define BLOCKSIZE 128
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <GL/glew.h>
#include <GL/glut.h>
#include <cufft.h>
#include <cutil.h>
#include <cuda_gl_interop.h>
#include <cuda.h>
#include "Visualization_kernels.cu"
#include "FEM_kernels.cu"
#include "TetrahedralMesh.h"
#include "Vector3D.h"
#define NUM_VBO 10
//this is a bit of a hack
//a maximum of NUM_VBO elements can be displayed
static GLuint vbo[NUM_VBO] = {0,0,0,0,0,0,0,0,0,0};
static GLuint normalVbo[NUM_VBO] = {0,0,0,0,0,0,0,0,0,0};
namespace TLED {
TriangleSurface* loadSurfaceOBJ(const char* filename)
{
//todo: do a pass to check how large a buffer is needed;
FILE * pFile;
int numTriangles = 0;
pFile = fopen(filename,"r");
if (!pFile) return NULL;
unsigned char c;
while (!feof(pFile))
{
fscanf (pFile, "%c", &c);
if ( c == 'f' || c == 'F')
numTriangles++;
}
Triangle* triangles = (Triangle*) malloc(numTriangles*sizeof(Triangle));
numTriangles = 0;
fclose (pFile);
pFile = fopen(filename,"r");
if (!pFile) return NULL;
while (!feof(pFile))
{
fscanf (pFile, "%c", &c);
float tmp;
switch (c)
{
case 'v':
case 'V':
fscanf (pFile, "%f %f %f\n", &(tmp), &(tmp), &(tmp));
break;
case 'f':
case 'F':
fscanf (pFile, " %i %i %i", &(triangles[numTriangles].x), &(triangles[numTriangles].y), &(triangles[numTriangles].z));
//printf (" %i %i %i\n", (triangles[numTriangles].x), (triangles[numTriangles].y), (triangles[numTriangles].z));
numTriangles++;
break;
default: break; //printf("Unknown tag '%c' found in OBJ file\n", c);
}
}
fclose (pFile);
TriangleSurface *surface = (TriangleSurface*) malloc(sizeof(TriangleSurface));
cudaMalloc((void**)&(surface->faces), sizeof(Triangle) *numTriangles);
cudaMemcpy((surface->faces), triangles, sizeof(Triangle) *numTriangles, cudaMemcpyHostToDevice);
surface->numFaces = numTriangles;
printf("Number of triangles: %i\n", surface->numFaces );
free(triangles);
return surface;
}
////////////////////////////////////////////////////////////////////////////////
//! Create VBO
////////////////////////////////////////////////////////////////////////////////
void
createVBO(GLuint* vbo, int numTriangles)
{
// create buffer object
glGenBuffers( 1, vbo);
glBindBuffer( GL_ARRAY_BUFFER, *vbo);
// initialize buffer object
unsigned int size = numTriangles* 3 * sizeof(float3);
glBufferData( GL_ARRAY_BUFFER, size, NULL, GL_DYNAMIC_DRAW);
glBindBuffer( GL_ARRAY_BUFFER, 0);
// register buffer object with CUDA
CUDA_SAFE_CALL(cudaGLRegisterBufferObject(*vbo));
// CUT_CHECK_ERROR_GL();
}
void drawCoordinates(void)
{
//Draw coordinate axes
glEnable(GL_BLEND);
glEnable(GL_DEPTH_TEST);
//glTranslatef(0,-8,0);
glBegin(GL_LINES);
glColor3f(1,0,0);
glVertex3f(0,0,0);
glVertex3f(2,0,0);
glColor3f(0,1,0);
glVertex3f(0,0,0);
glVertex3f(0,2,0);
glColor3f(0,0,1);
glVertex3f(0,0,0);
glVertex3f(0,0,2);
glEnd();
//glTranslatef(0,8,0);
glDisable(GL_BLEND);
glDisable(GL_DEPTH_TEST);
}
void drawTrianglesFromVBO(GLuint vbo, GLuint normalVbo, int numTriangles, float4 color)
{
float4 ambcolor;
ambcolor.x = 1.0f * color.x;
ambcolor.y = 1.0f * color.y;
ambcolor.z = 1.0f * color.z;
ambcolor.w = 0.1f * color.w;
glMaterialfv(GL_FRONT, GL_DIFFUSE, (GLfloat*)&color);
glMaterialfv(GL_FRONT, GL_AMBIENT, (GLfloat*)&ambcolor);
glShadeModel(GL_FLAT);
glEnable(GL_DEPTH_TEST);
glEnable(GL_AUTO_NORMAL);
glEnable(GL_NORMALIZE);
// render from the vbo
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glVertexPointer(3, GL_FLOAT, 0, 0);
glEnableClientState(GL_VERTEX_ARRAY);
glBindBufferARB(GL_ARRAY_BUFFER, normalVbo);
glNormalPointer(GL_FLOAT, sizeof(float)*3, 0);
glEnableClientState(GL_NORMAL_ARRAY);
glDrawArrays(GL_TRIANGLES, 0,numTriangles * 3);
glDisableClientState(GL_VERTEX_ARRAY);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glDisable(GL_DEPTH_TEST);
}
void cleanupDisplay(void) {
printf("Deleting VBOs \n");
for (int i = 0; i<NUM_VBO; i++ )
cudaGLUnregisterBufferObject(vbo[i]);
CUT_CHECK_ERROR("cudaGLUnregisterBufferObject failed");
glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
for (int i = 0; i<NUM_VBO; i++ )
glDeleteBuffersARB(1, &vbo[i]);
for (int i = 0; i<NUM_VBO; i++ )
glDeleteBuffersARB(1, &normalVbo[i]);
printf("Exiting...\n");
//exit(0);
}
void display(unsigned int object_number, TetrahedralMesh* mesh, TetrahedralTLEDState *state, TriangleSurface* surface) {
int gridSize = (int)ceil(((float)surface->numFaces)/BLOCKSIZE);
if (vbo[object_number] == 0)
createVBO(&(vbo[object_number]), surface->numFaces);
if (normalVbo[object_number] == 0)
createVBO(&(normalVbo[object_number]), surface->numFaces);
float3 *d_pos, *d_normal;
CUDA_SAFE_CALL(cudaGLMapBufferObject( (void**)&d_pos, vbo[object_number]));
CUDA_SAFE_CALL(cudaGLMapBufferObject( (void**)&d_normal, normalVbo[object_number]));
//extractSurfaceWithDisplacements_k<<<make_uint3(tetSize,1,1), make_uint3(BLOCKSIZE,1,1)>>>(dptr, mesh->tetrahedra, mesh->points, state->Ui_t);
updateSurfacePositionsFromDisplacements_k<<<make_uint3(gridSize,1,1), make_uint3(BLOCKSIZE,1,1)>>>(d_pos, d_normal, *surface, mesh->points, state->Ui_t);
CUT_CHECK_ERROR("Error extracting surface");
// unmap buffer object
CUDA_SAFE_CALL(cudaGLUnmapBufferObject( vbo[object_number]));
CUDA_SAFE_CALL(cudaGLUnmapBufferObject( normalVbo[object_number]));
float4 color;
switch (object_number)
{
case 0 : color = make_float4(1,0,0,0.5); break;
case 1 : color = make_float4(0,0.5,0.5,0.5); break;
case 2 : color = make_float4(0,0,1,0.5); break;
case 3 : color = make_float4(0.5,0.5,0,0.5); break;
case 4 : color = make_float4(1,0,0,1); break;
case 5 : color = make_float4(1,0,0,1); break;
case 6 : color = make_float4(1,0,0,1); break;
case 7 : color = make_float4(1,0,0,1); break;
case 8 : color = make_float4(1,0,0,1); break;
case 9 : color = make_float4(1,0,0,1); break;
}
drawTrianglesFromVBO(vbo[object_number], normalVbo[object_number], surface->numFaces, color);
// drawCoordinates();
}
Tetrahedron fixTetrahedronOrientation(Tetrahedron tet, Point *hpoints)
{
Tetrahedron res;
//the x,y,z and w points are called a,b,c and d
res = tet;
Vector3D a = crop_last_dim(hpoints[tet.x]);
Vector3D b = crop_last_dim(hpoints[tet.y]);
Vector3D c = crop_last_dim(hpoints[tet.z]);
Vector3D d = crop_last_dim(hpoints[tet.w]);
Vector3D ab = b-a;
Vector3D ac = c-a;
Vector3D ad = d-a;
Vector3D abxac = cross(ab,ac);
float projection = dot(abxac, ad);
if (projection<0)
{
// printf("Switching a and b\n");
res.x = tet.y;
res.y = tet.x;
}
return res;
}
//must return smallest length encountered
float CPUPrecalculation(TetrahedralMesh *mesh, int blockSize, int& return_maxNumForces, float density, float smallestAllowedVolume, float smallestAllowedLength)
{
float totalSmallestLengthSquared = 9e9f;
double totalVolume = 0.0;
Tetrahedron *htetrahedra = (Tetrahedron*)malloc(sizeof(Tetrahedron) * mesh->numTetrahedra);
Point *hpoints = (Point*)malloc(sizeof(Point) * mesh->numPoints);
//copy data structure back and compact
cudaMemcpy(hpoints, mesh->points, sizeof(Point) * mesh->numPoints, cudaMemcpyDeviceToHost);
cudaMemcpy(htetrahedra, mesh->tetrahedra, sizeof(Tetrahedron) * mesh->numTetrahedra, cudaMemcpyDeviceToHost);
int tmpPointCount = mesh->numPoints;
float* mass = (float*)malloc(sizeof(float) * tmpPointCount);
memset(mass, 0, sizeof(float) * tmpPointCount);
for (int i = 0; i < mesh->numTetrahedra; i++)
{
if (htetrahedra[i].x >= 0)
{
htetrahedra[i] = fixTetrahedronOrientation(htetrahedra[i],hpoints);
}
}
int tmpTetCount = mesh->numTetrahedra;
int4* writeIndices = (int4*)malloc(sizeof(int4) * tmpTetCount);
mesh->numWriteIndices = tmpTetCount;
Tetrahedron* tets = (Tetrahedron*)malloc(sizeof(Tetrahedron) * tmpTetCount);
float* initialVolume = (float*)malloc(sizeof(float) * tmpTetCount);
int* numForces = (int*)malloc(sizeof(int) * tmpPointCount);// the number of force contributions (num elements) for each node
int maxNumForces = 0; // the maximum number of force contributions (num elements) for any node
for (int i = 0; i < tmpPointCount; i++)
{
numForces[i] = 0;
}
int counter = 0;
for (int i = 0; i < mesh->numTetrahedra; i++)
{
if (htetrahedra[i].x >= 0 && htetrahedra[i].y >= 0 && htetrahedra[i].z >= 0 && htetrahedra[i].w >= 0)
{
tets[counter].x = htetrahedra[i].x;
tets[counter].y = htetrahedra[i].y;
tets[counter].z = htetrahedra[i].z;
tets[counter].w = htetrahedra[i].w;
writeIndices[counter].x = numForces[htetrahedra[i].x]++;
if (writeIndices[counter].x+1 > maxNumForces)
maxNumForces = writeIndices[counter].x+1;
writeIndices[counter].y = numForces[htetrahedra[i].y]++;
if (writeIndices[counter].y+1 > maxNumForces)
maxNumForces = writeIndices[counter].y+1;
writeIndices[counter].z = numForces[htetrahedra[i].z]++;
if (writeIndices[counter].z+1 > maxNumForces)
maxNumForces = writeIndices[counter].z+1;
writeIndices[counter].w = numForces[htetrahedra[i].w]++;
if (writeIndices[counter].w+1 > maxNumForces)
maxNumForces = writeIndices[counter].w+1;
// calculate volume and smallest length
Vector3D a = crop_last_dim(hpoints[htetrahedra[i].x]);
Vector3D b = crop_last_dim(hpoints[htetrahedra[i].y]);
Vector3D c = crop_last_dim(hpoints[htetrahedra[i].z]);
Vector3D d = crop_last_dim(hpoints[htetrahedra[i].w]);
Vector3D ab = b-a; // these 3 are used for volume calculation
Vector3D ac = c-a;
Vector3D ad = d-a;
Vector3D bc = c-b;
Vector3D cd = d-c;
Vector3D bd = d-b;
float smallestLengthSquared = ab.squaredLength();
float sql = ac.squaredLength();
if (sql<smallestLengthSquared) smallestLengthSquared = sql;
sql = ad.squaredLength();
if (sql<smallestLengthSquared) smallestLengthSquared = sql;
sql = bc.squaredLength();
if (sql<smallestLengthSquared) smallestLengthSquared = sql;
sql = cd.squaredLength();
if (sql<smallestLengthSquared) smallestLengthSquared = sql;
sql = bd.squaredLength();
if (sql<smallestLengthSquared) smallestLengthSquared = sql;
if (smallestLengthSquared <smallestAllowedLength*smallestAllowedLength)
{
continue;
}
if (smallestLengthSquared<totalSmallestLengthSquared)
totalSmallestLengthSquared = smallestLengthSquared;
Vector3D cross_product = cross(ab,ac);
float cross_length = cross_product.length();
//Length of vector ad projected onto cross product normal
float projected_length = dot(ad, cross_product/cross_length);
float volume = (1.0f/6.0f) * projected_length*cross_length;
if (volume<smallestAllowedVolume)
{
continue;
}
totalVolume += volume;
/*
static float smallestvolume = 100000;
if (volume<smallestvolume)
{
smallestvolume=volume;
printf("smallest element volume: %g\n", smallestvolume);
}
static float largestvolume = 0;
if (volume>largestvolume)
{
largestvolume=volume;
printf("largest element volume: %g\n", largestvolume);
}
*/
// printf("volume: %g\n", volume);
initialVolume[counter] = volume;
// if (volume<0.1)
// printf("volume: %f \n",volume);
mass[htetrahedra[i].x] += volume * 0.25f * density;
mass[htetrahedra[i].y] += volume * 0.25f * density;
mass[htetrahedra[i].z] += volume * 0.25f * density;
mass[htetrahedra[i].w] += volume * 0.25f * density;
counter++;
}
}
// these are the padded ones
for (int i = counter; i < tmpTetCount; i++)
{
tets[i].x = -1;
tets[i].y = -1;
tets[i].z = -1;
tets[i].w = -1;
}
printf("Total volume: %f\n", totalVolume);
mesh->numTetrahedra = tmpTetCount;
//copy points to a padded array
cudaFree(mesh->points);
cudaMalloc((void**)&(mesh->points), sizeof(Point) * tmpPointCount);
cudaMemcpy(mesh->points, hpoints, sizeof(Point) * mesh->numPoints, cudaMemcpyHostToDevice);
mesh->numPoints = tmpPointCount;
free(hpoints);
free(htetrahedra);
// free(pointMap);
free(numForces);
cudaFree(mesh->tetrahedra);
// for (int i=0; i<mesh->numPoints; i++)
// {
// printf("Vertex %i: %f, %f, %f\n", i, (points[i].x), (points[i].y), (points[i].z));
// }
cudaMalloc((void**)&(mesh->tetrahedra), sizeof(Tetrahedron) * mesh->numTetrahedra);
cudaMalloc((void**)&(mesh->writeIndices), sizeof(int4) * mesh->numWriteIndices);
cudaMalloc((void**)&(mesh->volume), sizeof(float) * mesh->numTetrahedra);
cudaMalloc((void**)&(mesh->mass), sizeof(float) * mesh->numPoints);
cudaMemcpy(mesh->tetrahedra, tets, sizeof(Tetrahedron) * mesh->numTetrahedra, cudaMemcpyHostToDevice);
cudaMemcpy(mesh->writeIndices, writeIndices, sizeof(int4) * mesh->numWriteIndices, cudaMemcpyHostToDevice);
cudaMemcpy(mesh->volume, initialVolume, sizeof(float) * mesh->numTetrahedra, cudaMemcpyHostToDevice);
cudaMemcpy(mesh->mass, mass, sizeof(float) * mesh->numPoints, cudaMemcpyHostToDevice);
/*
for (int i = 0; i < tmpPointCount; i++)
{
if (mass[i] == 0)
{
printf("warning: point without mass detected\n");
}
}
*/
// for (int i = 0; i < mesh->numWriteIndices; i++)
// {
// printf("%i, %i, %i, %i \n", writeIndices[i].x, writeIndices[i].y, writeIndices[i].z, writeIndices[i].w );
// }
CUT_CHECK_ERROR("Error deleting");
free(tets);
free(initialVolume);
free(writeIndices);
free(mass);
return_maxNumForces = maxNumForces;
return sqrtf(totalSmallestLengthSquared);
}
TetrahedralMesh* allocAndCopyMesh(Tetrahedron* hTets, int numTetrahedra, Point* hPoints, int numVertices)
{
Point *dPoints;
Tetrahedron *dTets;
cudaMalloc((void**)&dPoints, sizeof(Point) *numVertices);
cudaMalloc((void**)&dTets, sizeof(Tetrahedron)*numTetrahedra);
cudaMemcpy(dPoints, hPoints, sizeof(Point) *numVertices, cudaMemcpyHostToDevice);
cudaMemcpy(dTets, hTets, sizeof(Tetrahedron)*numTetrahedra , cudaMemcpyHostToDevice);
TetrahedralMesh* mesh = (TetrahedralMesh *) malloc(sizeof(TetrahedralMesh));
mesh->points = dPoints;
mesh->numPoints = numVertices;
mesh->tetrahedra = dTets;
mesh->numTetrahedra = numTetrahedra;
printf("Number of points: %i\n", mesh->numPoints);
printf("Number of tetrahedra: %i\n", mesh->numTetrahedra );
return mesh;
}
TetrahedralMesh* loadMesh(const char* filename)
{
FILE * pFile;
pFile = fopen(filename,"r");
if (!pFile) return NULL;
int numVertices;
int numTetrahedra;
fscanf (pFile, "%i\n", &numVertices);
fscanf (pFile, "%i\n", &numTetrahedra);
Tetrahedron* hTets = (Tetrahedron*) malloc(numTetrahedra*sizeof(Tetrahedron));
Point* hPoints = (Point*) malloc(numVertices*sizeof(Point));
for (int i=0; i<numVertices && !feof(pFile); i++)
{
Point newPoint;
fscanf (pFile, "%f %f %f\n", &(newPoint.x), &(newPoint.y), &(newPoint.z));
//printf("New vertex at %f, %f, %f\n", (newPoint.x), (newPoint.y), (newPoint.z));
/* newPoint.x *= 0.001;
newPoint.y *= 0.001;
newPoint.z *= 0.001;
*/
hPoints[i] = newPoint;
}
for (int i=0; i<numTetrahedra && !feof(pFile); i++)
{
Tetrahedron newTet;
fscanf (pFile, "%i %i %i %i\n", &(newTet.x), &(newTet.y), &(newTet.z), &(newTet.w));
//printf("New tetrahedron: %i, %i, %i, %i\n", (newTet.x), (newTet.y), (newTet.z), (newTet.w));
hTets[i]=newTet;
}
fclose (pFile);
TetrahedralMesh* mesh = allocAndCopyMesh(hTets, numTetrahedra, hPoints, numVertices);
free(hPoints);
free(hTets);
return mesh;
}
void copyStateToHost(TetrahedralTLEDState* state, TetrahedralMesh* mesh, Point* hPoints){
Point *dPoints = state->Ui_t;
cudaMemcpy(hPoints, dPoints, sizeof(Point) *mesh->numPoints, cudaMemcpyDeviceToHost);
}
void copyStateToDevice(TetrahedralTLEDState* state, TetrahedralMesh* mesh, Point* hPoints){
Point *dPoints = state->Ui_t;
cudaMemcpy(dPoints, hPoints, sizeof(Point) *mesh->numPoints, cudaMemcpyHostToDevice);
}
void calculateGravityForces(TetrahedralMesh* mesh, TetrahedralTLEDState *state)
{
int pointSize = (int)ceil(((float)mesh->numPoints)/BLOCKSIZE);
calculateDrivingForces_k<<<make_uint3(pointSize,1,1), make_uint3(BLOCKSIZE,1,1)>>>(mesh->points, mesh->mass, state->externalForces, mesh->numPoints);
}
void applyFloorConstraint(TetrahedralMesh* mesh, TetrahedralTLEDState *state, float floorZPosition)
{
int pointSize = (int)ceil(((float)mesh->numPoints)/BLOCKSIZE);
applyGroundConstraint_k<<<make_uint3(pointSize,1,1), make_uint3(BLOCKSIZE,1,1)>>>(mesh->points, state->Ui_t, state->Ui_tminusdt, floorZPosition, mesh->numPoints);
}
void calculateInternalForces(TetrahedralMesh* mesh, TetrahedralTLEDState *state)
{
int tetSize = (int)ceil(((float)mesh->numTetrahedra)/BLOCKSIZE);
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float4>();
cudaBindTexture( 0, Ui_t_1d_tex, state->Ui_t, channelDesc );
cudaChannelFormatDesc channelDesc2 = cudaCreateChannelDesc<float>();
cudaBindTexture( 0, V0_1d_tex, mesh->volume, channelDesc2 );
calculateForces_k<<<make_uint3(tetSize,1,1), make_uint3(BLOCKSIZE,1,1)>>>((Matrix4x3 *)state->shape_function_deriv, mesh->tetrahedra, state->Ui_t, mesh->volume, mesh->writeIndices, state->pointForces, state->maxNumForces, state->mu, state->lambda, mesh->numTetrahedra);
cudaUnbindTexture( V0_1d_tex );
cudaUnbindTexture( Ui_t_1d_tex );
}
void doTimeStep(TetrahedralMesh* mesh, TetrahedralTLEDState *state)
{
int pointSize = (int)ceil(((float)mesh->numPoints)/BLOCKSIZE);
calculateInternalForces(mesh, state);
updateDisplacements_k<<<make_uint3(pointSize,1,1), make_uint3(BLOCKSIZE,1,1)>>>(state->Ui_t, state->Ui_tminusdt, mesh->mass, state->externalForces, state->pointForces, state->maxNumForces, state->ABC, mesh->numPoints);
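	// Double-buffering of the displacement fields: updateDisplacements_k advances the solution
	// using u(t) and u(t-dt), and the pointer swap below rotates the two buffers for the next
	// step instead of copying them on the device.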
float4 *temp = state->Ui_t;
state->Ui_t = state->Ui_tminusdt;
state->Ui_tminusdt = temp;
}
void precompute(TetrahedralMesh* mesh, TetrahedralTLEDState *state,
float density, float smallestAllowedVolume, float smallestAllowedLength,
float mu, float lambda, float timeStepFactor, float damping)
{
float smallestLength = CPUPrecalculation(mesh, BLOCKSIZE, state->maxNumForces, density, smallestAllowedVolume, smallestAllowedLength);
float nu = lambda/(2.0f*(lambda+mu));
float E = mu*(3.0f*lambda+2.0f*mu)/(lambda+mu);
float c = sqrt((E*(1.0f-nu))/(density*(1.0f-nu)*(1.0f-2.0f*nu)));
// the factor is to account for changes in c during deformation
float timeStep = timeStepFactor*smallestLength/c;
//float timeStep = 0.0001f;
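	// Explicit central-difference TLED is only conditionally stable: the critical step is
	// roughly dt_crit ~ L_min / c, with L_min the smallest element edge and c the dilatational
	// wave speed estimated above, so timeStepFactor (< 1) acts as a safety margin on that bound.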
printf("precompute: number of tetrahedra :%i \n",mesh->numTetrahedra);
state->mu = mu;
state->lambda = lambda;
int tetSize = (int)ceil(((float)mesh->numTetrahedra)/BLOCKSIZE);
int pointSize = (int)ceil(((float)mesh->numPoints)/BLOCKSIZE);
cudaMalloc((void**)&(state->ABC), sizeof(float4) * mesh->numPoints);
cudaMalloc((void**)&(state->Ui_t), sizeof(float4) * mesh->numPoints);
cudaMalloc((void**)&(state->Ui_tminusdt), sizeof(float4) * mesh->numPoints);
cudaMalloc((void**)&(state->pointForces), state->maxNumForces * sizeof(float4) * mesh->numPoints);
cudaMalloc((void**)&(state->externalForces), sizeof(float4) * mesh->numPoints);
cudaError_t err = cudaGetLastError();
if( err != cudaSuccess ){
printf("\nCuda error detected in 'TLEDSolver::precompute': %s. Quitting.\n", cudaGetErrorString(err) ); fflush(stdout);
exit(1);
}
cudaMemset(state->pointForces, 0, sizeof(float4) * state->maxNumForces * mesh->numPoints);
cudaMemset(state->externalForces, 0, sizeof(float4) * mesh->numPoints);
cudaMemset(state->Ui_t, 0, sizeof(float4) * mesh->numPoints);
cudaMemset(state->Ui_tminusdt, 0, sizeof(float4) * mesh->numPoints);
precalculateABC<<<make_uint3(pointSize,1,1), make_uint3(BLOCKSIZE,1,1)>>>(state->ABC, mesh->mass, timeStep, damping, mesh->numPoints);
cudaMalloc((void**)&(state->shape_function_deriv), sizeof(ShapeFunctionDerivatives) * mesh->numTetrahedra);
precalculateShapeFunctionDerivatives_k<<<make_uint3(tetSize,1,1), make_uint3(BLOCKSIZE,1,1)>>>(state->shape_function_deriv,
mesh->tetrahedra, mesh->points, mesh->numTetrahedra);
err = cudaGetLastError();
if( err != cudaSuccess ){
printf("\nCuda error detected in 'TLEDSolver::precompute': %s. Quitting.\n", cudaGetErrorString(err) ); fflush(stdout);
exit(1);
}
state->timeStep = timeStep;
}
}
|
2119c92727a05b0542544fc7452ce35561719a6f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//=============================================================================
#include "header.h"
#define PROFILING
#define SHARED
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true)
{
if (code != hipSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
//=============================================================================
__global__ void pointWise_complex_matrix_mult_kernel_2d(hipfftComplex* img_spectrum, float* real_filter, hipfftComplex* out)
{
// Grab indices
int index_x = threadIdx.x + blockIdx.x * blockDim.x;
int index_y = threadIdx.y + blockIdx.y * blockDim.y;
int grid_width = gridDim.x * blockDim.x;
int grid_index = index_y * grid_width + index_x;
#ifdef SHARED
__shared__ float shared_temp_array[SHARED_MEM];
// map the two 2D indices to a single linear, 1D index
int witin_block_id = threadIdx.x * blockDim.x + threadIdx.y;
#endif
// A*B = (a + jb)(c + jd) = ac + ajd + cjb - bd
// = ac + (ad + cb)j - bd
// Re[A*B] = ac - bd
// Im[A*B] = ad + cb
// d = 0
// => Re(out) = ac and Im(out) = cb
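	// Worked example (illustrative values, not from the original code): for a spectrum bin
	// A = 2 + 3j and a real CSF weight c = 0.5, the product is 1 + 1.5j, i.e. both the real
	// and imaginary parts are simply scaled by the filter value.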
#ifdef SHARED
shared_temp_array[witin_block_id] = img_spectrum[grid_index].x*real_filter[grid_index];
__syncthreads();
out[grid_index].x = shared_temp_array[witin_block_id];
shared_temp_array[witin_block_id] = real_filter[grid_index] * img_spectrum[grid_index].y;
__syncthreads();
out[grid_index].y = shared_temp_array[witin_block_id];
#endif
#ifndef SHARED
out[grid_index].x = img_spectrum[grid_index].x*real_filter[grid_index]; //Re(out)
out[grid_index].y = real_filter[grid_index] * img_spectrum[grid_index].y; //Im(out)
#endif
}
//=============================================================================
__global__ void magnitude_kernel(hipfftComplex* d_inverse_complex, float* d_inverse_mag)
{
// Grab indices
int index_x = threadIdx.x + blockIdx.x * blockDim.x;
int index_y = threadIdx.y + blockIdx.y * blockDim.y;
// map the two 2D indices to a single linear, 1D index
int grid_width = gridDim.x * blockDim.x;
int grid_index = index_y * grid_width + index_x;
// Grab Real and Imaginary parts of d_inverse_complex
float a = d_inverse_complex[grid_index].x / float(IMG_SIZE);
float b = d_inverse_complex[grid_index].y / float(IMG_SIZE);
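	// cuFFT/hipFFT inverse transforms are unnormalized, so a forward + inverse pair scales the
	// data by the number of samples; dividing by IMG_SIZE (presumably 512*512 from the header)
	// restores the original magnitude before the L2 norm is taken below.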
// Apply pythagorean formula (Euclidean L2-Norm)
#ifdef SHARED
__shared__ float shared_temp_array[SHARED_MEM];
// map the two 2D indices to a single linear, 1D index
int witin_block_id = threadIdx.x * blockDim.x + threadIdx.y;
shared_temp_array[witin_block_id] = sqrt(a*a + b*b);
__syncthreads();
d_inverse_mag[grid_index] = shared_temp_array[witin_block_id];
#endif
#ifndef SHARED
d_inverse_mag[grid_index] = sqrt(a*a + b*b);
#endif
}
//=============================================================================
__global__ void real_kernel(hipfftComplex* complex_in, float* real_out)
{
// Grab indices
int index_x = threadIdx.x + blockIdx.x * blockDim.x;
int index_y = threadIdx.y + blockIdx.y * blockDim.y;
// map the two 2D indices to a single linear, 1D index
int grid_width = gridDim.x * blockDim.x;
int grid_index = index_y * grid_width + index_x;
#ifdef SHARED
__shared__ float shared_temp_array[SHARED_MEM];
// map the two 2D indices to a single linear, 1D index
int witin_block_id = threadIdx.x * blockDim.x + threadIdx.y;
shared_temp_array[witin_block_id] = complex_in[grid_index].x / float(IMG_SIZE);
__syncthreads();
real_out[grid_index] = shared_temp_array[witin_block_id];
#endif
#ifndef SHARED
// Grab Real part of complex_in
real_out[grid_index] = complex_in[grid_index].x / float(IMG_SIZE);
#endif
}
//=============================================================================
__global__ void R2C_kernel(float* float_in, hipfftComplex* complex_out)
{
// Grab indices
int index_x = threadIdx.x + blockIdx.x * blockDim.x;
int index_y = threadIdx.y + blockIdx.y * blockDim.y;
// map the two 2D indices to a single linear, 1D index
int grid_width = gridDim.x * blockDim.x;
int grid_index = index_y * grid_width + index_x;
#ifdef SHARED
__shared__ float shared_temp_array[SHARED_MEM];
// map the two 2D indices to a single linear, 1D index
int witin_block_id = threadIdx.x * blockDim.x + threadIdx.y;
shared_temp_array[witin_block_id] = float_in[grid_index];
__syncthreads();
complex_out[grid_index].x = shared_temp_array[witin_block_id];
complex_out[grid_index].y = 0;
#endif
#ifndef SHARED
complex_out[grid_index].x = float_in[grid_index];
complex_out[grid_index].y = 0;
#endif
}
//=============================================================================
__global__ void yPlane_CSF_kernel(float* yPlane)
{
float temp = (2 * 32 / 512.0f);
float tempvar = -(256.0f) - 1 + 0.5f;
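	// Assumption about intent: temp = 2*32/512 maps the 512-sample axis onto roughly +/-32
	// cycles/degree, so yPlane holds the vertical spatial-frequency coordinate of each row,
	// later combined with xPlane to form the radial frequency used by the CSF.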
#ifdef SHARED
__shared__ float shared_temp_array[SHARED_MEM];
// map the two 2D indices to a single linear, 1D index
int witin_block_id = threadIdx.x * blockDim.x + threadIdx.y;
#endif
tempvar = tempvar * temp;
//Vignesh: Candidate for dynamic parallelism; Loop unrolling
for (int j = 0; j < 512; j++)
{
tempvar += temp;
#ifdef SHARED
shared_temp_array[witin_block_id] = tempvar;
__syncthreads();
yPlane[j] = shared_temp_array[witin_block_id];
#else
yPlane[j] = tempvar;
#endif
}
}
//=============================================================================
__global__ void xPlane_CSF_kernel(float* xPlane)
{
float temp = (2 * 32 / 512.0f);
float tempvar = -(256.0f) - 1 + 0.5f;
#ifdef SHARED
__shared__ float shared_temp_array[SHARED_MEM];
// map the two 2D indices to a single linear, 1D index
int witin_block_id = threadIdx.x * blockDim.x + threadIdx.y;
#endif
tempvar = tempvar * temp;
//Vignesh: Candidate for dynamic parallelism; Loop unrolling
for (int i = 0; i < 512; i++)
{
tempvar += temp;
#ifdef SHARED
shared_temp_array[witin_block_id] = tempvar;
__syncthreads();
xPlane[i] = shared_temp_array[witin_block_id];
#else
xPlane[i] = tempvar;
#endif
}
}
//=============================================================================
__global__ void map_to_luminance_domain_kernel1(float* float_img_in, float* L_hat)
{
// Grab indices
int index_x = threadIdx.x + blockIdx.x * blockDim.x;
int index_y = threadIdx.y + blockIdx.y * blockDim.y;
// map the two 2D indices to a single linear, 1D index
int grid_width = gridDim.x * blockDim.x;
int grid_index = index_y * grid_width + index_x;
#ifdef SHARED
__shared__ float shared_temp_array[SHARED_MEM];
// map the two 2D indices to a single linear, 1D index
int witin_block_id = threadIdx.x * blockDim.x + threadIdx.y;
#endif
// Map from Pixel Domain [unitless] to Luminance Domain [cd/m^2] - (MAD eq. 1 and eq. 2)
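	// Example of the mapping computed below: a pixel value of 255 gives
	// (0.02874 * 255)^(2.2/3) = 7.33^0.733 ~ 4.3, while a pixel value of 0 maps to 0.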
#ifdef SHARED
shared_temp_array[witin_block_id] = pow((0.02874f*float_img_in[grid_index]), (2.2f / 3.0f));
__syncthreads();
L_hat[grid_index] = shared_temp_array[witin_block_id];
#else
L_hat[grid_index] = pow((0.02874f*float_img_in[grid_index]), (2.2f / 3.0f));
#endif
}
//=============================================================================
__global__ void map_to_luminance_domain_kernel2(float* float_img_in1, float* L_hat1, float* float_img_in2, float* L_hat2)
{
// Grab indices
int index_x = threadIdx.x + blockIdx.x * blockDim.x;
int index_y = threadIdx.y + blockIdx.y * blockDim.y;
// map the two 2D indices to a single linear, 1D index
int grid_width = gridDim.x * blockDim.x;
int grid_index = index_y * grid_width + index_x;
#ifdef SHARED
__shared__ float shared_temp_array[SHARED_MEM];
// map the two 2D indices to a single linear, 1D index
int witin_block_id = threadIdx.x * blockDim.x + threadIdx.y;
#endif
// Map from Pixel Domain [unitless] to Luminance Domain [cd/m^2] - (MAD eq. 1 and eq. 2)
#ifdef SHARED
shared_temp_array[witin_block_id] = pow((0.02874f*float_img_in1[grid_index]), (2.2f / 3.0f));
L_hat1[grid_index] = shared_temp_array[witin_block_id];
shared_temp_array[witin_block_id] = pow((0.02874f*float_img_in2[grid_index]), (2.2f / 3.0f));
L_hat2[grid_index] = shared_temp_array[witin_block_id];
#else
L_hat1[grid_index] = pow((0.02874f*float_img_in1[grid_index]), (2.2f / 3.0f));
L_hat2[grid_index] = pow((0.02874f*float_img_in2[grid_index]), (2.2f / 3.0f));
#endif
}
//=============================================================================
__global__ void error_img_kernel(const float* ref, const float* dst, float* err)
{
// Grab indices
int index_x = threadIdx.x + blockIdx.x * blockDim.x;
int index_y = threadIdx.y + blockIdx.y * blockDim.y;
// map the two 2D indices to a single linear, 1D index
int grid_width = gridDim.x * blockDim.x;
int grid_index = index_y * grid_width + index_x;
#ifdef SHARED
__shared__ float shared_temp_array[SHARED_MEM];
// map the two 2D indices to a single linear, 1D index
int witin_block_id = threadIdx.x * blockDim.x + threadIdx.y;
#endif
#ifdef SHARED
shared_temp_array[witin_block_id] = ref[grid_index] - dst[grid_index];
__syncthreads();
err[grid_index] = shared_temp_array[witin_block_id];
#else
err[grid_index] = ref[grid_index] - dst[grid_index];
#endif
}
//=============================================================================
__global__ void build_CSF_kernel(float* csf, const float* yPlane, const float* xPlane)
{
// Masking / luminance parameters
float k = 0.02874;
float G = 0.5; // luminance threshold
float C_slope = 1; // slope of detection threshold
float Ci_thrsh = -5; // contrast to start slope, rather than const threshold
float Cd_thrsh = -5; // saturated threshold
float ms_scale = 1; // scaling constant
float s_dbl, radfreq_dbl;
float temp = (2 * 32 / 512.0);
float tempvar = -(256.0) - 1 + 0.5f;
#ifdef SHARED
__shared__ float shared_temp_array[SHARED_MEM];
// map the two 2D indices to a single linear, 1D index
int witin_block_id = threadIdx.x * blockDim.x + threadIdx.y;
#endif
//int idx = 0;
//for (register int i = 0; i < width; i++)
int i = blockIdx.x * blockDim.x + threadIdx.x; //Create unique Grid Index in x-dimension
{
float xVar = xPlane[i];
//for (register int j = 0; j < height; j++)
int j = blockIdx.y * blockDim.y + threadIdx.y; //Create unique Grid Index in y-dimension
{
float yVar = yPlane[j];
s_dbl = ((1 - 0.7f) / 2 * cos(4 * atan2(yVar, xVar))) + (1 + 0.7f) / 2;
radfreq_dbl = sqrt(xVar*xVar + yVar*yVar) / s_dbl;
// map the two 2D indices to a single linear, 1D index
int grid_width = gridDim.x * blockDim.x;
int grid_index = j * grid_width + i;
// (MAD eq. 3)
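			// In closed form the branch below implements csf(f) = 2.6*(0.0192 + 0.114*f)*exp(-(0.114*f)^1.1)
			// for radial frequencies above ~7.89 cycles/degree, clamped to the flat value 0.9809 below that.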
if (radfreq_dbl < 7.8909)
{
#ifdef SHARED
shared_temp_array[witin_block_id] = 0.9809f;
__syncthreads();
csf[grid_index] = shared_temp_array[witin_block_id];
#else
csf[grid_index] = 0.9809f;
#endif
}
else
{
float tmp_real = 2.6f*(0.0192f + 0.114f* radfreq_dbl)*exp(-pow((0.114f*radfreq_dbl), 1.1f));
#ifdef SHARED
shared_temp_array[witin_block_id] = tmp_real;
__syncthreads();
csf[grid_index] = shared_temp_array[witin_block_id];
#else
csf[grid_index] = tmp_real;
#endif
}
}
}
}
//=============================================================================
__global__ void fftShift_kernel(float* img)
{
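	// For an even-sized (512x512) image this quadrant swap (Q1<->Q3, Q2<->Q4) is equivalent to
	// MATLAB's fftshift: each thread exchanges one pixel of the upper-left quadrant with its
	// diagonal partner and one pixel of the upper-right quadrant with the lower-left.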
int i = threadIdx.x + blockIdx.x * blockDim.x;
#ifdef SHARED
__shared__ float shared_temp_array[SHARED_MEM];
// map the two 2D indices to a single linear, 1D index
int witin_block_id = threadIdx.x * blockDim.x + threadIdx.y;
int k = 0;
#endif
//Vignesh : Reduce the conditionals to a single one
if (i < 256)
{
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (j < 256)
{
#ifdef SHARED
float temp = img[i * 512 + j];
img[i * 512 + j] = img[(i + 256) * 512 + (j + 256)];
img[(i + 256) * 512 + (j + 256)] = temp;
temp = img[(i + 256) * 512 + j];
img[(i + 256) * 512 + j] = img[i * 512 + (j + 256)];
img[i * 512 + (j + 256)] = temp;
#else
// Preserve spatial locality by storing data in registers
float temp = img[i * 512 + j];
img[i * 512 + j] = img[(i + 256) * 512 + (j + 256)];
img[(i + 256) * 512 + (j + 256)] = temp;
temp = img[(i + 256) * 512 + j];
img[(i + 256) * 512 + j] = img[i * 512 + (j + 256)];
img[i * 512 + (j + 256)] = temp;
#endif
}
}
}
//=============================================================================
__global__ void delta_stats_kernel(float *ref_outStd, float *ref_outSkw, float* ref_outKrt,
float* dst_outStd, float* dst_outSkw, float* dst_outKrt, float scale, float* eta)
{
// Grab indices
int index_x = threadIdx.x + blockIdx.x * blockDim.x;
int index_y = threadIdx.y + blockIdx.y * blockDim.y;
// map the two 2D indices to a single linear, 1D index
int grid_width = gridDim.x * blockDim.x;
int grid_index = index_y * grid_width + index_x;
//Vignesh: Optimization below: copy each value into a register and reuse it,
//rather than re-reading it from global memory every time
float delta_stat1 = abs(ref_outStd[grid_index] - dst_outStd[grid_index]);
float delta_stat2 = abs(ref_outSkw[grid_index] - dst_outSkw[grid_index]);
float delta_stat3 = abs(ref_outKrt[grid_index] - dst_outKrt[grid_index]);
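	// eta accumulates scale * (|d_std| + 2*|d_skew| + |d_kurt|) over the log-Gabor subbands:
	// the absolute differences of the local statistics are weighted (skewness doubled) and
	// summed with the per-subband weight passed in as `scale`.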
#ifdef SHARED
__shared__ float shared_temp_array[SHARED_MEM];
// map the two 2D indices to a single linear, 1D index
int witin_block_id = threadIdx.x * blockDim.x + threadIdx.y;
shared_temp_array[witin_block_id] = scale*(delta_stat1 + 2 * delta_stat2 + delta_stat3);
__syncthreads();
eta[grid_index] += shared_temp_array[witin_block_id];
#else
eta[grid_index] += scale*(delta_stat1 + 2 * delta_stat2 + delta_stat3);
#endif
}
//=============================================================================
__global__ void fast_lo_stats_kernel(float* xVal, float* outStd, float* outSkw, float* outKrt)
{
//Declarations
//__shared__ float xVal_Shm[256];
float xVal_local[256];
float mean, stdev, skw, krt, stmp;
int iB, jB;
//for (i = 0; i<512 - 15; i += 4)
int i = 4 * (threadIdx.x + blockIdx.x * blockDim.x);
if (i < 497) //512-15=497
{
//for (j = 0; j<512 - 15; j += 4)
int j = 4 * (threadIdx.y + blockIdx.y * blockDim.y);
if (j < 497)
{
// THE FOLLOWING SET OF RUNNING SUMS CAN BE A set of PARALLEL REDUCTIONs (in shared memory?)
// 256 itteratios -> log2(256)=8 itterations
// Store block into registers (256 x 4Bytes = 1kB)
int idx = 0;
for (iB = i; iB < i + 16; iB++)
{
for (jB = j; jB < j + 16; jB++)
{
xVal_local[idx] = xVal[iB * 512 + jB];
idx++;
}
}
//Traverse through and get mean
mean = 0;
for (idx = 0; idx < 256; idx++)
mean += xVal_local[idx]; //this can be a simple reduction in shared memory
mean = mean / 256.0f;
//Traverse through and get stdev, skew and kurtosis
stdev = 0;
skw = 0;
krt = 0;
float xV_mean = 0;
for (idx = 0; idx < 256; idx++)
{
// Place this commonly re-used value into a register to preserve temporal localitiy
xV_mean = xVal_local[idx] - mean;
stdev += xV_mean*xV_mean;
skw += xV_mean*xV_mean*xV_mean;
krt += xV_mean*xV_mean*xV_mean*xV_mean;
}
stmp = sqrt(stdev / 256.0f);
stdev = sqrt(stdev / 255.0f);//MATLAB's std is a bit different
if (stmp != 0){
skw = (skw / 256.0f) / ((stmp)*(stmp)*(stmp));
krt = (krt / 256.0f) / ((stmp)*(stmp)*(stmp)*(stmp));
}
else{
skw = 0;
krt = 0;
}
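			// These are the standard moment ratios skw = m3/sigma^3 and krt = m4/sigma^4, computed
			// with the population sigma (stmp); stdev itself uses the n-1 normalization to match
			// MATLAB's std, as noted above.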
//---------------------------------------------------------------------------
// This is the nearest neighbor interpolation - ACTUALLY NOT NEEDED!!!!!!!!
// To remove the nested for loop here we need to modify the algorithm to
// adjust for the pointwise multiplication done far later that uses a
// 512x512 dimension matrix derived from the matrices this kernel produces
// The modified output would be PxP (as described mathematically in the paper).
//---------------------------------------------------------------------------
// Only this final output should be written to global memory:
for (iB = i; iB < i + 4; iB++)
{
for (jB = j; jB < j + 4; jB++)
{
outStd[(iB * 512) + jB] = stdev;
outSkw[(iB * 512) + jB] = skw;
outKrt[(iB * 512) + jB] = krt;
}
}
}
}
}
//=============================================================================
__global__ void zeta_map_kernel(float* outMean, float* outStd, float* outStdMod, float* zeta)
{
// Grab indices
int index_x = threadIdx.x + blockIdx.x * blockDim.x;
int index_y = threadIdx.y + blockIdx.y * blockDim.y;
// map the two 2D indices to a single linear, 1D index
int grid_width = gridDim.x * blockDim.x;
int p = index_y * grid_width + index_x;
// Compute pth element of RMS contrast map C_err (MAD eq. 4 and eq. 5)
float C_org = log(outStdMod[p] / outMean[p]);
float C_err = log(outStd[p] / outMean[p]);
if (outMean[p] < 0.5)
C_err = -999999999999999; // log(0) = -infinity
// Compute local visibility distortion map (MAD eq. 6)
float delta = -5.0;
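	// MAD eq. 6 in piecewise form (delta = -5):
	//   zeta = C_err - C_org   if C_err > C_org  and C_org >  delta
	//   zeta = C_err - delta   if C_err > delta  and C_org <= delta
	//   zeta = 0               otherwise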
if ((C_err > C_org) && (C_org > delta))
zeta[p] = C_err - C_org;
else if ((C_err > delta) && (delta >= C_org))
zeta[p] = C_err - delta;
else
zeta[p] = 0;
}
//=============================================================================
__global__ void square_of_difference_kernel(float* ref, float* dst, float* out)
{
// Grab indices
int index_x = threadIdx.x + blockIdx.x * blockDim.x;
int index_y = threadIdx.y + blockIdx.y * blockDim.y;
// map the two 2D indices to a single linear, 1D index
int grid_width = gridDim.x * blockDim.x;
int i = index_y * grid_width + index_x;
out[i] = (ref[i] - dst[i])*(ref[i] - dst[i]);
}
//=============================================================================
__global__ void LMSE_map_kernel(float* reflut, float* D)
{
// This is kernel #D14
//for (int j = 0; j < boundaray; j++)
int j = threadIdx.x + blockIdx.x * blockDim.x;
if (j < N - 15)
{
int idx = LMSE_CONST + j*N;
//for (int i = 0; i < N - 15; i++)
int i = threadIdx.y + blockIdx.y * blockDim.y;
if (i < N - 15)
{
int idx = (LMSE_CONST + j*N) + i - 1;
float temp_dbl = 0;
int sub_idx = idx - (7 + 7 * N);
for (int jB = j; jB < j + 16; ++jB)
{
for (int iB = i; iB < i + 16; ++iB)
{
temp_dbl += reflut[sub_idx];
++sub_idx;
}
sub_idx += N - 16;
}
D[idx] = temp_dbl / 256.0;
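			// temp_dbl summed 16*16 = 256 squared-error samples, so D holds the local mean squared
			// error of each 16x16 window (the LMSE map) at every pixel of the valid region.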
}
}
}
//=============================================================================
//=============================================================================
__global__ void fast_hi_stats_kernel1(float* absRefs, float* absDsts, float* outStd, float* outStdMod, float* outMean, float* ref_img, float* dst_img, float* TMP)
{
float mean, mean2, stdev;
int i, j, iB, jB;
i = 4 * (threadIdx.x + blockIdx.x * blockDim.x);
if (i < 497) //512-15=497
{
j = 4 * (threadIdx.y + blockIdx.y * blockDim.y);
if (j < 497)
{
//Traverse through and get mean
mean = 0;
mean2 = 0;
for (iB = i; iB < i + 16; iB++)
{
for (jB = j; jB < j + 16; jB++)
{
mean += absRefs[(iB * 512) + jB];
mean2 += absDsts[(iB * 512) + jB];
}
}
mean = mean / 256.0f;
mean2 = mean2 / 256.0f;
//Traverse through and get stdev
stdev = 0;
for (iB = i; iB < i + 16; iB++)
{
for (jB = j; jB < j + 16; jB++)
{
float temp = absRefs[(iB * 512) + jB] - mean;
stdev += temp*temp;
}
}
stdev = sqrt(stdev / 255.0);//MATLAB's std is a bit different
for (iB = i; iB < i + 4; iB++)
{
for (jB = j; jB < j + 4; jB++)
{
outMean[(iB * 512) + jB] = mean2;// mean of reference
outStd[(iB * 512) + jB] = stdev;// stdev of dst
}
}
} // end for over j
} // end for over i
//====================================================================
//Modified STD
//for (i = 0; i < 512 - 15; i += 4)
i = 4 * (threadIdx.x + blockIdx.x * blockDim.x);
if (i < 497) //512-15=497
{
//for (j = 0; j < 512 - 15; j += 4)
j = 4 * (threadIdx.y + blockIdx.y * blockDim.y);
if (j < 497) //512-15=497
{
//Traverse through and get mean
mean = 0;
for (iB = i; iB < i + 8; iB++)
{
for (jB = j; jB < j + 8; jB++)
{
mean += absDsts[(iB * 512) + jB];
}
}
mean = mean / 64.0f;
//Traverse through and get stdev
stdev = 0;
for (iB = i; iB < i + 8; iB++)
{
for (jB = j; jB < j + 8; jB++)
{
float temp = absDsts[(iB * 512) + jB] - mean;
stdev += (temp)*(temp);
}
}
stdev = sqrt(stdev / 63.0);//MATLAB's std is a bit different
for (iB = i; iB < i + 4; iB++)
{
for (jB = j; jB < j + 4; jB++)
{
TMP[(iB * 512) + jB] = stdev;// stdev of ref
outStdMod[(iB * 512) + jB] = stdev;
}
}
}
}
}
__global__ void fast_hi_stats_kernel2(float* absRefs, float* absDsts, float* outStd, float* outStdMod, float* outMean, float* ref_img, float* dst_img, float* TMP)
{
//Declarations
float mean, mean2, stdev;
//float* TMP = (float *)malloc(N * N*sizeof(float));
int i, j, iB, jB;
//for (i = 0; i < 512 - 15; i += 4)
i = 4 * (threadIdx.x + blockIdx.x * blockDim.x);
if (i < 497) //512-15=497
{
//for (j = 0; j < 512 - 15; j += 4)
j = 4 * (threadIdx.y + blockIdx.y * blockDim.y);
if (j < 497) //512-15=497
{
mean = TMP[(i * 512) + j];
for (iB = i; iB < i + 8; iB += 5)
{
for (jB = j; jB < j + 8; jB += 5)
{
if (iB < 512 - 15 && jB < 512 - 15 && mean > TMP[(iB * 512) + jB])
mean = TMP[(iB * 512) + jB];
}
}
for (iB = i; iB < i + 4; iB++)
{
for (jB = j; jB < j + 4; jB++)
{
outStdMod[(iB * 512) + jB] = mean;
}
}
}
}
}
//=============================================================================
__global__ void product_array_kernel(float* out, float* in1, float* in2)
{
//for (int i = BLOCK_SIZE; i < N - BLOCK_SIZE - 1; i++)
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= BLOCK_SIZE && i < N - BLOCK_SIZE - 1)
{
//for (int j = BLOCK_SIZE; j < N - BLOCK_SIZE - 1; j++)
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (j >= BLOCK_SIZE && j < N - BLOCK_SIZE - 1)
out[i*N + j] = in1[i*N + j] * in2[i*N + j];
}
}
//=============================================================================
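// buildGabor: constructs one log-Gabor filter (orientation orientIdx, scale
// scaleIdx) directly in the frequency domain. Each thread evaluates a radial
// log-Gaussian centred at fo = 1/wavelength[scaleIdx], multiplied by an
// angular Gaussian spread about the filter angle; the DC sample is zeroed at
// the end. thetaSigma is assumed to be provided by header.h.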
__global__ void buildGabor(float* logGabor, const int orientIdx, const int scaleIdx)
{
const float nOrient = 4.0f;
const float nScale = 5.0f;
const float sigmaOnf = 0.55f;
float wavelength[5] = { 3.0f, 9.0f, 27.0f, 81.0f, 243.0f };
float angl = orientIdx * PI / nOrient; // Calculate filter angle
#ifdef SHARED
__shared__ float shared_temp_array[SHARED_MEM];
#endif
// Construct the filter - first calculate the radial filter component.
float fo = 1.0f / wavelength[scaleIdx]; // Centre frequency of filter.
float rfo = fo / 0.5f; // Normalised radius from centre of frequency plane
// corresponding to fo.
//for (int i = 0; i < N; i++)
int i = threadIdx.x + blockDim.x * blockIdx.x; //Gives the x index in the grid
int j = threadIdx.y + blockDim.y * blockIdx.y; // Gives the y index in the grid
int witin_grid_id = i * blockDim.x * blockDim.y + j; //Gives overall thread index within grid
int witin_block_id = threadIdx.x * blockDim.x + threadIdx.y; //Gives the threadId within a block only
int lin_idx = i * N + j; //This is old implementation
//Going to use within_grid_id instead of lin_idx
//printf("i = %d j = %d inside_grid_id = %d inside_block_id = %d lin_idx = %d \n", i, j, witin_grid_id, witin_block_id, I, J, lin_idx);
// REDUCE GLOBAL MEMORY TRAFFIC BY COMPUTING EACH INDIVIDUAL MATRIX VALUE IN EACH THREAD
float Y = -1 + i *0.003906f;
float X = -1 + j *0.003906f;
float sin_theta_temp = sin(atan2f(-Y, X));
float cos_theta_temp = cos(atan2f(-Y, X));
float radius_temp = sqrt(X * X + Y * Y);
float ds = sin_theta_temp * cos(angl) - cos_theta_temp * sin(angl); // Difference in sin
float dc = cos_theta_temp * cos(angl) + sin_theta_temp * sin(angl); // Difference in cos
float diffTheta = abs(atan2(ds, dc)); // Absolute angular distance
float spread = exp((-diffTheta * diffTheta) / (2 * thetaSigma * thetaSigma)); // Calculate the angular filter component.
float gabor = exp((-(log(radius_temp / rfo)) * (log(radius_temp / rfo)) / (2 * log(0.55f) * log(0.55f))));
#ifdef SHARED
shared_temp_array[witin_block_id] = spread * gabor;
__syncthreads();
logGabor[lin_idx] = shared_temp_array[witin_block_id];
#endif
#ifndef SHARED
logGabor[lin_idx] = spread * gabor;
#endif
// WHY TEST ALL VALUES?
// EXECUTE KERNEL THEN ONLY MODIFY THIS SINGLE VALUE IN ANOTHER KERNEL
if (lin_idx == 131328)// lin_idx = (N^2 + N)/2 = 131328 (N=512)
logGabor[lin_idx] = 0.0f; //Get rid of the 0 radius value
}
//=============================================================================
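// kernel_wrapper: host-side driver for the whole CUDA-MAD pipeline.
// Detection stage: map both images to the luminance domain, filter by the CSF
// in the frequency domain, build the local-MSE map and the visibility map
// zeta, multiply them, and collapse with an L2 norm into d_detect.
// Appearance stage: push both images through a 4-orientation x 5-scale
// log-Gabor filterbank, compare local stdev/skewness/kurtosis of the
// responses, accumulate into eta, and collapse into d_appear.
// The final MAD score blends the two indices through the adaptive weight
// alpha computed from d_detect.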
hipError_t kernel_wrapper(const cv::Mat &mat_ref, const cv::Mat &mat_dst)
{
int GPU_N, device_num_used;
hipGetDeviceCount(&GPU_N);
//OSU Workstation : 0 = Tesla, 1 = Titan1, 2 = Titan2
//ASU Workstation: 0 = Tesla, 1 = Quadro (don't use)
device_num_used = 0;
hipError_t cudaStatus = hipSetDevice(device_num_used); // OSU Workstation: 0=Tesla, 1=Titan1, 2=Titan2
if (cudaStatus != hipSuccess)
fprintf(stderr, "hipSetDevice failed!");
// Allocate Page-locked (Pinned) HOST-memory
float* h_img_ref_float; hipHostMalloc(&h_img_ref_float, REAL_SIZE);
float* h_img_dst_float; hipHostMalloc(&h_img_dst_float, REAL_SIZE);
hipfftComplex* h_ref_cufft; hipHostMalloc(&h_ref_cufft, COMPLEX_SIZE);
hipfftComplex* h_dst_cufft; hipHostMalloc(&h_dst_cufft, COMPLEX_SIZE);
float* h_eta; hipHostMalloc(&h_eta, REAL_SIZE);
float* h_product; hipHostMalloc(&h_product, REAL_SIZE);
// Allocate DEVICE memory -- Appearance (Hi-Index)
float* d_img_ref_float; hipMalloc((void **)&d_img_ref_float, REAL_SIZE);
float* d_img_dst_float; hipMalloc((void **)&d_img_dst_float, REAL_SIZE);
float* d_L_hat_ref; hipMalloc((void **)&d_L_hat_ref, REAL_SIZE);
float* d_L_hat_dst; hipMalloc((void **)&d_L_hat_dst, REAL_SIZE);
hipfftComplex* d_L_hat_ref_complex; hipMalloc((void **)&d_L_hat_ref_complex, COMPLEX_SIZE);
hipfftComplex* d_L_hat_dst_complex; hipMalloc((void **)&d_L_hat_dst_complex, COMPLEX_SIZE);
float* d_CSF; hipMalloc((void **)&d_CSF, REAL_SIZE);
float* d_xPlane; hipMalloc((void **)&d_xPlane, 512 * sizeof(float));
float* d_yPlane; hipMalloc((void **)&d_yPlane, 512 * sizeof(float));
float* d_I_prime_org; hipMalloc((void **)&d_I_prime_org, REAL_SIZE);
float* d_I_prime_dst; hipMalloc((void **)&d_I_prime_dst, REAL_SIZE);
float* d_I_prime_err; hipMalloc((void **)&d_I_prime_err, REAL_SIZE);
float* d_outStd; hipMalloc((void **)&d_outStd, REAL_SIZE);
float* d_outStdMod; hipMalloc((void **)&d_outStdMod, REAL_SIZE);
float* d_outMean; hipMalloc((void **)&d_outMean, REAL_SIZE);
float* d_reflut; hipMalloc((void **)&d_reflut, REAL_SIZE);
float* d_TEMP; hipMalloc((void **)&d_TEMP, REAL_SIZE);
float* d_zeta; hipMalloc((void **)&d_zeta, REAL_SIZE);
float* d_lmse; hipMalloc((void **)&d_lmse, REAL_SIZE);
float* d_product; hipMalloc((void **)&d_product, REAL_SIZE);
// Allocate DEVICE memory -- Appearance (Lo-Index)
hipfftComplex* d_ref_cufft; hipMalloc((void **)&d_ref_cufft, COMPLEX_SIZE);
hipfftComplex* d_dst_cufft; hipMalloc((void **)&d_dst_cufft, COMPLEX_SIZE);
float* d_logGabor; hipMalloc((void **)&d_logGabor, REAL_SIZE);
hipfftComplex* d_ref_c; hipMalloc((void **)&d_ref_c, COMPLEX_SIZE);
hipfftComplex* d_dst_c; hipMalloc((void **)&d_dst_c, COMPLEX_SIZE);
float* d_ref_c_mag; hipMalloc((void **)&d_ref_c_mag, REAL_SIZE);
float* d_dst_c_mag; hipMalloc((void **)&d_dst_c_mag, REAL_SIZE);
float* d_ref_Std; hipMalloc((void **)&d_ref_Std, REAL_SIZE);
float* d_ref_Skw; hipMalloc((void **)&d_ref_Skw, REAL_SIZE);
float* d_ref_Krt; hipMalloc((void **)&d_ref_Krt, REAL_SIZE);
float* d_dst_Std; hipMalloc((void **)&d_dst_Std, REAL_SIZE);
float* d_dst_Skw; hipMalloc((void **)&d_dst_Skw, REAL_SIZE);
float* d_dst_Krt; hipMalloc((void **)&d_dst_Krt, REAL_SIZE);
float* d_eta; hipMalloc((void **)&d_eta, REAL_SIZE);
// Creates stream and cuFFT plans and set them in different streams
const int NUM_STREAMS = 10;
hipStream_t stream[NUM_STREAMS];
hipfftHandle* fftPlan = (hipfftHandle*)malloc(sizeof(hipfftHandle)*NUM_STREAMS);
for (int i = 0; i < NUM_STREAMS; i++)
{
hipStreamCreate(&stream[i]);
hipfftPlan2d(&fftPlan[i], N, N, HIPFFT_C2C);
hipfftSetStream(fftPlan[i], stream[i]);
}
// Configuration Parameters - EXPERIMENT WITH THESE TO DETERMINE OPTIMAL VALUES!!!!!!!
//(Launch most kernels as 4-dimensional functions - with overall 512x512 threads in grid):
dim3 gridSize(32, 32, 1);
dim3 blockSize(16, 16, 1);
//dim3 gridSize(2, 2, 1);
//dim3 blockSize(2, 2, 1);
dim3 fftShift_grid_size(16, 8, 1);
dim3 fftShift_block_size(32, 32, 1);
// The lo-stats kernels only need to be launched with (512/4)^2 threads due to
// the 4 pixel sliding window (i.e. only 12 pixel overlap in neighboring 16x16 blocks)
dim3 loStats_Grid_size(8, 8);
dim3 loStats_Block_size(16, 16);
//Constant memory copy
float h_nOrient = 4.0f;
float h_nScale = 5.0f;
float h_sigmaOnf = 0.55f;
float h_wavelength[5] = { 3.0f, 9.0f, 27.0f, 81.0f, 243.0f };
hipMemcpyToSymbol(wavelength, h_wavelength, 5 * sizeof(float)); // copy the five wavelength values to constant memory
hipMemcpyToSymbol(nOrient, &h_nOrient, sizeof(float));
hipMemcpyToSymbol(nScale, &h_nScale, sizeof(float));
hipMemcpyToSymbol(sigmaOnf, &h_sigmaOnf, sizeof(float));
//----------------------------------------------------------------------------
// Program initialization complete - Begin main program body:
//----------------------------------------------------------------------------
std::cout << "Beginning Detection Stage" << std::endl;
// Start CPU Timing
int itteration_num = 1;
double timing_sum = 0.0;
LARGE_INTEGER start_CPU, end_CPU, frequency_CPU;
float milliseconds_CPU;
QueryPerformanceFrequency(&frequency_CPU);
QueryPerformanceCounter(&start_CPU);
for (int timing_idx = 0; timing_idx < itteration_num; ++timing_idx)
{
// Begin NVTX Marker:
roctxRangePushA("CUDA-MAD");
// Build CSF on Device
yPlane_CSF_kernel << < 1, 1, 0, stream[1] >> >(d_yPlane);
xPlane_CSF_kernel << < 1, 1, 0, stream[1] >> >(d_xPlane);
build_CSF_kernel << < gridSize, blockSize, 0, stream[1] >> >(d_CSF, d_yPlane, d_xPlane);
fftShift_kernel << < fftShift_grid_size, fftShift_block_size, 0, stream[1] >> >(d_CSF);
// Linearize REAL image data and copy data from HOST -> DEVICE
roctxRangePushA("Linearize ref");// Begin NVTX Marker for Linearize ref
linearize_and_cast_from_Mat_to_float(mat_ref, h_img_ref_float);
roctxRangePop(); // End NVTX Marker for Linearize ref
hipMemcpyAsync(d_img_ref_float, h_img_ref_float, REAL_SIZE, hipMemcpyHostToDevice, stream[1]); //HOST -> DEVICE
//hipMemcpy(d_img_ref_float, h_img_ref_float, REAL_SIZE, hipMemcpyHostToDevice); //HOST -> DEVICE
map_to_luminance_domain_kernel1 << < gridSize, blockSize, 0, stream[1] >> >(d_img_ref_float, d_L_hat_ref);
R2C_kernel << < gridSize, blockSize, 0, stream[1] >> >(d_L_hat_ref, d_L_hat_ref_complex);
hipfftExecC2C(fftPlan[1], (hipfftComplex *)d_L_hat_ref_complex, (hipfftComplex *)d_L_hat_ref_complex, HIPFFT_FORWARD);
pointWise_complex_matrix_mult_kernel_2d << < gridSize, blockSize, 0, stream[1] >> >(d_L_hat_ref_complex, d_CSF, d_L_hat_ref_complex);
hipfftExecC2C(fftPlan[1], (hipfftComplex *)d_L_hat_ref_complex, (hipfftComplex *)d_L_hat_ref_complex, HIPFFT_BACKWARD);
real_kernel << <gridSize, blockSize, 0, stream[1] >> >(d_L_hat_ref_complex, d_I_prime_org);
roctxRangePushA("Linearize dst");// Begin NVTX Marker for Linearize ref
linearize_and_cast_from_Mat_to_float(mat_dst, h_img_dst_float);
roctxRangePop(); // End NVTX Marker for Linearize ref
hipMemcpyAsync(d_img_dst_float, h_img_dst_float, REAL_SIZE, hipMemcpyDeviceToHost, stream[1]); //DEVICE -> HOST
//hipMemcpy(d_img_dst_float, h_img_dst_float, REAL_SIZE, hipMemcpyDeviceToHost); //DEVICE -> HOST
map_to_luminance_domain_kernel1 << < gridSize, blockSize, 0, stream[1] >> >(d_img_dst_float, d_L_hat_dst);
R2C_kernel << < gridSize, blockSize, 0, stream[1] >> >(d_L_hat_dst, d_L_hat_dst_complex);
hipfftExecC2C(fftPlan[1], (hipfftComplex *)d_L_hat_dst_complex, (hipfftComplex *)d_L_hat_dst_complex, HIPFFT_FORWARD);
pointWise_complex_matrix_mult_kernel_2d << < gridSize, blockSize, 0, stream[1] >> >(d_L_hat_dst_complex, d_CSF, d_L_hat_dst_complex);
hipfftExecC2C(fftPlan[1], (hipfftComplex *)d_L_hat_dst_complex, (hipfftComplex *)d_L_hat_dst_complex, HIPFFT_BACKWARD);
real_kernel << <gridSize, blockSize, 0, stream[1] >> >(d_L_hat_dst_complex, d_I_prime_dst);
// Detection Statistics
square_of_difference_kernel << <gridSize, blockSize, 0, stream[1] >> >(d_img_ref_float, d_img_dst_float, d_reflut);
LMSE_map_kernel << <gridSize, blockSize, 0, stream[1] >> >(d_reflut, d_lmse);
error_img_kernel << <gridSize, blockSize, 0, stream[1] >> >(d_I_prime_org, d_I_prime_dst, d_I_prime_err);
fast_hi_stats_kernel1 << <loStats_Grid_size, loStats_Block_size, 0, stream[1] >> >(d_I_prime_err, d_I_prime_org, d_outStd, d_outStdMod, d_outMean, d_img_ref_float, d_img_dst_float, d_TEMP);
fast_hi_stats_kernel2 << <loStats_Grid_size, loStats_Block_size, 0, stream[1] >> >(d_I_prime_err, d_I_prime_org, d_outStd, d_outStdMod, d_outMean, d_img_ref_float, d_img_dst_float, d_TEMP);
zeta_map_kernel << <gridSize, blockSize, 0, stream[1] >> >(d_outMean, d_outStd, d_outStdMod, d_zeta);
product_array_kernel << <gridSize, blockSize, 0, stream[1] >> >(d_product, d_zeta, d_lmse); // Product inside summation in MAD eq. 7
//hipMemcpyAsync(h_product, d_product, REAL_SIZE, hipMemcpyDeviceToHost, stream[1]); //DEVICE -> HOST
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// Begin Gabor Filterbank:
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// Execute "in-place" C2C 2D-DFT of REF (used in the LEFT side of the Gabor Filterbank)
R2C_kernel << <gridSize, blockSize, 0, stream[1] >> >(d_img_ref_float, d_ref_cufft);
R2C_kernel << <gridSize, blockSize, 0, stream[1] >> >(d_img_dst_float, d_dst_cufft);
hipfftExecC2C(fftPlan[1], (hipfftComplex *)d_ref_cufft, (hipfftComplex *)d_ref_cufft, HIPFFT_FORWARD);
hipfftExecC2C(fftPlan[1], (hipfftComplex *)d_dst_cufft, (hipfftComplex *)d_dst_cufft, HIPFFT_FORWARD);
float scale[5] = { 0.5, 0.75, 1, 5, 6 };
for (int o = 0; o < 4; o++)
{
for (int s = 0; s < 5; s++)
{
//buildGabor << < gridSize, blockSize, 0, stream[1] >> >(d_logGabor, o, s);
buildGabor << < gridSize, blockSize, 0, stream[1] >> >(d_logGabor, o, s);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
printf("Kernel launch failed: %s\n", hipGetErrorString(cudaStatus));
}
fftShift_kernel << < fftShift_grid_size, fftShift_block_size, 0, stream[1] >> >(d_logGabor);
pointWise_complex_matrix_mult_kernel_2d << < gridSize, blockSize, 0, stream[1] >> >(d_ref_cufft, d_logGabor, d_ref_c);
pointWise_complex_matrix_mult_kernel_2d << < gridSize, blockSize, 0, stream[1] >> >(d_dst_cufft, d_logGabor, d_dst_c);
hipfftExecC2C(fftPlan[1], (hipfftComplex *)d_ref_c, (hipfftComplex *)d_ref_c, HIPFFT_BACKWARD);
hipfftExecC2C(fftPlan[1], (hipfftComplex *)d_dst_c, (hipfftComplex *)d_dst_c, HIPFFT_BACKWARD);
magnitude_kernel << <gridSize, blockSize, 0, stream[1] >> >(d_ref_c, d_ref_c_mag);
magnitude_kernel << <gridSize, blockSize, 0, stream[1] >> >(d_dst_c, d_dst_c_mag);
fast_lo_stats_kernel << <loStats_Grid_size, loStats_Block_size, 0, stream[1] >> >(d_ref_c_mag, d_ref_Std, d_ref_Skw, d_ref_Krt);
fast_lo_stats_kernel << <loStats_Grid_size, loStats_Block_size, 0, stream[1] >> >(d_dst_c_mag, d_dst_Std, d_dst_Skw, d_dst_Krt);
delta_stats_kernel << <gridSize, blockSize, 0, stream[1] >> >(d_ref_Std, d_ref_Skw, d_ref_Krt,
d_dst_Std, d_dst_Skw, d_dst_Krt, scale[s] / 13.25f, d_eta);
}
}
// Copy final eta map back to HOST for collapse (NEEDS TO BE DONE VIA REDUCTION)!
//hipMemcpyAsync(h_eta, d_eta, REAL_SIZE, hipMemcpyDeviceToHost, stream[1]); //DEVICE -> HOST
// Host Code waits here on memcpy
hipMemcpy(h_product, d_product, REAL_SIZE, hipMemcpyDeviceToHost); //DEVICE -> HOST
// Collapse the visibility-weighted local MSE via L2-norm (MAD eq. 7)
roctxRangePushA("CPU Detection Map Collapse");
float d_detect = reduce_sum_of_squares_2D_CPU(h_product, BLOCK_SIZE, N - BLOCK_SIZE - 1);
d_detect = sqrt(d_detect) / sqrt(229441.0f); // Number of iterations in loop: counter = 229441
d_detect = d_detect * 200;
roctxRangePop();
hipMemcpy(h_eta, d_eta, REAL_SIZE, hipMemcpyDeviceToHost); //DEVICE -> HOST
roctxRangePushA("CPU Appearance Map Collapse");
float d_appear = reduce_sum_of_squares_2D_CPU(h_eta, BLOCK_SIZE, N - BLOCK_SIZE);
d_appear = sqrt(d_appear) / 479.0f;
roctxRangePop();
float beta1 = 0.467;
float beta2 = 0.130;
float alpha = 1 / (1 + beta1*pow(d_detect, beta2));
float MAD = pow(d_detect, alpha)*pow(d_appear, 1 - alpha);
// End NVTX Marker for CUDA-MAD:
roctxRangePop();
// End CPU Timing
QueryPerformanceCounter(&end_CPU);
milliseconds_CPU = (end_CPU.QuadPart - start_CPU.QuadPart) *
1000.0 / frequency_CPU.QuadPart;
timing_sum += milliseconds_CPU;
std::cout << "Hi-Index d_detect = " << d_detect << std::endl;
std::cout << "Lo-Index d_appear = " << d_appear << std::endl;
std::cout << "\nMAD = " << MAD << std::endl;
} // End timing loop
fprintf(stderr, "\nTime = %.3f ms\n", timing_sum / double(itteration_num));
//----------------------------------------------------------------------------
// Main program body complete - Perform closing operations:
//----------------------------------------------------------------------------
//Error:
// De-allocate memory here...
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
hipDeviceReset();
#ifndef PROFILING
getch();
#endif
return cudaStatus;
}
//=============================================================================
void linearize_and_cast_from_Mat_to_float(const cv::Mat& mat_in, float* h_float)
{
for (int row = 0; row < 512; row++)
for (int col = 0; col < 512; col++)
h_float[row * 512 + col] = static_cast<float>(mat_in.at<unsigned char>(row, col));
}
//=============================================================================
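// reduce_sum_of_squares_2D_CPU: host-side collapse of a 512x512 map. Sums the
// squares of the entries whose row and column indices lie in
// [INSIDE_BOUND, OUTSIDE_BOUND), i.e. the interior of the image excluding a
// BLOCK_SIZE-wide border; used above to collapse the detection and appearance
// maps before the final MAD combination.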
float reduce_sum_of_squares_2D_CPU(float* in, const int INSIDE_BOUND, const int OUTSIDE_BOUND)
{
float sum = 0.0f;
for (int i = INSIDE_BOUND; i < OUTSIDE_BOUND; i++)
{
for (int j = INSIDE_BOUND; j < OUTSIDE_BOUND; j++)
sum += in[i*N + j] * in[i*N + j];
}
return sum;
}
//=============================================================================
void write_to_file_DEBUG(float* w, const int SIZE)
{
std::ofstream outFile;
outFile.open("TEST.txt");
for (int i = 0; i < SIZE; i++) // Iterate over rows
{
for (int j = 0; j < SIZE; j++) // Iterate over cols
outFile << w[i * SIZE + j] << " ";
if (i != SIZE - 1)
outFile << ";\n";
}
outFile.close();
}
//=============================================================================
| 2119c92727a05b0542544fc7452ce35561719a6f.cu | //=============================================================================
#include "header.h"
#define PROFILING
#define SHARED
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
//=============================================================================
__global__ void pointWise_complex_matrix_mult_kernel_2d(cufftComplex* img_spectrum, float* real_filter, cufftComplex* out)
{
// Grab indices
int index_x = threadIdx.x + blockIdx.x * blockDim.x;
int index_y = threadIdx.y + blockIdx.y * blockDim.y;
int grid_width = gridDim.x * blockDim.x;
int grid_index = index_y * grid_width + index_x;
#ifdef SHARED
__shared__ float shared_temp_array[SHARED_MEM];
// map the two 2D indices to a single linear, 1D index
int witin_block_id = threadIdx.x * blockDim.x + threadIdx.y;
#endif
// A*B = (a + jb)(c + jd) = ac + ajd + cjb - bd
// = ac + (ad + cb)j - bd
// Re[A*B] = ac - bd
// Im[A*B] = ad + cb
// d = 0
// => Re(out) = ac and Im(out) = cb
#ifdef SHARED
shared_temp_array[witin_block_id] = img_spectrum[grid_index].x*real_filter[grid_index];
__syncthreads();
out[grid_index].x = shared_temp_array[witin_block_id];
shared_temp_array[witin_block_id] = real_filter[grid_index] * img_spectrum[grid_index].y;
__syncthreads();
out[grid_index].y = shared_temp_array[witin_block_id];
#endif
#ifndef SHARED
out[grid_index].x = img_spectrum[grid_index].x*real_filter[grid_index]; //Re(out)
out[grid_index].y = real_filter[grid_index] * img_spectrum[grid_index].y; //Im(out)
#endif
}
//=============================================================================
__global__ void magnitude_kernel(cufftComplex* d_inverse_complex, float* d_inverse_mag)
{
// Grab indices
int index_x = threadIdx.x + blockIdx.x * blockDim.x;
int index_y = threadIdx.y + blockIdx.y * blockDim.y;
// map the two 2D indices to a single linear, 1D index
int grid_width = gridDim.x * blockDim.x;
int grid_index = index_y * grid_width + index_x;
// Grab Real and Imaginary parts of d_inverse_complex
float a = d_inverse_complex[grid_index].x / float(IMG_SIZE);
float b = d_inverse_complex[grid_index].y / float(IMG_SIZE);
// Apply pythagorean formula (Euclidean L2-Norm)
#ifdef SHARED
__shared__ float shared_temp_array[SHARED_MEM];
// map the two 2D indices to a single linear, 1D index
int witin_block_id = threadIdx.x * blockDim.x + threadIdx.y;
shared_temp_array[witin_block_id] = sqrt(a*a + b*b);
__syncthreads();
d_inverse_mag[grid_index] = shared_temp_array[witin_block_id];
#endif
#ifndef SHARED
d_inverse_mag[grid_index] = sqrt(a*a + b*b);
#endif
}
//=============================================================================
__global__ void real_kernel(cufftComplex* complex_in, float* real_out)
{
// Grab indices
int index_x = threadIdx.x + blockIdx.x * blockDim.x;
int index_y = threadIdx.y + blockIdx.y * blockDim.y;
// map the two 2D indices to a single linear, 1D index
int grid_width = gridDim.x * blockDim.x;
int grid_index = index_y * grid_width + index_x;
#ifdef SHARED
__shared__ float shared_temp_array[SHARED_MEM];
// map the two 2D indices to a single linear, 1D index
int witin_block_id = threadIdx.x * blockDim.x + threadIdx.y;
shared_temp_array[witin_block_id] = complex_in[grid_index].x / float(IMG_SIZE);
__syncthreads();
real_out[grid_index] = shared_temp_array[witin_block_id];
#endif
#ifndef SHARED
// Grab Real part of complex_in
real_out[grid_index] = complex_in[grid_index].x / float(IMG_SIZE);
#endif
}
//=============================================================================
__global__ void R2C_kernel(float* float_in, cufftComplex* complex_out)
{
// Grab indices
int index_x = threadIdx.x + blockIdx.x * blockDim.x;
int index_y = threadIdx.y + blockIdx.y * blockDim.y;
// map the two 2D indices to a single linear, 1D index
int grid_width = gridDim.x * blockDim.x;
int grid_index = index_y * grid_width + index_x;
#ifdef SHARED
__shared__ float shared_temp_array[SHARED_MEM];
// map the two 2D indices to a single linear, 1D index
int witin_block_id = threadIdx.x * blockDim.x + threadIdx.y;
shared_temp_array[witin_block_id] = float_in[grid_index];
__syncthreads();
complex_out[grid_index].x = shared_temp_array[witin_block_id];
complex_out[grid_index].y = 0;
#endif
#ifndef SHARED
complex_out[grid_index].x = float_in[grid_index];
complex_out[grid_index].y = 0;
#endif
}
//=============================================================================
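// yPlane_CSF_kernel / xPlane_CSF_kernel: fill the two 512-element frequency
// axes consumed by build_CSF_kernel below. Both run as a single thread and
// write evenly spaced samples (step 2*32/512, presumably cycles per degree)
// centred on zero.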
__global__ void yPlane_CSF_kernel(float* yPlane)
{
float temp = (2 * 32 / 512.0f);
float tempvar = -(256.0f) - 1 + 0.5f;
#ifdef SHARED
__shared__ float shared_temp_array[SHARED_MEM];
// map the two 2D indices to a single linear, 1D index
int witin_block_id = threadIdx.x * blockDim.x + threadIdx.y;
#endif
tempvar = tempvar * temp;
//Vignesh: Candidate for dynamic parallelism; Loop unrolling
for (int j = 0; j < 512; j++)
{
tempvar += temp;
#ifdef SHARED
shared_temp_array[witin_block_id] = tempvar;
__syncthreads();
yPlane[j] = shared_temp_array[witin_block_id];
#else
yPlane[j] = tempvar;
#endif
}
}
//=============================================================================
__global__ void xPlane_CSF_kernel(float* xPlane)
{
float temp = (2 * 32 / 512.0f);
float tempvar = -(256.0f) - 1 + 0.5f;
#ifdef SHARED
__shared__ float shared_temp_array[SHARED_MEM];
// map the two 2D indices to a single linear, 1D index
int witin_block_id = threadIdx.x * blockDim.x + threadIdx.y;
#endif
tempvar = tempvar * temp;
//Vignesh: Candidate for dynamic parallelism; Loop unrolling
for (int i = 0; i < 512; i++)
{
tempvar += temp;
#ifdef SHARED
shared_temp_array[witin_block_id] = tempvar;
__syncthreads();
xPlane[i] = shared_temp_array[witin_block_id];
#else
xPlane[i] = tempvar;
#endif
}
}
//=============================================================================
__global__ void map_to_luminance_domain_kernel1(float* float_img_in, float* L_hat)
{
// Grab indices
int index_x = threadIdx.x + blockIdx.x * blockDim.x;
int index_y = threadIdx.y + blockIdx.y * blockDim.y;
// map the two 2D indices to a single linear, 1D index
int grid_width = gridDim.x * blockDim.x;
int grid_index = index_y * grid_width + index_x;
#ifdef SHARED
__shared__ float shared_temp_array[SHARED_MEM];
// map the two 2D indices to a single linear, 1D index
int witin_block_id = threadIdx.x * blockDim.x + threadIdx.y;
#endif
// Map from Pixel Domain [unitless] to Luminance Domain [cd/m^2] - (MAD eq. 1 and eq. 2)
#ifdef SHARED
shared_temp_array[witin_block_id] = pow((0.02874f*float_img_in[grid_index]), (2.2f / 3.0f));
__syncthreads();
L_hat[grid_index] = shared_temp_array[witin_block_id];
#else
L_hat[grid_index] = pow((0.02874f*float_img_in[grid_index]), (2.2f / 3.0f));
#endif
}
//=============================================================================
__global__ void map_to_luminance_domain_kernel2(float* float_img_in1, float* L_hat1, float* float_img_in2, float* L_hat2)
{
// Grab indices
int index_x = threadIdx.x + blockIdx.x * blockDim.x;
int index_y = threadIdx.y + blockIdx.y * blockDim.y;
// map the two 2D indices to a single linear, 1D index
int grid_width = gridDim.x * blockDim.x;
int grid_index = index_y * grid_width + index_x;
#ifdef SHARED
__shared__ float shared_temp_array[SHARED_MEM];
// map the two 2D indices to a single linear, 1D index
int witin_block_id = threadIdx.x * blockDim.x + threadIdx.y;
#endif
// Map from Pixel Domain [unitless] to Luminance Domain [cd/m^2] - (MAD eq. 1 and eq. 2)
#ifdef SHARED
shared_temp_array[witin_block_id] = pow((0.02874f*float_img_in1[grid_index]), (2.2f / 3.0f));
L_hat1[grid_index] = shared_temp_array[witin_block_id];
shared_temp_array[witin_block_id] = pow((0.02874f*float_img_in2[grid_index]), (2.2f / 3.0f));
L_hat2[grid_index] = shared_temp_array[witin_block_id];
#else
L_hat1[grid_index] = pow((0.02874f*float_img_in1[grid_index]), (2.2f / 3.0f));
L_hat2[grid_index] = pow((0.02874f*float_img_in2[grid_index]), (2.2f / 3.0f));
#endif
}
//=============================================================================
__global__ void error_img_kernel(const float* ref, const float* dst, float* err)
{
// Grab indices
int index_x = threadIdx.x + blockIdx.x * blockDim.x;
int index_y = threadIdx.y + blockIdx.y * blockDim.y;
// map the two 2D indices to a single linear, 1D index
int grid_width = gridDim.x * blockDim.x;
int grid_index = index_y * grid_width + index_x;
#ifdef SHARED
__shared__ float shared_temp_array[SHARED_MEM];
// map the two 2D indices to a single linear, 1D index
int witin_block_id = threadIdx.x * blockDim.x + threadIdx.y;
#endif
#ifdef SHARED
shared_temp_array[witin_block_id] = ref[grid_index] - dst[grid_index];
__syncthreads();
err[grid_index] = shared_temp_array[witin_block_id];
#else
err[grid_index] = ref[grid_index] - dst[grid_index];
#endif
}
//=============================================================================
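// build_CSF_kernel: evaluates the contrast sensitivity function over the
// 512x512 frequency grid (MAD eq. 3). Each thread derives its radial
// frequency from the xPlane/yPlane axes, applies the orientation-dependent
// scaling s_dbl, and writes either the low-frequency plateau 0.9809 or the
// band-pass term 2.6*(0.0192 + 0.114*f)*exp(-(0.114*f)^1.1).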
__global__ void build_CSF_kernel(float* csf, const float* yPlane, const float* xPlane)
{
// Masking / luminance parameters
float k = 0.02874;
float G = 0.5; // luminance threshold
float C_slope = 1; // slope of detection threshold
float Ci_thrsh = -5; // contrast to start slope, rather than const threshold
float Cd_thrsh = -5; // saturated threshold
float ms_scale = 1; // scaling constant
float s_dbl, radfreq_dbl;
float temp = (2 * 32 / 512.0);
float tempvar = -(256.0) - 1 + 0.5f;
#ifdef SHARED
__shared__ float shared_temp_array[SHARED_MEM];
// map the two 2D indices to a single linear, 1D index
int witin_block_id = threadIdx.x * blockDim.x + threadIdx.y;
#endif
//int idx = 0;
//for (register int i = 0; i < width; i++)
int i = blockIdx.x * blockDim.x + threadIdx.x; //Create unique Grid Index in x-dimension
{
float xVar = xPlane[i];
//for (register int j = 0; j < height; j++)
int j = blockIdx.y * blockDim.y + threadIdx.y; //Create unique Grid Index in y-dimension
{
float yVar = yPlane[j];
s_dbl = ((1 - 0.7f) / 2 * cos(4 * atan2(yVar, xVar))) + (1 + 0.7f) / 2;
radfreq_dbl = sqrt(xVar*xVar + yVar*yVar) / s_dbl;
// map the two 2D indices to a single linear, 1D index
int grid_width = gridDim.x * blockDim.x;
int grid_index = j * grid_width + i;
// (MAD eq. 3)
if (radfreq_dbl < 7.8909)
{
#ifdef SHARED
shared_temp_array[witin_block_id] = 0.9809f;
__syncthreads();
csf[grid_index] = shared_temp_array[witin_block_id];
#else
csf[grid_index] = 0.9809f;
#endif
}
else
{
float tmp_real = 2.6f*(0.0192f + 0.114f* radfreq_dbl)*exp(-pow((0.114f*radfreq_dbl), 1.1f));
#ifdef SHARED
shared_temp_array[witin_block_id] = tmp_real;
__syncthreads();
csf[grid_index] = shared_temp_array[witin_block_id];
#else
csf[grid_index] = tmp_real;
#endif
}
}
}
}
//=============================================================================
__global__ void fftShift_kernel(float* img)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
#ifdef SHARED
__shared__ float shared_temp_array[SHARED_MEM];
// map the two 2D indices to a single linear, 1D index
int witin_block_id = threadIdx.x * blockDim.x + threadIdx.y;
int k = 0;
#endif
//Vignesh : Reduce the conditional to single one
if (i < 256)
{
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (j < 256)
{
#ifdef SHARED
float temp = img[i * 512 + j];
img[i * 512 + j] = img[(i + 256) * 512 + (j + 256)];
img[(i + 256) * 512 + (j + 256)] = temp;
temp = img[(i + 256) * 512 + j];
img[(i + 256) * 512 + j] = img[i * 512 + (j + 256)];
img[i * 512 + (j + 256)] = temp;
#else
// Preserve spatial locality by storing data in registers
float temp = img[i * 512 + j];
img[i * 512 + j] = img[(i + 256) * 512 + (j + 256)];
img[(i + 256) * 512 + (j + 256)] = temp;
temp = img[(i + 256) * 512 + j];
img[(i + 256) * 512 + j] = img[i * 512 + (j + 256)];
img[i * 512 + (j + 256)] = temp;
#endif
}
}
}
//=============================================================================
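// delta_stats_kernel: appearance-stage accumulation. For the current
// log-Gabor sub-band it takes the absolute differences of the local stdev,
// skewness and kurtosis maps of the reference and distorted responses,
// weights them (skewness counted twice) by the per-scale factor 'scale', and
// adds the result into the running eta map.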
__global__ void delta_stats_kernel(float *ref_outStd, float *ref_outSkw, float* ref_outKrt,
float* dst_outStd, float* dst_outSkw, float* dst_outKrt, float scale, float* eta)
{
// Grab indices
int index_x = threadIdx.x + blockIdx.x * blockDim.x;
int index_y = threadIdx.y + blockIdx.y * blockDim.y;
// map the two 2D indices to a single linear, 1D index
int grid_width = gridDim.x * blockDim.x;
int grid_index = index_y * grid_width + index_x;
//Vignesh: Optimization below where the value can be copied to register and use it rather than
//using global reference all the time
float delta_stat1 = abs(ref_outStd[grid_index] - dst_outStd[grid_index]);
float delta_stat2 = abs(ref_outSkw[grid_index] - dst_outSkw[grid_index]);
float delta_stat3 = abs(ref_outKrt[grid_index] - dst_outKrt[grid_index]);
#ifdef SHARED
__shared__ float shared_temp_array[SHARED_MEM];
// map the two 2D indices to a single linear, 1D index
int witin_block_id = threadIdx.x * blockDim.x + threadIdx.y;
shared_temp_array[witin_block_id] = scale*(delta_stat1 + 2 * delta_stat2 + delta_stat3);
__syncthreads();
eta[grid_index] += shared_temp_array[witin_block_id];
#else
eta[grid_index] += scale*(delta_stat1 + 2 * delta_stat2 + delta_stat3);
#endif
}
//=============================================================================
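// fast_lo_stats_kernel: appearance-stage local statistics. Each thread copies
// one 16x16 block of the sub-band magnitude into registers, computes its
// mean, stdev, skewness and kurtosis, and writes the three statistics to the
// 4x4 output tile matching the block's 4-pixel stride position.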
__global__ void fast_lo_stats_kernel(float* xVal, float* outStd, float* outSkw, float* outKrt)
{
//Declarations
//__shared__ float xVal_Shm[256];
float xVal_local[256];
float mean, stdev, skw, krt, stmp;
int iB, jB;
//for (i = 0; i<512 - 15; i += 4)
int i = 4 * (threadIdx.x + blockIdx.x * blockDim.x);
if (i < 497) //512-15=497
{
//for (j = 0; j<512 - 15; j += 4)
int j = 4 * (threadIdx.y + blockIdx.y * blockDim.y);
if (j < 497)
{
// THE FOLLOWING SET OF RUNNING SUMS CAN BE A set of PARALLEL REDUCTIONs (in shared memory?)
// 256 iterations -> log2(256)=8 iterations
// Store block into registers (256 x 4Bytes = 1kB)
int idx = 0;
for (iB = i; iB < i + 16; iB++)
{
for (jB = j; jB < j + 16; jB++)
{
xVal_local[idx] = xVal[iB * 512 + jB];
idx++;
}
}
//Traverse through and get mean
float mean = 0;
for (idx = 0; idx < 256; idx++)
mean += xVal_local[idx]; //this can be a simple reduction in shared memory
mean = mean / 256.0f;
//Traverse through and get stdev, skew and kurtosis
stdev = 0;
skw = 0;
krt = 0;
float xV_mean = 0;
for (idx = 0; idx < 256; idx++)
{
// Place this commonly re-used value into a register to preserve temporal localitiy
xV_mean = xVal_local[idx] - mean;
stdev += xV_mean*xV_mean;
skw += xV_mean*xV_mean*xV_mean;
krt += xV_mean*xV_mean*xV_mean*xV_mean;
}
stmp = sqrt(stdev / 256.0f);
stdev = sqrt(stdev / 255.0f);//MATLAB's std is a bit different
if (stmp != 0){
skw = (skw / 256.0f) / ((stmp)*(stmp)*(stmp));
krt = (krt / 256.0f) / ((stmp)*(stmp)*(stmp)*(stmp));
}
else{
skw = 0;
krt = 0;
}
//---------------------------------------------------------------------------
// This is the nearest neighbor interpolation - ACTUALLY NOT NEEDED!!!!!!!!
// To remove the nested for loop here we need to modify the algorithm to
// adjust for the pointwise multiplication done far later that uses a
// 512x512 dimension matrix derived from the matrices this kernel produces
// The modified output would be PxP (as described mathematically in the paper).
//---------------------------------------------------------------------------
// Only this final output should be written to global memory:
for (iB = i; iB < i + 4; iB++)
{
for (jB = j; jB < j + 4; jB++)
{
outStd[(iB * 512) + jB] = stdev;
outSkw[(iB * 512) + jB] = skw;
outKrt[(iB * 512) + jB] = krt;
}
}
}
}
}
//=============================================================================
__global__ void zeta_map_kernel(float* outMean, float* outStd, float* outStdMod, float* zeta)
{
// Grab indices
int index_x = threadIdx.x + blockIdx.x * blockDim.x;
int index_y = threadIdx.y + blockIdx.y * blockDim.y;
// map the two 2D indices to a single linear, 1D index
int grid_width = gridDim.x * blockDim.x;
int p = index_y * grid_width + index_x;
// Compute pth element of RMS contrast map C_err (MAD eq. 4 and eq. 5)
float C_org = log(outStdMod[p] / outMean[p]);
float C_err = log(outStd[p] / outMean[p]);
if (outMean[p] < 0.5)
C_err = -999999999999999; // log(0) = -infinity
// Compute local visibility distortion map (MAD eq. 6)
float delta = -5.0;
if ((C_err > C_org) && (C_org > delta))
zeta[p] = C_err - C_org;
else if ((C_err > delta) && (delta >= C_org))
zeta[p] = C_err - delta;
else
zeta[p] = 0;
}
//=============================================================================
__global__ void square_of_difference_kernel(float* ref, float* dst, float* out)
{
// Grab indices
int index_x = threadIdx.x + blockIdx.x * blockDim.x;
int index_y = threadIdx.y + blockIdx.y * blockDim.y;
// map the two 2D indices to a single linear, 1D index
int grid_width = gridDim.x * blockDim.x;
int i = index_y * grid_width + index_x;
out[i] = (ref[i] - dst[i])*(ref[i] - dst[i]);
}
//=============================================================================
__global__ void LMSE_map_kernel(float* reflut, float* D)
{
// This is kernel #D14
//for (int j = 0; j < boundary; j++)
int j = threadIdx.x + blockIdx.x * blockDim.x;
if (j < N - 15)
{
int idx = LMSE_CONST + j*N;
//for (int i = 0; i < N - 15; i++)
int i = threadIdx.y + blockIdx.y * blockDim.y;
if (i < N - 15)
{
int idx = (LMSE_CONST + j*N) + i - 1;
float temp_dbl = 0;
int sub_idx = idx - (7 + 7 * N);
for (int jB = j; jB < j + 16; ++jB)
{
for (int iB = i; iB < i + 16; ++iB)
{
temp_dbl += reflut[sub_idx];
++sub_idx;
}
sub_idx += N - 16;
}
D[idx] = temp_dbl / 256.0;
}
}
}
//=============================================================================
//=============================================================================
__global__ void fast_hi_stats_kernel1(float* absRefs, float* absDsts, float* outStd, float* outStdMod, float* outMean, float* ref_img, float* dst_img, float* TMP)
{
float mean, mean2, stdev;
int i, j, iB, jB;
i = 4 * (threadIdx.x + blockIdx.x * blockDim.x);
if (i < 497) //512-15=497
{
j = 4 * (threadIdx.y + blockIdx.y * blockDim.y);
if (j < 497)
{
//Traverse through and get mean
mean = 0;
mean2 = 0;
for (iB = i; iB < i + 16; iB++)
{
for (jB = j; jB < j + 16; jB++)
{
mean += absRefs[(iB * 512) + jB];
mean2 += absDsts[(iB * 512) + jB];
}
}
mean = mean / 256.0f;
mean2 = mean2 / 256.0f;
//Traverse through and get stdev
stdev = 0;
for (iB = i; iB < i + 16; iB++)
{
for (jB = j; jB < j + 16; jB++)
{
float temp = absRefs[(iB * 512) + jB] - mean;
stdev += temp*temp;
}
}
stdev = sqrt(stdev / 255.0);//MATLAB's std is a bit different
for (iB = i; iB < i + 4; iB++)
{
for (jB = j; jB < j + 4; jB++)
{
outMean[(iB * 512) + jB] = mean2;// mean of reference
outStd[(iB * 512) + jB] = stdev;// stdev of dst
}
}
} // end for over j
} // end for over i
//====================================================================
//Modified STD
//for (i = 0; i < 512 - 15; i += 4)
i = 4 * (threadIdx.x + blockIdx.x * blockDim.x);
if (i < 497) //512-15=497
{
//for (j = 0; j < 512 - 15; j += 4)
j = 4 * (threadIdx.y + blockIdx.y * blockDim.y);
if (j < 497) //512-15=497
{
//Traverse through and get mean
mean = 0;
for (iB = i; iB < i + 8; iB++)
{
for (jB = j; jB < j + 8; jB++)
{
mean += absDsts[(iB * 512) + jB];
}
}
mean = mean / 64.0f;
//Traverse through and get stdev
stdev = 0;
for (iB = i; iB < i + 8; iB++)
{
for (jB = j; jB < j + 8; jB++)
{
float temp = absDsts[(iB * 512) + jB] - mean;
stdev += (temp)*(temp);
}
}
stdev = sqrt(stdev / 63.0);//MATLAB's std is a bit different
for (iB = i; iB < i + 4; iB++)
{
for (jB = j; jB < j + 4; jB++)
{
TMP[(iB * 512) + jB] = stdev;// stdev of ref
outStdMod[(iB * 512) + jB] = stdev;
}
}
}
}
}
__global__ void fast_hi_stats_kernel2(float* absRefs, float* absDsts, float* outStd, float* outStdMod, float* outMean, float* ref_img, float* dst_img, float* TMP)
{
//Declarations
float mean, mean2, stdev;
//float* TMP = (float *)malloc(N * N*sizeof(float));
int i, j, iB, jB;
//for (i = 0; i < 512 - 15; i += 4)
i = 4 * (threadIdx.x + blockIdx.x * blockDim.x);
if (i < 497) //512-15=497
{
//for (j = 0; j < 512 - 15; j += 4)
j = 4 * (threadIdx.y + blockIdx.y * blockDim.y);
if (j < 497) //512-15=497
{
mean = TMP[(i * 512) + j];
for (iB = i; iB < i + 8; iB += 5)
{
for (jB = j; jB < j + 8; jB += 5)
{
if (iB < 512 - 15 && jB < 512 - 15 && mean > TMP[(iB * 512) + jB])
mean = TMP[(iB * 512) + jB];
}
}
for (iB = i; iB < i + 4; iB++)
{
for (jB = j; jB < j + 4; jB++)
{
outStdMod[(iB * 512) + jB] = mean;
}
}
}
}
}
//=============================================================================
__global__ void product_array_kernel(float* out, float* in1, float* in2)
{
//for (int i = BLOCK_SIZE; i < N - BLOCK_SIZE - 1; i++)
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= BLOCK_SIZE && i < N - BLOCK_SIZE - 1)
{
//for (int j = BLOCK_SIZE; j < N - BLOCK_SIZE - 1; j++)
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (j >= BLOCK_SIZE && j < N - BLOCK_SIZE - 1)
out[i*N + j] = in1[i*N + j] * in2[i*N + j];
}
}
//=============================================================================
__global__ void buildGabor(float* logGabor, const int orientIdx, const int scaleIdx)
{
const float nOrient = 4.0f;
const float nScale = 5.0f;
const float sigmaOnf = 0.55f;
float wavelength[5] = { 3.0f, 9.0f, 27.0f, 81.0f, 243.0f };
float angl = orientIdx * PI / nOrient; // Calculate filter angle
#ifdef SHARED
__shared__ float shared_temp_array[SHARED_MEM];
#endif
// Construct the filter - first calculate the radial filter component.
float fo = 1.0f / wavelength[scaleIdx]; // Centre frequency of filter.
float rfo = fo / 0.5f; // Normalised radius from centre of frequency plane
// corresponding to fo.
//for (int i = 0; i < N; i++)
int i = threadIdx.x + blockDim.x * blockIdx.x; //Gives the x index in the grid
int j = threadIdx.y + blockDim.y * blockIdx.y; // Gives the y index in the grid
int witin_grid_id = i * blockDim.x * blockDim.y + j; //Gives overall thread index within grid
int witin_block_id = threadIdx.x * blockDim.x + threadIdx.y; //Gives the threadId within a block only
int lin_idx = i * N + j; //This is old implementation
//Going to use within_grid_id instead of lin_idx
//printf("i = %d j = %d inside_grid_id = %d inside_block_id = %d lin_idx = %d \n", i, j, witin_grid_id, witin_block_id, I, J, lin_idx);
// REDUCE GLOBAL MEMORY TRAFFIC BY COMPUTING EACH INDIVIDUAL MATRIX VALUE IN EACH THREAD
float Y = -1 + i *0.003906f;
float X = -1 + j *0.003906f;
float sin_theta_temp = sin(atan2f(-Y, X));
float cos_theta_temp = cos(atan2f(-Y, X));
float radius_temp = sqrt(X * X + Y * Y);
float ds = sin_theta_temp * cos(angl) - cos_theta_temp * sin(angl); // Difference in sin
float dc = cos_theta_temp * cos(angl) + sin_theta_temp * sin(angl); // Difference in cos
float diffTheta = abs(atan2(ds, dc)); // Absolute angular distance
float spread = exp((-diffTheta * diffTheta) / (2 * thetaSigma * thetaSigma)); // Calculate the angular filter component.
float gabor = exp((-(log(radius_temp / rfo)) * (log(radius_temp / rfo)) / (2 * log(0.55f) * log(0.55f))));
#ifdef SHARED
shared_temp_array[witin_block_id] = spread * gabor;
__syncthreads();
logGabor[lin_idx] = shared_temp_array[witin_block_id];
#endif
#ifndef SHARED
logGabor[lin_idx] = spread * gabor;
#endif
// WHY TEST ALL VALUES?
// EXECUTE KERNEL THEN ONLY MODIFY THIS SINGLE VALUE IN ANOTHER KERNEL
if (lin_idx == 131328)// lin_idx = (N^2 + N)/2 = 131328 (N=512)
logGabor[lin_idx] = 0.0f; //Get rid of the 0 radius value
}
//=============================================================================
cudaError_t kernel_wrapper(const cv::Mat &mat_ref, const cv::Mat &mat_dst)
{
int GPU_N, device_num_used;
cudaGetDeviceCount(&GPU_N);
//OSU Workstation : 0 = Tesla, 1 = Titan1, 2 = Titan2
//ASU Workstation: 0 = Tesla, 1 = Quadro (don't use)
device_num_used = 0;
cudaError_t cudaStatus = cudaSetDevice(device_num_used); // OSU Workstation: 0=Tesla, 1=Titan1, 2=Titan2
if (cudaStatus != cudaSuccess)
fprintf(stderr, "cudaSetDevice failed!");
// Allocate Page-locked (Pinned) HOST-memory
float* h_img_ref_float; cudaMallocHost(&h_img_ref_float, REAL_SIZE);
float* h_img_dst_float; cudaMallocHost(&h_img_dst_float, REAL_SIZE);
cufftComplex* h_ref_cufft; cudaMallocHost(&h_ref_cufft, COMPLEX_SIZE);
cufftComplex* h_dst_cufft; cudaMallocHost(&h_dst_cufft, COMPLEX_SIZE);
float* h_eta; cudaMallocHost(&h_eta, REAL_SIZE);
float* h_product; cudaMallocHost(&h_product, REAL_SIZE);
// Allocate DEVICE memory -- Appearance (Hi-Index)
float* d_img_ref_float; cudaMalloc((void **)&d_img_ref_float, REAL_SIZE);
float* d_img_dst_float; cudaMalloc((void **)&d_img_dst_float, REAL_SIZE);
float* d_L_hat_ref; cudaMalloc((void **)&d_L_hat_ref, REAL_SIZE);
float* d_L_hat_dst; cudaMalloc((void **)&d_L_hat_dst, REAL_SIZE);
cufftComplex* d_L_hat_ref_complex; cudaMalloc((void **)&d_L_hat_ref_complex, COMPLEX_SIZE);
cufftComplex* d_L_hat_dst_complex; cudaMalloc((void **)&d_L_hat_dst_complex, COMPLEX_SIZE);
float* d_CSF; cudaMalloc((void **)&d_CSF, REAL_SIZE);
float* d_xPlane; cudaMalloc((void **)&d_xPlane, 512 * sizeof(float));
float* d_yPlane; cudaMalloc((void **)&d_yPlane, 512 * sizeof(float));
float* d_I_prime_org; cudaMalloc((void **)&d_I_prime_org, REAL_SIZE);
float* d_I_prime_dst; cudaMalloc((void **)&d_I_prime_dst, REAL_SIZE);
float* d_I_prime_err; cudaMalloc((void **)&d_I_prime_err, REAL_SIZE);
float* d_outStd; cudaMalloc((void **)&d_outStd, REAL_SIZE);
float* d_outStdMod; cudaMalloc((void **)&d_outStdMod, REAL_SIZE);
float* d_outMean; cudaMalloc((void **)&d_outMean, REAL_SIZE);
float* d_reflut; cudaMalloc((void **)&d_reflut, REAL_SIZE);
float* d_TEMP; cudaMalloc((void **)&d_TEMP, REAL_SIZE);
float* d_zeta; cudaMalloc((void **)&d_zeta, REAL_SIZE);
float* d_lmse; cudaMalloc((void **)&d_lmse, REAL_SIZE);
float* d_product; cudaMalloc((void **)&d_product, REAL_SIZE);
// Allocate DEVICE memory -- Appearance (Lo-Index)
cufftComplex* d_ref_cufft; cudaMalloc((void **)&d_ref_cufft, COMPLEX_SIZE);
cufftComplex* d_dst_cufft; cudaMalloc((void **)&d_dst_cufft, COMPLEX_SIZE);
float* d_logGabor; cudaMalloc((void **)&d_logGabor, REAL_SIZE);
cufftComplex* d_ref_c; cudaMalloc((void **)&d_ref_c, COMPLEX_SIZE);
cufftComplex* d_dst_c; cudaMalloc((void **)&d_dst_c, COMPLEX_SIZE);
float* d_ref_c_mag; cudaMalloc((void **)&d_ref_c_mag, REAL_SIZE);
float* d_dst_c_mag; cudaMalloc((void **)&d_dst_c_mag, REAL_SIZE);
float* d_ref_Std; cudaMalloc((void **)&d_ref_Std, REAL_SIZE);
float* d_ref_Skw; cudaMalloc((void **)&d_ref_Skw, REAL_SIZE);
float* d_ref_Krt; cudaMalloc((void **)&d_ref_Krt, REAL_SIZE);
float* d_dst_Std; cudaMalloc((void **)&d_dst_Std, REAL_SIZE);
float* d_dst_Skw; cudaMalloc((void **)&d_dst_Skw, REAL_SIZE);
float* d_dst_Krt; cudaMalloc((void **)&d_dst_Krt, REAL_SIZE);
float* d_eta; cudaMalloc((void **)&d_eta, REAL_SIZE);
// Creates stream and cuFFT plans and set them in different streams
const int NUM_STREAMS = 10;
cudaStream_t stream[NUM_STREAMS];
cufftHandle* fftPlan = (cufftHandle*)malloc(sizeof(cufftHandle)*NUM_STREAMS);
for (int i = 0; i < NUM_STREAMS; i++)
{
cudaStreamCreate(&stream[i]);
cufftPlan2d(&fftPlan[i], N, N, CUFFT_C2C);
cufftSetStream(fftPlan[i], stream[i]);
}
// Configuration Parameters - EXPERIMENT WITH THESE TO DETERMINE OPTIMAL VALUES!!!!!!!
//(Launch most kernels as 4-dimensional functions - with overall 512x512 threads in grid):
dim3 gridSize(32, 32, 1);
dim3 blockSize(16, 16, 1);
//dim3 gridSize(2, 2, 1);
//dim3 blockSize(2, 2, 1);
dim3 fftShift_grid_size(16, 8, 1);
dim3 fftShift_block_size(32, 32, 1);
// The lo-stats kernels only need to be launched with (512/4)^2 threads due to
// the 4 pixel sliding window (i.e. only 12 pixel overlap in neighboring 16x16 blocks)
dim3 loStats_Grid_size(8, 8);
dim3 loStats_Block_size(16, 16);
//Constant memory copy
float h_nOrient = 4.0f;
float h_nScale = 5.0f;
float h_sigmaOnf = 0.55f;
float h_wavelength[5] = { 3.0f, 9.0f, 27.0f, 81.0f, 243.0f };
cudaMemcpyToSymbol(wavelength, h_wavelength, 5 * sizeof(float)); // copy the five wavelength values to constant memory
cudaMemcpyToSymbol(nOrient, &h_nOrient, sizeof(float));
cudaMemcpyToSymbol(nScale, &h_nScale, sizeof(float));
cudaMemcpyToSymbol(sigmaOnf, &h_sigmaOnf, sizeof(float));
//----------------------------------------------------------------------------
// Program initialization complete - Begin main program body:
//----------------------------------------------------------------------------
std::cout << "Beginning Detection Stage" << std::endl;
// Start CPU Timing
int itteration_num = 1;
double timing_sum = 0.0;
LARGE_INTEGER start_CPU, end_CPU, frequency_CPU;
float milliseconds_CPU;
QueryPerformanceFrequency(&frequency_CPU);
QueryPerformanceCounter(&start_CPU);
for (int timing_idx = 0; timing_idx < itteration_num; ++timing_idx)
{
// Begin NVTX Marker:
nvtxRangePushA("CUDA-MAD");
// Build CSF on Device
yPlane_CSF_kernel << < 1, 1, 0, stream[1] >> >(d_yPlane);
xPlane_CSF_kernel << < 1, 1, 0, stream[1] >> >(d_xPlane);
build_CSF_kernel << < gridSize, blockSize, 0, stream[1] >> >(d_CSF, d_yPlane, d_xPlane);
fftShift_kernel << < fftShift_grid_size, fftShift_block_size, 0, stream[1] >> >(d_CSF);
// Linearize REAL image data and copy data from HOST -> DEVICE
nvtxRangePushA("Linearize ref");// Begin NVTX Marker for Linearize ref
linearize_and_cast_from_Mat_to_float(mat_ref, h_img_ref_float);
nvtxRangePop(); // End NVTX Marker for Linearize ref
cudaMemcpyAsync(d_img_ref_float, h_img_ref_float, REAL_SIZE, cudaMemcpyHostToDevice, stream[1]); //HOST -> DEVICE
//cudaMemcpy(d_img_ref_float, h_img_ref_float, REAL_SIZE, cudaMemcpyHostToDevice); //HOST -> DEVICE
map_to_luminance_domain_kernel1 << < gridSize, blockSize, 0, stream[1] >> >(d_img_ref_float, d_L_hat_ref);
R2C_kernel << < gridSize, blockSize, 0, stream[1] >> >(d_L_hat_ref, d_L_hat_ref_complex);
cufftExecC2C(fftPlan[1], (cufftComplex *)d_L_hat_ref_complex, (cufftComplex *)d_L_hat_ref_complex, CUFFT_FORWARD);
pointWise_complex_matrix_mult_kernel_2d << < gridSize, blockSize, 0, stream[1] >> >(d_L_hat_ref_complex, d_CSF, d_L_hat_ref_complex);
cufftExecC2C(fftPlan[1], (cufftComplex *)d_L_hat_ref_complex, (cufftComplex *)d_L_hat_ref_complex, CUFFT_INVERSE);
real_kernel << <gridSize, blockSize, 0, stream[1] >> >(d_L_hat_ref_complex, d_I_prime_org);
nvtxRangePushA("Linearize dst");// Begin NVTX Marker for Linearize ref
linearize_and_cast_from_Mat_to_float(mat_dst, h_img_dst_float);
nvtxRangePop(); // End NVTX Marker for Linearize ref
cudaMemcpyAsync(d_img_dst_float, h_img_dst_float, REAL_SIZE, cudaMemcpyDeviceToHost, stream[1]); //DEVICE -> HOST
//cudaMemcpy(d_img_dst_float, h_img_dst_float, REAL_SIZE, cudaMemcpyDeviceToHost); //DEVICE -> HOST
map_to_luminance_domain_kernel1 << < gridSize, blockSize, 0, stream[1] >> >(d_img_dst_float, d_L_hat_dst);
R2C_kernel << < gridSize, blockSize, 0, stream[1] >> >(d_L_hat_dst, d_L_hat_dst_complex);
cufftExecC2C(fftPlan[1], (cufftComplex *)d_L_hat_dst_complex, (cufftComplex *)d_L_hat_dst_complex, CUFFT_FORWARD);
pointWise_complex_matrix_mult_kernel_2d << < gridSize, blockSize, 0, stream[1] >> >(d_L_hat_dst_complex, d_CSF, d_L_hat_dst_complex);
cufftExecC2C(fftPlan[1], (cufftComplex *)d_L_hat_dst_complex, (cufftComplex *)d_L_hat_dst_complex, CUFFT_INVERSE);
real_kernel << <gridSize, blockSize, 0, stream[1] >> >(d_L_hat_dst_complex, d_I_prime_dst);
// Detection Statistics
square_of_difference_kernel << <gridSize, blockSize, 0, stream[1] >> >(d_img_ref_float, d_img_dst_float, d_reflut);
LMSE_map_kernel << <gridSize, blockSize, 0, stream[1] >> >(d_reflut, d_lmse);
error_img_kernel << <gridSize, blockSize, 0, stream[1] >> >(d_I_prime_org, d_I_prime_dst, d_I_prime_err);
fast_hi_stats_kernel1 << <loStats_Grid_size, loStats_Block_size, 0, stream[1] >> >(d_I_prime_err, d_I_prime_org, d_outStd, d_outStdMod, d_outMean, d_img_ref_float, d_img_dst_float, d_TEMP);
fast_hi_stats_kernel2 << <loStats_Grid_size, loStats_Block_size, 0, stream[1] >> >(d_I_prime_err, d_I_prime_org, d_outStd, d_outStdMod, d_outMean, d_img_ref_float, d_img_dst_float, d_TEMP);
zeta_map_kernel << <gridSize, blockSize, 0, stream[1] >> >(d_outMean, d_outStd, d_outStdMod, d_zeta);
product_array_kernel << <gridSize, blockSize, 0, stream[1] >> >(d_product, d_zeta, d_lmse); // Product inside summation in MAD eq. 7
//cudaMemcpyAsync(h_product, d_product, REAL_SIZE, cudaMemcpyDeviceToHost, stream[1]); //DEVICE -> HOST
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// Begin Gabor Filterbank:
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// Execute "in-place" C2C 2D-DFT of REF (used in the LEFT side of the Gabor Filterbank)
R2C_kernel << <gridSize, blockSize, 0, stream[1] >> >(d_img_ref_float, d_ref_cufft);
R2C_kernel << <gridSize, blockSize, 0, stream[1] >> >(d_img_dst_float, d_dst_cufft);
cufftExecC2C(fftPlan[1], (cufftComplex *)d_ref_cufft, (cufftComplex *)d_ref_cufft, CUFFT_FORWARD);
cufftExecC2C(fftPlan[1], (cufftComplex *)d_dst_cufft, (cufftComplex *)d_dst_cufft, CUFFT_FORWARD);
float scale[5] = { 0.5, 0.75, 1, 5, 6 };
for (int o = 0; o < 4; o++)
{
for (int s = 0; s < 5; s++)
{
//buildGabor << < gridSize, blockSize, 0, stream[1] >> >(d_logGabor, o, s);
buildGabor << < gridSize, blockSize, 0, stream[1] >> >(d_logGabor, o, s);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
printf("Kernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
}
fftShift_kernel << < fftShift_grid_size, fftShift_block_size, 0, stream[1] >> >(d_logGabor);
pointWise_complex_matrix_mult_kernel_2d << < gridSize, blockSize, 0, stream[1] >> >(d_ref_cufft, d_logGabor, d_ref_c);
pointWise_complex_matrix_mult_kernel_2d << < gridSize, blockSize, 0, stream[1] >> >(d_dst_cufft, d_logGabor, d_dst_c);
cufftExecC2C(fftPlan[1], (cufftComplex *)d_ref_c, (cufftComplex *)d_ref_c, CUFFT_INVERSE);
cufftExecC2C(fftPlan[1], (cufftComplex *)d_dst_c, (cufftComplex *)d_dst_c, CUFFT_INVERSE);
magnitude_kernel << <gridSize, blockSize, 0, stream[1] >> >(d_ref_c, d_ref_c_mag);
magnitude_kernel << <gridSize, blockSize, 0, stream[1] >> >(d_dst_c, d_dst_c_mag);
fast_lo_stats_kernel << <loStats_Grid_size, loStats_Block_size, 0, stream[1] >> >(d_ref_c_mag, d_ref_Std, d_ref_Skw, d_ref_Krt);
fast_lo_stats_kernel << <loStats_Grid_size, loStats_Block_size, 0, stream[1] >> >(d_dst_c_mag, d_dst_Std, d_dst_Skw, d_dst_Krt);
delta_stats_kernel << <gridSize, blockSize, 0, stream[1] >> >(d_ref_Std, d_ref_Skw, d_ref_Krt,
d_dst_Std, d_dst_Skw, d_dst_Krt, scale[s] / 13.25f, d_eta);
}
}
// Copy final eta map back to HOST for collapse (NEEDS TO BE DONE VIA REDUCTION)!
//cudaMemcpyAsync(h_eta, d_eta, REAL_SIZE, cudaMemcpyDeviceToHost, stream[1]); //DEVICE -> HOST
// Host Code waits here on memcpy
cudaMemcpy(h_product, d_product, REAL_SIZE, cudaMemcpyDeviceToHost); //DEVICE -> HOST
// Collapse the visibility-weighted local MSE via L2-norm (MAD eq. 7)
nvtxRangePushA("CPU Detection Map Collapse");
float d_detect = reduce_sum_of_squares_2D_CPU(h_product, BLOCK_SIZE, N - BLOCK_SIZE - 1);
d_detect = sqrt(d_detect) / sqrt(229441.0f); // Number of iterations in loop: counter = 229441
d_detect = d_detect * 200;
nvtxRangePop();
cudaMemcpy(h_eta, d_eta, REAL_SIZE, cudaMemcpyDeviceToHost); //DEVICE -> HOST
nvtxRangePushA("CPU Appearance Map Collapse");
float d_appear = reduce_sum_of_squares_2D_CPU(h_eta, BLOCK_SIZE, N - BLOCK_SIZE);
d_appear = sqrt(d_appear) / 479.0f;
nvtxRangePop();
float beta1 = 0.467;
float beta2 = 0.130;
float alpha = 1 / (1 + beta1*pow(d_detect, beta2));
float MAD = pow(d_detect, alpha)*pow(d_appear, 1 - alpha);
// End NVTX Marker for CUDA-MAD:
nvtxRangePop();
// End CPU Timing
QueryPerformanceCounter(&end_CPU);
milliseconds_CPU = (end_CPU.QuadPart - start_CPU.QuadPart) *
1000.0 / frequency_CPU.QuadPart;
timing_sum += milliseconds_CPU;
std::cout << "Hi-Index d_detect = " << d_detect << std::endl;
std::cout << "Lo-Index d_appear = " << d_appear << std::endl;
std::cout << "\nMAD = " << MAD << std::endl;
} // End timing loop
fprintf(stderr, "\nTime = %.3f ms\n", timing_sum / double(itteration_num));
//----------------------------------------------------------------------------
// Main program body complete - Perform closing operations:
//----------------------------------------------------------------------------
//Error:
// De-allocate memory here...
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaDeviceReset();
#ifndef PROFILING
getch();
#endif
return cudaStatus;
}
//=============================================================================
void linearize_and_cast_from_Mat_to_float(const cv::Mat& mat_in, float* h_float)
{
for (int row = 0; row < 512; row++)
for (int col = 0; col < 512; col++)
h_float[row * 512 + col] = static_cast<float>(mat_in.at<unsigned char>(row, col));
}
//=============================================================================
float reduce_sum_of_squares_2D_CPU(float* in, const int INSIDE_BOUND, const int OUTSIDE_BOUND)
{
float sum = 0.0f;
for (int i = INSIDE_BOUND; i < OUTSIDE_BOUND; i++)
{
for (int j = INSIDE_BOUND; j < OUTSIDE_BOUND; j++)
sum += in[i*N + j] * in[i*N + j];
}
return sum;
}
//=============================================================================
void write_to_file_DEBUG(float* w, const int SIZE)
{
std::ofstream outFile;
outFile.open("TEST.txt");
for (int i = 0; i < SIZE; i++) // Iterate over rows
{
for (int j = 0; j < SIZE; j++) // Iterate over cols
outFile << w[i * SIZE + j] << " ";
if (i != SIZE - 1)
outFile << ";\n";
}
outFile.close();
}
//=============================================================================
|
387936b5cb460fd8d097f56552b417b5f5dfd113.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "random.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
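// Auto-generated micro-benchmark: for each requested matrix size it sweeps
// the 20 block configurations above, forces context creation with hipFree(0),
// performs one synchronised launch of 'random' plus 10 warm-up launches, then
// times 1000 back-to-back launches and prints
// [elapsed_us, (blockX, blockY), (XSIZE, YSIZE)].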
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *x = NULL;
hipMalloc(&x, XSIZE*YSIZE);
hiprandState_t *global_state = NULL;
hipMalloc(&global_state, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(random, dim3(gridBlock), dim3(threadBlock), 0, 0, x, global_state);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
random), dim3(gridBlock),dim3(threadBlock), 0, 0, x,global_state);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
random), dim3(gridBlock),dim3(threadBlock), 0, 0, x,global_state);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 387936b5cb460fd8d097f56552b417b5f5dfd113.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "random.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE*sizeof(double));
curandState *global_state = NULL;
cudaMalloc(&global_state, XSIZE*YSIZE*sizeof(curandState));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
random<<<gridBlock,threadBlock>>>(x,global_state);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
random<<<gridBlock,threadBlock>>>(x,global_state);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
random<<<gridBlock,threadBlock>>>(x,global_state);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
7a6db01583697079d0e1f65d2279edc86aa4fbde.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "MaskRcnnInferencePlugin.h"
namespace nvinfer1 {
__device__ float Logist(float data) { return 1.0f / (1.0f + expf(-data)); }
__global__ void MaskRcnnInferenceKernel(
const int nthreads,
const int detections_per_im,
const int output_size,
const int num_classes,
const float* indices,
const float* masks,
float* out_masks) {
size_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < nthreads) {
int ind = index / output_size / output_size / num_classes;
int ind_class = indices[ind];
int cur_class = index / output_size / output_size % num_classes;
if (ind_class == cur_class) {
int w = index % output_size;
int h = index / output_size % output_size;
int tmp = ind * num_classes * output_size * output_size +
cur_class * output_size*output_size + h * output_size + w;
float maskVal = masks[ind * num_classes * output_size *
output_size + cur_class * output_size * output_size +
h * output_size + w];
out_masks[ind * output_size * output_size + h * output_size + w] = Logist(maskVal);
}
}
}
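// Index decomposition (descriptive sketch; the concrete sizes are assumed):
// with, say, num_classes = 81 and output_size = 28, a flat thread index splits as
//   ind = index / (28*28) / 81        -> which detection,
//   cur_class = (index / (28*28)) % 81 -> which class channel,
//   h = (index / 28) % 28, w = index % 28 -> pixel inside the 28x28 mask.
// Only the channel equal to indices[ind] is pushed through the sigmoid into
// out_masks, so each detection keeps exactly one output_size x output_size mask.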
int maskRcnnInference(int batchSize,
const void *const *inputs, void **outputs,
int detections_per_im, int output_size, int num_classes, hipStream_t stream) {
for (int batch = 0; batch < batchSize; batch++) {
auto in_indices = static_cast<const float *>(inputs[0]) + batch * detections_per_im;
auto in_masks = static_cast<const float *>(inputs[1]) + batch * detections_per_im *
num_classes * output_size * output_size;
auto out_masks = static_cast<float *>(outputs[0]) + batch * detections_per_im * output_size * output_size;
int nthreads = detections_per_im * num_classes * output_size * output_size;
const int max_threads = 1024;
int blocksPerGrid = ceil(static_cast<float>(nthreads) / max_threads);
// TODO: can implement this function with thrust?
MaskRcnnInferenceKernel << <blocksPerGrid, max_threads, 0, stream >> > (
nthreads,
detections_per_im,
output_size,
num_classes,
in_indices,
in_masks,
out_masks);
hipDeviceSynchronize();
}
return 0;
}
} // namespace nvinfer1
| 7a6db01583697079d0e1f65d2279edc86aa4fbde.cu | #include "MaskRcnnInferencePlugin.h"
namespace nvinfer1 {
__device__ float Logist(float data) { return 1.0f / (1.0f + expf(-data)); }
__global__ void MaskRcnnInferenceKernel(
const int nthreads,
const int detections_per_im,
const int output_size,
const int num_classes,
const float* indices,
const float* masks,
float* out_masks) {
size_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < nthreads) {
int ind = index / output_size / output_size / num_classes;
int ind_class = indices[ind];
int cur_class = index / output_size / output_size % num_classes;
if (ind_class == cur_class) {
int w = index % output_size;
int h = index / output_size % output_size;
int tmp = ind * num_classes * output_size * output_size +
cur_class * output_size*output_size + h * output_size + w;
float maskVal = masks[ind * num_classes * output_size *
output_size + cur_class * output_size * output_size +
h * output_size + w];
out_masks[ind * output_size * output_size + h * output_size + w] = Logist(maskVal);
}
}
}
int maskRcnnInference(int batchSize,
const void *const *inputs, void **outputs,
int detections_per_im, int output_size, int num_classes, cudaStream_t stream) {
for (int batch = 0; batch < batchSize; batch++) {
auto in_indices = static_cast<const float *>(inputs[0]) + batch * detections_per_im;
auto in_masks = static_cast<const float *>(inputs[1]) + batch * detections_per_im *
num_classes * output_size * output_size;
auto out_masks = static_cast<float *>(outputs[0]) + batch * detections_per_im * output_size * output_size;
int nthreads = detections_per_im * num_classes * output_size * output_size;
const int max_threads = 1024;
int blocksPerGrid = ceil(static_cast<float>(nthreads) / max_threads);
// TODO: can implement this function with thrust?
MaskRcnnInferenceKernel << <blocksPerGrid, max_threads, 0, stream >> > (
nthreads,
detections_per_im,
output_size,
num_classes,
in_indices,
in_masks,
out_masks);
cudaDeviceSynchronize();
}
return 0;
}
} // namespace nvinfer1
|
4db712de31f351a57e1e67aa5b7c31eaa7f0dbb8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void binarize_weights_kernel(float *weights, int n, int size, float *binary)
{
int f = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (f >= n) return;
int i = 0;
float mean = 0;
for(i = 0; i < size; ++i){
mean += abs(weights[f*size + i]);
}
mean = mean / size;
for(i = 0; i < size; ++i){
binary[f*size + i] = (weights[f*size + i] > 0) ? mean : -mean;
//binary[f*size + i] = weights[f*size + i];
}
} | 4db712de31f351a57e1e67aa5b7c31eaa7f0dbb8.cu | #include "includes.h"
__global__ void binarize_weights_kernel(float *weights, int n, int size, float *binary)
{
int f = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (f >= n) return;
int i = 0;
float mean = 0;
for(i = 0; i < size; ++i){
mean += abs(weights[f*size + i]);
}
mean = mean / size;
for(i = 0; i < size; ++i){
binary[f*size + i] = (weights[f*size + i] > 0) ? mean : -mean;
//binary[f*size + i] = weights[f*size + i];
}
} |
75b8b9427c49551554df27843f929ca737732d13.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright 2018,2019,2020,2021,2022,2023 Sony Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/array.hpp>
#include <nbla/cuda/array/cuda_array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/ifft.hpp>
#include <nbla/cuda/function/utils/fft.cuh>
#include <nbla/variable.hpp>
namespace nbla {
template <typename T> IFFTCuda<T>::~IFFTCuda() {}
template <typename T>
void IFFTCuda<T>::setup_impl(const Variables &inputs,
const Variables &outputs) {
cuda_set_device(this->device_);
IFFT<T>::setup_impl(inputs, outputs);
// Compute scale and store the original shape (i.e, n)
Shape_t oshape(outputs[0]->shape());
Size_t base_axis_output = oshape.size() - 1 - this->signal_ndim_;
signal_size_ = 1;
n_.clear();
for (int i = 0; i < this->signal_ndim_; i++) {
signal_size_ *= oshape[base_axis_output + i];
n_.push_back(oshape[base_axis_output + i]);
}
}
template <typename T>
void IFFTCuda<T>::forward_impl(const Variables &inputs,
const Variables &outputs) {
cuda_set_device(this->device_);
// IFFT
const Tcu *x = inputs[0]->get_data_pointer<Tcu>(this->ctx_);
Tcu *y = outputs[0]->cast_data_and_get_pointer<Tcu>(this->ctx_, true);
exec_cufft<Tcu>(this->ctx_, x, y, inputs[0]->shape(), outputs[0]->shape(),
plan_forward_, true, true, HIPFFT_BACKWARD, this->n_,
this->signal_ndim_);
// Normalize
const Size_t size = outputs[0]->size();
if (this->normalized_) {
const float scale = 1.f / std::sqrt(this->signal_size_);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_normalize_cufft_result, size, scale,
y);
} else {
const float scale = 1.f / this->signal_size_;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_normalize_cufft_result, size, scale,
y);
}
}
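// Scaling convention (descriptive note): for a transform over signal_size_ = N
// samples, normalized_ == true applies the unitary 1/sqrt(N) factor (so a
// forward FFT followed by this IFFT round-trips to the identity when both use
// the "ortho" scaling), while the default places the full 1/N factor here on
// the inverse transform.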
template <typename T>
void IFFTCuda<T>::backward_impl(const Variables &inputs,
const Variables &outputs,
const vector<bool> &propagate_down,
const vector<bool> &accum) {
if (!(propagate_down[0])) {
return;
}
cuda_set_device(this->device_);
const Size_t size = inputs[0]->size();
if (accum[0]) {
// Create tmp array
NdArrayPtr ndarray = make_shared<NdArray>(inputs[0]->shape());
// FFT
const Tcu *dy = outputs[0]->get_grad_pointer<Tcu>(this->ctx_);
Tcu *tmp_buff = ndarray->cast(get_dtype<Tcu>(), this->ctx_)->pointer<Tcu>();
exec_cufft<Tcu>(this->ctx_, dy, tmp_buff, outputs[0]->shape(),
inputs[0]->shape(), plan_backward_, true, true,
HIPFFT_FORWARD, this->n_, this->signal_ndim_);
// Normalize
const Size_t size = inputs[0]->size();
if (this->normalized_) {
const float scale = 1.f / std::sqrt(this->signal_size_);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_normalize_cufft_result, size, scale,
tmp_buff);
} else {
const float scale = 1.f / this->signal_size_;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_normalize_cufft_result, size, scale,
tmp_buff);
}
// Accumulation
Tcu *dx = inputs[0]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[0]);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_add_cufft_result, size, tmp_buff, dx);
} else {
// FFT
const Tcu *dy = outputs[0]->get_grad_pointer<Tcu>(this->ctx_);
Tcu *dx = inputs[0]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[0]);
exec_cufft<Tcu>(this->ctx_, dy, dx, outputs[0]->shape(), inputs[0]->shape(),
plan_backward_, true, true, HIPFFT_FORWARD, this->n_,
this->signal_ndim_);
// Normalize
const Size_t size = inputs[0]->size();
if (this->normalized_) {
const float scale = 1.f / std::sqrt(this->signal_size_);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_normalize_cufft_result, size, scale,
dx);
} else {
const float scale = 1.f / this->signal_size_;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_normalize_cufft_result, size, scale,
dx);
}
}
}
} // namespace nbla
| 75b8b9427c49551554df27843f929ca737732d13.cu | // Copyright 2018,2019,2020,2021,2022,2023 Sony Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/array.hpp>
#include <nbla/cuda/array/cuda_array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/ifft.hpp>
#include <nbla/cuda/function/utils/fft.cuh>
#include <nbla/variable.hpp>
namespace nbla {
template <typename T> IFFTCuda<T>::~IFFTCuda() {}
template <typename T>
void IFFTCuda<T>::setup_impl(const Variables &inputs,
const Variables &outputs) {
cuda_set_device(this->device_);
IFFT<T>::setup_impl(inputs, outputs);
// Compute scale and store the original shape (i.e, n)
Shape_t oshape(outputs[0]->shape());
Size_t base_axis_output = oshape.size() - 1 - this->signal_ndim_;
signal_size_ = 1;
n_.clear();
for (int i = 0; i < this->signal_ndim_; i++) {
signal_size_ *= oshape[base_axis_output + i];
n_.push_back(oshape[base_axis_output + i]);
}
}
template <typename T>
void IFFTCuda<T>::forward_impl(const Variables &inputs,
const Variables &outputs) {
cuda_set_device(this->device_);
// IFFT
const Tcu *x = inputs[0]->get_data_pointer<Tcu>(this->ctx_);
Tcu *y = outputs[0]->cast_data_and_get_pointer<Tcu>(this->ctx_, true);
exec_cufft<Tcu>(this->ctx_, x, y, inputs[0]->shape(), outputs[0]->shape(),
plan_forward_, true, true, CUFFT_INVERSE, this->n_,
this->signal_ndim_);
// Normalize
const Size_t size = outputs[0]->size();
if (this->normalized_) {
const float scale = 1.f / std::sqrt(this->signal_size_);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_normalize_cufft_result, size, scale,
y);
} else {
const float scale = 1.f / this->signal_size_;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_normalize_cufft_result, size, scale,
y);
}
}
template <typename T>
void IFFTCuda<T>::backward_impl(const Variables &inputs,
const Variables &outputs,
const vector<bool> &propagate_down,
const vector<bool> &accum) {
if (!(propagate_down[0])) {
return;
}
cuda_set_device(this->device_);
const Size_t size = inputs[0]->size();
if (accum[0]) {
// Create tmp array
NdArrayPtr ndarray = make_shared<NdArray>(inputs[0]->shape());
// FFT
const Tcu *dy = outputs[0]->get_grad_pointer<Tcu>(this->ctx_);
Tcu *tmp_buff = ndarray->cast(get_dtype<Tcu>(), this->ctx_)->pointer<Tcu>();
exec_cufft<Tcu>(this->ctx_, dy, tmp_buff, outputs[0]->shape(),
inputs[0]->shape(), plan_backward_, true, true,
CUFFT_FORWARD, this->n_, this->signal_ndim_);
// Normalize
const Size_t size = inputs[0]->size();
if (this->normalized_) {
const float scale = 1.f / std::sqrt(this->signal_size_);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_normalize_cufft_result, size, scale,
tmp_buff);
} else {
const float scale = 1.f / this->signal_size_;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_normalize_cufft_result, size, scale,
tmp_buff);
}
// Accumulation
Tcu *dx = inputs[0]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[0]);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_add_cufft_result, size, tmp_buff, dx);
} else {
// FFT
const Tcu *dy = outputs[0]->get_grad_pointer<Tcu>(this->ctx_);
Tcu *dx = inputs[0]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[0]);
exec_cufft<Tcu>(this->ctx_, dy, dx, outputs[0]->shape(), inputs[0]->shape(),
plan_backward_, true, true, CUFFT_FORWARD, this->n_,
this->signal_ndim_);
// Normalize
const Size_t size = inputs[0]->size();
if (this->normalized_) {
const float scale = 1.f / std::sqrt(this->signal_size_);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_normalize_cufft_result, size, scale,
dx);
} else {
const float scale = 1.f / this->signal_size_;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_normalize_cufft_result, size, scale,
dx);
}
}
}
} // namespace nbla
|
e04bdac090f0041f61731ff018d396c531c1eac5.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/LegacyTHFunctionsCUDA.h>
#include <ATen/hip/HIPBlas.h>
#include <ATen/NamedTensorUtils.h>
namespace at { namespace native {
Tensor baddbmm_cuda(const Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {batch1.size(0), batch1.size(1), batch2.size(2)}, "baddbmm");
return legacy::cuda::_th_baddbmm(b_self, batch1, batch2, beta, alpha);
}
Tensor& baddbmm_out_cuda(Tensor &result, const Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {batch1.size(0), batch1.size(1), batch2.size(2)}, "baddbmm_out");
return legacy::cuda::_th_baddbmm_out(result, b_self, batch1, batch2, beta, alpha);
}
Tensor& baddbmm__cuda(Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
return baddbmm_out_cuda(self, self, batch1, batch2, beta, alpha);
}
Tensor& bmm_out_cuda(Tensor &result, const Tensor& batch1, const Tensor& batch2) {
result.resize_({ batch1.size(0), batch1.size(1), batch2.size(2) });
return legacy::cuda::_th_bmm_out(result, batch1, batch2);
}
Tensor bmm_cuda(const Tensor& self, const Tensor& mat2) {
Tensor result = at::empty({0}, self.options());
return native::bmm_out_cuda(result, self, mat2);
}
Tensor prepare_matrix_for_cublas(Tensor& tensor, bool& transpose_tensor) {
Tensor tensor_;
IntArrayRef tensor_strides = tensor.strides();
if ((tensor_strides[0] == 1) && (tensor_strides[1] != 0)) {
tensor_ = tensor;
transpose_tensor = false;
} else if ((tensor_strides[1] == 1) && (tensor_strides[0] != 0)) {
tensor_ = tensor;
transpose_tensor = true;
} else {
transpose_tensor = true;
tensor_ = tensor.clone(at::MemoryFormat::Contiguous);
}
return tensor_;
}
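// Layout note (descriptive sketch): cuBLAS expects column-major operands, while
// the tensors handled here may be row- or column-major contiguous. A stride
// pattern of (1, m) is already column-major (transpose_tensor = false); a
// row-major tensor with strides (n, 1) is handed to cuBLAS as its transpose
// (transpose_tensor = true); anything else is first cloned contiguous. The
// addmm path below then swaps the operands and flips the flags again when the
// output itself is row-major, in effect computing C^T = B^T * A^T.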
namespace {
Tensor& addmm_out_cuda_impl(Tensor& result, const Tensor& self, const Tensor& mat1, const Tensor& mat2, Scalar beta, Scalar alpha) {
TORCH_CHECK(mat1.dim() == 2 && mat2.dim() == 2, "tensors must be 2-D");
Tensor self_;
if (&result != &self) {
std::tie(self_) = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm");
} else {
self_ = self;
}
IntArrayRef mat1_sizes = mat1.sizes();
IntArrayRef mat2_sizes = mat2.sizes();
IntArrayRef self__sizes = self_.sizes();
TORCH_CHECK(mat1_sizes[1] == mat2_sizes[0], "mat1 dim 1 must match mat2 dim 0");
TORCH_CHECK(self__sizes[0] == mat1_sizes[0], "self_ dim 0 must match mat1 dim 0");
TORCH_CHECK(self__sizes[1] == mat2_sizes[1], "self_ dim 1 must match mat2 dim 1");
if (&result != &self) {
at::native::resize_as_(result, self_);
if (beta.to<double>() != 0.0) {
at::native::copy_(result, self_);
}
}
TORCH_CHECK(result.dim() == 2 && self_.dim() == 2, "tensors must be 2-D");
IntArrayRef result_sizes = result.sizes();
if ((result_sizes[0] == 0) || (result_sizes[1] == 0)) {
return result;
}
bool transpose_result;
Tensor result_ = prepare_matrix_for_cublas(result, transpose_result);
bool transpose_mat1;
bool transpose_mat2;
Tensor mat1_ = transpose_result ? mat2 : mat1;
Tensor mat2_ = transpose_result ? mat1 : mat2;
mat1_ = prepare_matrix_for_cublas(mat1_, transpose_mat1);
mat2_ = prepare_matrix_for_cublas(mat2_, transpose_mat2);
if (transpose_result) {
transpose_mat1 = !transpose_mat1;
transpose_mat2 = !transpose_mat2;
mat1_sizes = mat1_.sizes();
mat2_sizes = mat2_.sizes();
}
int64_t m = mat1_sizes[transpose_result ? 1 : 0];
int64_t k = mat1_sizes[transpose_result ? 0 : 1];
int64_t n = mat2_sizes[transpose_result ? 0 : 1];
int64_t mat1_ld = mat1_.stride((transpose_mat1 == transpose_result) ? 1 : 0);
int64_t mat2_ld = mat2_.stride((transpose_mat2 == transpose_result) ? 1 : 0);
int64_t result_ld = result_.stride(transpose_result ? 0 : 1);
at::ScalarType scalar_type = self_.scalar_type();
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, scalar_type, "addmm_cuda", [&] {
scalar_t alpha_val = alpha.to<scalar_t>();
scalar_t beta_val = beta.to<scalar_t>();
scalar_t* mat1_ptr = mat1_.data_ptr<scalar_t>();
scalar_t* mat2_ptr = mat2_.data_ptr<scalar_t>();
scalar_t* result_ptr = result_.data_ptr<scalar_t>();
at::cuda::blas::gemm<scalar_t>(
transpose_mat1 ? 't' : 'n',
transpose_mat2 ? 't' : 'n',
m, n, k,
alpha_val,
mat1_ptr, mat1_ld,
mat2_ptr, mat2_ld,
beta_val,
result_ptr, result_ld
);
});
if (result.data_ptr() != result_.data_ptr()) {
result.copy_(result_);
}
return result;
}
} // anonymous namespace
Tensor& mm_out_cuda(Tensor& result, const Tensor& self, const Tensor& mat2) {
result.resize_({ self.size(0), mat2.size(1) });
return addmm_out_cuda_impl(result, result, self, mat2, 0, 1);
}
Tensor mm_cuda(const Tensor& self, const Tensor& mat2) {
Tensor result = at::empty({ self.size(0), mat2.size(1) }, self.options());
return addmm_out_cuda_impl(result, result, self, mat2, 0, 1);
}
Tensor& addmm_out_cuda(Tensor &out, const Tensor &self,
const Tensor &mat1, const Tensor &mat2,
Scalar beta, Scalar alpha) {
{
at::NoNamesGuard guard;
Tensor& result = addmm_out_cuda_impl(out, self, mat1, mat2, beta, alpha);
}
at::namedinference::propagate_names_for_addmm(out, mat1, mat2, self);
return out;
}
Tensor addmm_cuda(const Tensor& self, const Tensor& mat1, const Tensor& mat2,
Scalar beta, Scalar alpha) {
Tensor out = at::empty({0}, self.options());
addmm_out_cuda(out, self, mat1, mat2, beta, alpha);
return out;
}
Tensor& addmm__cuda(Tensor& self, const Tensor& mat1, const Tensor& mat2,
Scalar beta, Scalar alpha) {
addmm_out_cuda(self, self, mat1, mat2, beta, alpha);
return self;
}
template<typename scalar_t>
void addr_impl_ger_cuda(Tensor &out, const Tensor &self,
const Tensor& vec1, const Tensor& vec2,
scalar_t alpha, scalar_t beta) {
static_assert(std::is_same<scalar_t, float>::value ||
std::is_same<scalar_t, double>::value,
"addr_impl_ger_cuda: only float and double are supported");
if (&out != &self) {
at::native::resize_as_(out, self);
at::native::copy_(out, self);
}
if (beta == 0.0) {
at::native::zero_(out);
}
if (beta != 1.0) {
at::native::mul_(out, beta);
}
if (out.stride(0) == 1) {
at::cuda::blas::ger<scalar_t>(
vec1.size(0), vec2.size(0), alpha,
vec1.data_ptr<scalar_t>(), vec1.stride(0),
vec2.data_ptr<scalar_t>(), vec2.stride(0),
out.data_ptr<scalar_t>(), out.stride(1)
);
} else if (out.stride(1) == 1) {
at::cuda::blas::ger<scalar_t>(
vec2.size(0), vec1.size(0), alpha,
vec2.data_ptr<scalar_t>(), vec2.stride(0),
vec1.data_ptr<scalar_t>(), vec1.stride(0),
out.data_ptr<scalar_t>(), out.stride(0)
);
} else {
Tensor cr = out.clone();
at::cuda::blas::ger<scalar_t>(
vec2.size(0), vec1.size(0), alpha,
vec2.data_ptr<scalar_t>(), vec2.stride(0),
vec1.data_ptr<scalar_t>(), vec1.stride(0),
out.data_ptr<scalar_t>(), out.stride(0)
);
out.set_(cr);
}
}
template<typename scalar_t>
void addr_impl_cuda(Tensor &out, const Tensor &self,
const Tensor& vec1, const Tensor& vec2,
scalar_t alpha, scalar_t beta) {
// currently no Hger/SgerEx in Cublas.
Tensor vec2T = vec2.reshape({1, vec2.size(0)});
Tensor vec1M = vec1.reshape({vec1.size(0), 1});
addmm_out_cuda(out, self, vec1M, vec2T, beta, alpha);
}
template<>
void addr_impl_cuda<float>(Tensor &out, const Tensor &self,
const Tensor& vec1, const Tensor& vec2,
float alpha, float beta) {
addr_impl_ger_cuda<float>(out, self, vec1, vec2, alpha, beta);
}
template<>
void addr_impl_cuda<double>(Tensor &out, const Tensor &self,
const Tensor& vec1, const Tensor& vec2,
double alpha, double beta) {
addr_impl_ger_cuda<double>(out, self, vec1, vec2, alpha, beta);
}
Tensor& addr_out_cuda(Tensor &out, const Tensor& self,
const Tensor& vec1, const Tensor& vec2,
Scalar beta, Scalar alpha) {
TORCH_CHECK(vec1.dim() == 1 && vec2.dim() == 1,
"vec1 and vec2 should be 1-dimensional vectors. Got dimensions ",
vec1.dim(), " and ", vec2.dim());
Tensor self_;
if (&out != &self) {
std::tie(self_) = expand_size(self, {vec1.size(0), vec2.size(0)}, "addr");
} else {
self_ = self;
}
TORCH_CHECK(out.device() == self_.device() &&
out.device() == vec1.device() &&
out.device() == vec2.device(),
"Expected all tensors to be on the same device. Found: ",
out.device(), ", ", self_.device(), ", ",
vec1.device(), " and ", vec2.device());
TORCH_CHECK(self_.dim() == 2,
"2D tensor expected, got ", self_.dim(), "D tensor for input");
TORCH_CHECK(self_.size(0) == vec1.size(0) && self_.size(1) == vec2.size(0),
"size mismatch",
", input: ", self_.sizes(),
", v1: ", vec1.sizes(),
", v2: ", vec2.sizes());
AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, self_.scalar_type(), "addr_out_cuda", [&] {
addr_impl_cuda<scalar_t>(out, self_, vec1, vec2,
alpha.to<scalar_t>(), beta.to<scalar_t>());
});
return out;
}
Tensor& addr__cuda(Tensor& self,
const Tensor& vec1, const Tensor& vec2,
Scalar beta, Scalar alpha) {
addr_out_cuda(self, self, vec1, vec2, beta, alpha);
return self;
}
Tensor addr_cuda(const Tensor& self,
const Tensor& vec1, const Tensor& vec2,
Scalar beta, Scalar alpha) {
Tensor out = at::empty({0}, self.options());
addr_out_cuda(out, self, vec1, vec2, beta, alpha);
return out;
}
Tensor& addbmm_out_cuda(Tensor& out, const Tensor& self,
const Tensor& batch1, const Tensor& batch2,
Scalar beta, Scalar alpha) {
TORCH_CHECK(batch1.dim() == 3 && batch2.dim() == 3,
"Batch tensors should be 3D, got dimensions ", batch1.dim(),
" and ", batch2.dim());
Tensor self_;
if (&out != &self) {
std::tie(self_) = expand_size(self, {batch1.size(1), batch2.size(2)}, "addbmm");
} else {
self_ = self;
}
TORCH_CHECK(out.device() == self_.device() &&
out.device() == batch1.device() &&
out.device() == batch2.device(),
"Expected all tensors to be on the same device. Found: ",
out.device(), ", ", self_.device(), ", ",
batch1.device(), " and ", batch2.device());
TORCH_CHECK(self_.dim() == 2,
"2D tensor expected, got ", self_.dim(), "D tensor for input");
int64_t batchnum = batch1.size(0);
int64_t m1d1 = batch1.size(1);
int64_t innerdim = batch1.size(2);
int64_t m2d2 = batch2.size(2);
TORCH_CHECK(batchnum == batch2.size(0),
"equal number of batches expected");
TORCH_CHECK(m1d1 == self_.size(0),
"first dimension of batch1 must match first dimension of input");
TORCH_CHECK(m2d2 == self_.size(1),
"second dimension of batch2 must match second dimension of input");
TORCH_CHECK(innerdim == batch2.size(1),
"second dimension of batch1 must match first dimension of batch2");
if (&out != &self) {
at::native::resize_as_(out, self_);
if (beta.to<double>() != 0.0) {
at::native::copy_(out, self_);
}
}
for (int64_t i=0; i<batchnum; i++) {
addmm_out_cuda(out, out, batch1[i], batch2[i], beta, alpha);
beta = 1;
}
return out;
}
Tensor& addbmm__cuda(Tensor& self,
const Tensor& batch1, const Tensor& batch2,
Scalar beta, Scalar alpha) {
addbmm_out_cuda(self, self, batch1, batch2, beta, alpha);
return self;
}
Tensor addbmm_cuda(const Tensor& self,
const Tensor& batch1, const Tensor& batch2,
Scalar beta, Scalar alpha)
{
Tensor out = at::empty({0}, self.options());
addbmm_out_cuda(out, self, batch1, batch2, beta, alpha);
return out;
}
} }
| e04bdac090f0041f61731ff018d396c531c1eac5.cu | #include <ATen/ATen.h>
#include <ATen/LegacyTHFunctionsCUDA.h>
#include <ATen/cuda/CUDABlas.h>
#include <ATen/NamedTensorUtils.h>
namespace at { namespace native {
Tensor baddbmm_cuda(const Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {batch1.size(0), batch1.size(1), batch2.size(2)}, "baddbmm");
return legacy::cuda::_th_baddbmm(b_self, batch1, batch2, beta, alpha);
}
Tensor& baddbmm_out_cuda(Tensor &result, const Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {batch1.size(0), batch1.size(1), batch2.size(2)}, "baddbmm_out");
return legacy::cuda::_th_baddbmm_out(result, b_self, batch1, batch2, beta, alpha);
}
Tensor& baddbmm__cuda(Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
return baddbmm_out_cuda(self, self, batch1, batch2, beta, alpha);
}
Tensor& bmm_out_cuda(Tensor &result, const Tensor& batch1, const Tensor& batch2) {
result.resize_({ batch1.size(0), batch1.size(1), batch2.size(2) });
return legacy::cuda::_th_bmm_out(result, batch1, batch2);
}
Tensor bmm_cuda(const Tensor& self, const Tensor& mat2) {
Tensor result = at::empty({0}, self.options());
return native::bmm_out_cuda(result, self, mat2);
}
Tensor prepare_matrix_for_cublas(Tensor& tensor, bool& transpose_tensor) {
Tensor tensor_;
IntArrayRef tensor_strides = tensor.strides();
if ((tensor_strides[0] == 1) && (tensor_strides[1] != 0)) {
tensor_ = tensor;
transpose_tensor = false;
} else if ((tensor_strides[1] == 1) && (tensor_strides[0] != 0)) {
tensor_ = tensor;
transpose_tensor = true;
} else {
transpose_tensor = true;
tensor_ = tensor.clone(at::MemoryFormat::Contiguous);
}
return tensor_;
}
namespace {
Tensor& addmm_out_cuda_impl(Tensor& result, const Tensor& self, const Tensor& mat1, const Tensor& mat2, Scalar beta, Scalar alpha) {
TORCH_CHECK(mat1.dim() == 2 && mat2.dim() == 2, "tensors must be 2-D");
Tensor self_;
if (&result != &self) {
std::tie(self_) = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm");
} else {
self_ = self;
}
IntArrayRef mat1_sizes = mat1.sizes();
IntArrayRef mat2_sizes = mat2.sizes();
IntArrayRef self__sizes = self_.sizes();
TORCH_CHECK(mat1_sizes[1] == mat2_sizes[0], "mat1 dim 1 must match mat2 dim 0");
TORCH_CHECK(self__sizes[0] == mat1_sizes[0], "self_ dim 0 must match mat1 dim 0");
TORCH_CHECK(self__sizes[1] == mat2_sizes[1], "self_ dim 1 must match mat2 dim 1");
if (&result != &self) {
at::native::resize_as_(result, self_);
if (beta.to<double>() != 0.0) {
at::native::copy_(result, self_);
}
}
TORCH_CHECK(result.dim() == 2 && self_.dim() == 2, "tensors must be 2-D");
IntArrayRef result_sizes = result.sizes();
if ((result_sizes[0] == 0) || (result_sizes[1] == 0)) {
return result;
}
bool transpose_result;
Tensor result_ = prepare_matrix_for_cublas(result, transpose_result);
bool transpose_mat1;
bool transpose_mat2;
Tensor mat1_ = transpose_result ? mat2 : mat1;
Tensor mat2_ = transpose_result ? mat1 : mat2;
mat1_ = prepare_matrix_for_cublas(mat1_, transpose_mat1);
mat2_ = prepare_matrix_for_cublas(mat2_, transpose_mat2);
if (transpose_result) {
transpose_mat1 = !transpose_mat1;
transpose_mat2 = !transpose_mat2;
mat1_sizes = mat1_.sizes();
mat2_sizes = mat2_.sizes();
}
int64_t m = mat1_sizes[transpose_result ? 1 : 0];
int64_t k = mat1_sizes[transpose_result ? 0 : 1];
int64_t n = mat2_sizes[transpose_result ? 0 : 1];
int64_t mat1_ld = mat1_.stride((transpose_mat1 == transpose_result) ? 1 : 0);
int64_t mat2_ld = mat2_.stride((transpose_mat2 == transpose_result) ? 1 : 0);
int64_t result_ld = result_.stride(transpose_result ? 0 : 1);
at::ScalarType scalar_type = self_.scalar_type();
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, scalar_type, "addmm_cuda", [&] {
scalar_t alpha_val = alpha.to<scalar_t>();
scalar_t beta_val = beta.to<scalar_t>();
scalar_t* mat1_ptr = mat1_.data_ptr<scalar_t>();
scalar_t* mat2_ptr = mat2_.data_ptr<scalar_t>();
scalar_t* result_ptr = result_.data_ptr<scalar_t>();
at::cuda::blas::gemm<scalar_t>(
transpose_mat1 ? 't' : 'n',
transpose_mat2 ? 't' : 'n',
m, n, k,
alpha_val,
mat1_ptr, mat1_ld,
mat2_ptr, mat2_ld,
beta_val,
result_ptr, result_ld
);
});
if (result.data_ptr() != result_.data_ptr()) {
result.copy_(result_);
}
return result;
}
} // anonymous namespace
Tensor& mm_out_cuda(Tensor& result, const Tensor& self, const Tensor& mat2) {
result.resize_({ self.size(0), mat2.size(1) });
return addmm_out_cuda_impl(result, result, self, mat2, 0, 1);
}
Tensor mm_cuda(const Tensor& self, const Tensor& mat2) {
Tensor result = at::empty({ self.size(0), mat2.size(1) }, self.options());
return addmm_out_cuda_impl(result, result, self, mat2, 0, 1);
}
Tensor& addmm_out_cuda(Tensor &out, const Tensor &self,
const Tensor &mat1, const Tensor &mat2,
Scalar beta, Scalar alpha) {
{
at::NoNamesGuard guard;
Tensor& result = addmm_out_cuda_impl(out, self, mat1, mat2, beta, alpha);
}
at::namedinference::propagate_names_for_addmm(out, mat1, mat2, self);
return out;
}
Tensor addmm_cuda(const Tensor& self, const Tensor& mat1, const Tensor& mat2,
Scalar beta, Scalar alpha) {
Tensor out = at::empty({0}, self.options());
addmm_out_cuda(out, self, mat1, mat2, beta, alpha);
return out;
}
Tensor& addmm__cuda(Tensor& self, const Tensor& mat1, const Tensor& mat2,
Scalar beta, Scalar alpha) {
addmm_out_cuda(self, self, mat1, mat2, beta, alpha);
return self;
}
template<typename scalar_t>
void addr_impl_ger_cuda(Tensor &out, const Tensor &self,
const Tensor& vec1, const Tensor& vec2,
scalar_t alpha, scalar_t beta) {
static_assert(std::is_same<scalar_t, float>::value ||
std::is_same<scalar_t, double>::value,
"addr_impl_ger_cuda: only float and double are supported");
if (&out != &self) {
at::native::resize_as_(out, self);
at::native::copy_(out, self);
}
if (beta == 0.0) {
at::native::zero_(out);
}
if (beta != 1.0) {
at::native::mul_(out, beta);
}
if (out.stride(0) == 1) {
at::cuda::blas::ger<scalar_t>(
vec1.size(0), vec2.size(0), alpha,
vec1.data_ptr<scalar_t>(), vec1.stride(0),
vec2.data_ptr<scalar_t>(), vec2.stride(0),
out.data_ptr<scalar_t>(), out.stride(1)
);
} else if (out.stride(1) == 1) {
at::cuda::blas::ger<scalar_t>(
vec2.size(0), vec1.size(0), alpha,
vec2.data_ptr<scalar_t>(), vec2.stride(0),
vec1.data_ptr<scalar_t>(), vec1.stride(0),
out.data_ptr<scalar_t>(), out.stride(0)
);
} else {
Tensor cr = out.clone();
at::cuda::blas::ger<scalar_t>(
vec2.size(0), vec1.size(0), alpha,
vec2.data_ptr<scalar_t>(), vec2.stride(0),
vec1.data_ptr<scalar_t>(), vec1.stride(0),
out.data_ptr<scalar_t>(), out.stride(0)
);
out.set_(cr);
}
}
template<typename scalar_t>
void addr_impl_cuda(Tensor &out, const Tensor &self,
const Tensor& vec1, const Tensor& vec2,
scalar_t alpha, scalar_t beta) {
// currently no Hger/SgerEx in Cublas.
Tensor vec2T = vec2.reshape({1, vec2.size(0)});
Tensor vec1M = vec1.reshape({vec1.size(0), 1});
addmm_out_cuda(out, self, vec1M, vec2T, beta, alpha);
}
template<>
void addr_impl_cuda<float>(Tensor &out, const Tensor &self,
const Tensor& vec1, const Tensor& vec2,
float alpha, float beta) {
addr_impl_ger_cuda<float>(out, self, vec1, vec2, alpha, beta);
}
template<>
void addr_impl_cuda<double>(Tensor &out, const Tensor &self,
const Tensor& vec1, const Tensor& vec2,
double alpha, double beta) {
addr_impl_ger_cuda<double>(out, self, vec1, vec2, alpha, beta);
}
Tensor& addr_out_cuda(Tensor &out, const Tensor& self,
const Tensor& vec1, const Tensor& vec2,
Scalar beta, Scalar alpha) {
TORCH_CHECK(vec1.dim() == 1 && vec2.dim() == 1,
"vec1 and vec2 should be 1-dimensional vectors. Got dimensions ",
vec1.dim(), " and ", vec2.dim());
Tensor self_;
if (&out != &self) {
std::tie(self_) = expand_size(self, {vec1.size(0), vec2.size(0)}, "addr");
} else {
self_ = self;
}
TORCH_CHECK(out.device() == self_.device() &&
out.device() == vec1.device() &&
out.device() == vec2.device(),
"Expected all tensors to be on the same device. Found: ",
out.device(), ", ", self_.device(), ", ",
vec1.device(), " and ", vec2.device());
TORCH_CHECK(self_.dim() == 2,
"2D tensor expected, got ", self_.dim(), "D tensor for input");
TORCH_CHECK(self_.size(0) == vec1.size(0) && self_.size(1) == vec2.size(0),
"size mismatch",
", input: ", self_.sizes(),
", v1: ", vec1.sizes(),
", v2: ", vec2.sizes());
AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, self_.scalar_type(), "addr_out_cuda", [&] {
addr_impl_cuda<scalar_t>(out, self_, vec1, vec2,
alpha.to<scalar_t>(), beta.to<scalar_t>());
});
return out;
}
Tensor& addr__cuda(Tensor& self,
const Tensor& vec1, const Tensor& vec2,
Scalar beta, Scalar alpha) {
addr_out_cuda(self, self, vec1, vec2, beta, alpha);
return self;
}
Tensor addr_cuda(const Tensor& self,
const Tensor& vec1, const Tensor& vec2,
Scalar beta, Scalar alpha) {
Tensor out = at::empty({0}, self.options());
addr_out_cuda(out, self, vec1, vec2, beta, alpha);
return out;
}
Tensor& addbmm_out_cuda(Tensor& out, const Tensor& self,
const Tensor& batch1, const Tensor& batch2,
Scalar beta, Scalar alpha) {
TORCH_CHECK(batch1.dim() == 3 && batch2.dim() == 3,
"Batch tensors should be 3D, got dimensions ", batch1.dim(),
" and ", batch2.dim());
Tensor self_;
if (&out != &self) {
std::tie(self_) = expand_size(self, {batch1.size(1), batch2.size(2)}, "addbmm");
} else {
self_ = self;
}
TORCH_CHECK(out.device() == self_.device() &&
out.device() == batch1.device() &&
out.device() == batch2.device(),
"Expected all tensors to be on the same device. Found: ",
out.device(), ", ", self_.device(), ", ",
batch1.device(), " and ", batch2.device());
TORCH_CHECK(self_.dim() == 2,
"2D tensor expected, got ", self_.dim(), "D tensor for input");
int64_t batchnum = batch1.size(0);
int64_t m1d1 = batch1.size(1);
int64_t innerdim = batch1.size(2);
int64_t m2d2 = batch2.size(2);
TORCH_CHECK(batchnum == batch2.size(0),
"equal number of batches expected");
TORCH_CHECK(m1d1 == self_.size(0),
"first dimension of batch1 must match first dimension of input");
TORCH_CHECK(m2d2 == self_.size(1),
"second dimension of batch2 must match second dimension of input");
TORCH_CHECK(innerdim == batch2.size(1),
"second dimension of batch1 must match first dimension of batch2");
if (&out != &self) {
at::native::resize_as_(out, self_);
if (beta.to<double>() != 0.0) {
at::native::copy_(out, self_);
}
}
for (int64_t i=0; i<batchnum; i++) {
addmm_out_cuda(out, out, batch1[i], batch2[i], beta, alpha);
beta = 1;
}
return out;
}
Tensor& addbmm__cuda(Tensor& self,
const Tensor& batch1, const Tensor& batch2,
Scalar beta, Scalar alpha) {
addbmm_out_cuda(self, self, batch1, batch2, beta, alpha);
return self;
}
Tensor addbmm_cuda(const Tensor& self,
const Tensor& batch1, const Tensor& batch2,
Scalar beta, Scalar alpha)
{
Tensor out = at::empty({0}, self.options());
addbmm_out_cuda(out, self, batch1, batch2, beta, alpha);
return out;
}
} }
|
ffb71225b038af6a5c3262d0176d2d1483b62ea9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ReductionExecution.hpp"
namespace MNN {
namespace CUDA {
ReductionExecution::ReductionExecution(ReductionType opType, int axis, Backend *backend) : Execution(backend) {
mType = opType;
mAxis = axis;
auto staticPool = static_cast<CUDABackend*>(backend)->getStaticBufferPool();
mParam = staticPool->alloc(sizeof(ReduceParam));
}
ReductionExecution::~ ReductionExecution() {
auto staticPool = static_cast<CUDABackend*>(backend())->getStaticBufferPool();
staticPool->free(mParam);
}
ErrorCode ReductionExecution::onResize(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) {
auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
int inside = 1;
int outside = 1;
int axis = inputs[0]->length(mAxis);
for (int i=0; i<mAxis; ++i) {
outside *= inputs[0]->length(i);
}
for (int i=mAxis+1; i<inputs[0]->dimensions(); ++i) {
inside *= inputs[0]->length(i);
}
mCpuParam.inside = inside;
mCpuParam.outside = outside;
mCpuParam.axis = axis;
cuda_check(hipMemcpy((uint8_t*)mParam.first + mParam.second, &mCpuParam, sizeof(ReduceParam), hipMemcpyHostToDevice));
//MNN_PRINT("Reduction axis_idx:%d, outside:%d, axis:%d, inside:%d\n", mAxis, outside, axis, inside);
return NO_ERROR;
}
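// Worked example (illustrative): for an input of shape {2, 3, 4} reduced over
// mAxis = 1, the loops above give outside = 2, axis = 3, inside = 4; the tensor
// is then viewed as outside x axis x inside, and each thread launched from
// onExecute handles one (outside, inside) pair, reducing over the axis dimension.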
ErrorCode ReductionExecution::onExecute(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) {
auto input = (void*)inputs[0]->deviceId();
auto output = (void*)outputs[0]->deviceId();
auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
int inside = mCpuParam.inside;
int outside = mCpuParam.outside;
int count = inside * outside;
int block_num = runtime->blocks_num(count);
int threads_num = runtime->threads_num();
auto param = (ReduceParam*)((uint8_t*)mParam.first + mParam.second);
if (inputs[0]->getType() == halide_type_of<float>()) {
if (static_cast<CUDABackend*>(backend())->useFp16()) {
switch (mType) {
case ReductionType_MEAN:
hipLaunchKernelGGL(( MEAN), dim3(block_num), dim3(threads_num), 0, 0, (const half*)input, (half*)output, param);
return NO_ERROR;
case ReductionType_SUM:
hipLaunchKernelGGL(( SUM), dim3(block_num), dim3(threads_num), 0, 0, (const half*)input, (half*)output, param);
return NO_ERROR;
case ReductionType_MINIMUM:
hipLaunchKernelGGL(( MINIMUM), dim3(block_num), dim3(threads_num), 0, 0, (const half*)input, (half*)output, param);
return NO_ERROR;
case ReductionType_MAXIMUM:
hipLaunchKernelGGL(( MAXIMUM), dim3(block_num), dim3(threads_num), 0, 0, (const half*)input, (half*)output, param);
return NO_ERROR;
case ReductionType_PROD:
hipLaunchKernelGGL(( PROD), dim3(block_num), dim3(threads_num), 0, 0, (const half*)input, (half*)output, param);
return NO_ERROR;
}
} else {
switch (mType) {
case ReductionType_MEAN:
hipLaunchKernelGGL(( MEAN), dim3(block_num), dim3(threads_num), 0, 0, (const float*)input, (float*)output, param);
return NO_ERROR;
case ReductionType_SUM:
hipLaunchKernelGGL(( SUM), dim3(block_num), dim3(threads_num), 0, 0, (const float*)input, (float*)output, param);
return NO_ERROR;
case ReductionType_MINIMUM:
hipLaunchKernelGGL(( MINIMUM), dim3(block_num), dim3(threads_num), 0, 0, (const float*)input, (float*)output, param);
return NO_ERROR;
case ReductionType_MAXIMUM:
hipLaunchKernelGGL(( MAXIMUM), dim3(block_num), dim3(threads_num), 0, 0, (const float*)input, (float*)output, param);
return NO_ERROR;
case ReductionType_PROD:
hipLaunchKernelGGL(( PROD), dim3(block_num), dim3(threads_num), 0, 0, (const float*)input, (float*)output, param);
return NO_ERROR;
}
}
MNN_ASSERT(false);
return NOT_SUPPORT;
}
MNN_ASSERT(inputs[0]->getType() == halide_type_of<int32_t>());
switch (mType) {
case ReductionType_MEAN:
hipLaunchKernelGGL(( MEAN), dim3(block_num), dim3(threads_num), 0, 0, (const int32_t*)input, (int32_t*)output, param);
return NO_ERROR;
case ReductionType_SUM:
hipLaunchKernelGGL(( SUM), dim3(block_num), dim3(threads_num), 0, 0, (const int32_t*)input, (int32_t*)output, param);
return NO_ERROR;
case ReductionType_MINIMUM:
hipLaunchKernelGGL(( MINIMUM), dim3(block_num), dim3(threads_num), 0, 0, (const int32_t*)input, (int32_t*)output, param);
return NO_ERROR;
case ReductionType_MAXIMUM:
hipLaunchKernelGGL(( MAXIMUM), dim3(block_num), dim3(threads_num), 0, 0, (const int32_t*)input, (int32_t*)output, param);
return NO_ERROR;
case ReductionType_PROD:
hipLaunchKernelGGL(( PROD), dim3(block_num), dim3(threads_num), 0, 0, (const int32_t*)input, (int32_t*)output, param);
return NO_ERROR;
case ReductionType_ANY:
hipLaunchKernelGGL(( MAXIMUM), dim3(block_num), dim3(threads_num), 0, 0, (const int32_t*)input, (int32_t*)output, param);
return NO_ERROR;
case ReductionType_ALL:
hipLaunchKernelGGL(( MINIMUM), dim3(block_num), dim3(threads_num), 0, 0, (const int32_t*)input, (int32_t*)output, param);
return NO_ERROR;
}
MNN_ASSERT(false);
return NOT_SUPPORT;
}
class ReductionCreator : public CUDABackend::Creator {
public:
virtual Execution* onCreate(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs,
const MNN::Op* op, Backend* backend) const override {
auto type = inputs[0]->getType();
if (type.bits != 32) {
return nullptr;
}
if (type.code != halide_type_float && type.code != halide_type_int) {
return nullptr;
}
auto axis = op->main_as_ReductionParam()->dim()->data()[0];
auto opType = op->main_as_ReductionParam()->operation();
return new ReductionExecution(opType, axis, backend);
}
};
static CUDACreatorRegister<ReductionCreator> __init(OpType_Reduction);
}
}
| ffb71225b038af6a5c3262d0176d2d1483b62ea9.cu | #include "ReductionExecution.hpp"
namespace MNN {
namespace CUDA {
ReductionExecution::ReductionExecution(ReductionType opType, int axis, Backend *backend) : Execution(backend) {
mType = opType;
mAxis = axis;
auto staticPool = static_cast<CUDABackend*>(backend)->getStaticBufferPool();
mParam = staticPool->alloc(sizeof(ReduceParam));
}
ReductionExecution::~ ReductionExecution() {
auto staticPool = static_cast<CUDABackend*>(backend())->getStaticBufferPool();
staticPool->free(mParam);
}
ErrorCode ReductionExecution::onResize(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) {
auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
int inside = 1;
int outside = 1;
int axis = inputs[0]->length(mAxis);
for (int i=0; i<mAxis; ++i) {
outside *= inputs[0]->length(i);
}
for (int i=mAxis+1; i<inputs[0]->dimensions(); ++i) {
inside *= inputs[0]->length(i);
}
mCpuParam.inside = inside;
mCpuParam.outside = outside;
mCpuParam.axis = axis;
cuda_check(cudaMemcpy((uint8_t*)mParam.first + mParam.second, &mCpuParam, sizeof(ReduceParam), cudaMemcpyHostToDevice));
//MNN_PRINT("Reduction axis_idx:%d, outside:%d, axis:%d, inside:%d\n", mAxis, outside, axis, inside);
return NO_ERROR;
}
ErrorCode ReductionExecution::onExecute(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) {
auto input = (void*)inputs[0]->deviceId();
auto output = (void*)outputs[0]->deviceId();
auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
int inside = mCpuParam.inside;
int outside = mCpuParam.outside;
int count = inside * outside;
int block_num = runtime->blocks_num(count);
int threads_num = runtime->threads_num();
auto param = (ReduceParam*)((uint8_t*)mParam.first + mParam.second);
if (inputs[0]->getType() == halide_type_of<float>()) {
if (static_cast<CUDABackend*>(backend())->useFp16()) {
switch (mType) {
case ReductionType_MEAN:
MEAN<<<block_num, threads_num>>>((const half*)input, (half*)output, param);
return NO_ERROR;
case ReductionType_SUM:
SUM<<<block_num, threads_num>>>((const half*)input, (half*)output, param);
return NO_ERROR;
case ReductionType_MINIMUM:
MINIMUM<<<block_num, threads_num>>>((const half*)input, (half*)output, param);
return NO_ERROR;
case ReductionType_MAXIMUM:
MAXIMUM<<<block_num, threads_num>>>((const half*)input, (half*)output, param);
return NO_ERROR;
case ReductionType_PROD:
PROD<<<block_num, threads_num>>>((const half*)input, (half*)output, param);
return NO_ERROR;
}
} else {
switch (mType) {
case ReductionType_MEAN:
MEAN<<<block_num, threads_num>>>((const float*)input, (float*)output, param);
return NO_ERROR;
case ReductionType_SUM:
SUM<<<block_num, threads_num>>>((const float*)input, (float*)output, param);
return NO_ERROR;
case ReductionType_MINIMUM:
MINIMUM<<<block_num, threads_num>>>((const float*)input, (float*)output, param);
return NO_ERROR;
case ReductionType_MAXIMUM:
MAXIMUM<<<block_num, threads_num>>>((const float*)input, (float*)output, param);
return NO_ERROR;
case ReductionType_PROD:
PROD<<<block_num, threads_num>>>((const float*)input, (float*)output, param);
return NO_ERROR;
}
}
MNN_ASSERT(false);
return NOT_SUPPORT;
}
MNN_ASSERT(inputs[0]->getType() == halide_type_of<int32_t>());
switch (mType) {
case ReductionType_MEAN:
MEAN<<<block_num, threads_num>>>((const int32_t*)input, (int32_t*)output, param);
return NO_ERROR;
case ReductionType_SUM:
SUM<<<block_num, threads_num>>>((const int32_t*)input, (int32_t*)output, param);
return NO_ERROR;
case ReductionType_MINIMUM:
MINIMUM<<<block_num, threads_num>>>((const int32_t*)input, (int32_t*)output, param);
return NO_ERROR;
case ReductionType_MAXIMUM:
MAXIMUM<<<block_num, threads_num>>>((const int32_t*)input, (int32_t*)output, param);
return NO_ERROR;
case ReductionType_PROD:
PROD<<<block_num, threads_num>>>((const int32_t*)input, (int32_t*)output, param);
return NO_ERROR;
case ReductionType_ANY:
MAXIMUM<<<block_num, threads_num>>>((const int32_t*)input, (int32_t*)output, param);
return NO_ERROR;
case ReductionType_ALL:
MINIMUM<<<block_num, threads_num>>>((const int32_t*)input, (int32_t*)output, param);
return NO_ERROR;
}
MNN_ASSERT(false);
return NOT_SUPPORT;
}
class ReductionCreator : public CUDABackend::Creator {
public:
virtual Execution* onCreate(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs,
const MNN::Op* op, Backend* backend) const override {
auto type = inputs[0]->getType();
if (type.bits != 32) {
return nullptr;
}
if (type.code != halide_type_float && type.code != halide_type_int) {
return nullptr;
}
auto axis = op->main_as_ReductionParam()->dim()->data()[0];
auto opType = op->main_as_ReductionParam()->operation();
return new ReductionExecution(opType, axis, backend);
}
};
static CUDACreatorRegister<ReductionCreator> __init(OpType_Reduction);
}
}
|
bdce81884cff5306265633d84c027bdc49aa761a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_transpose.h"
//The following modified from cuda sdk-5.0
#define TRANSPOSE_TILE_DIM 32
#define TRANSPOSE_BLOCK_ROWS 8
static __global__ void transposeNoBankConflicts(double *odata, const double *idata, const int width, const int height)
{
__shared__ double tile[TRANSPOSE_TILE_DIM][TRANSPOSE_TILE_DIM+1];
int xIndex,yIndex,index_in,index_out;
xIndex = blockIdx.x * TRANSPOSE_TILE_DIM + threadIdx.x;
yIndex = blockIdx.y * TRANSPOSE_TILE_DIM + threadIdx.y;
index_in = xIndex + (yIndex)*width;
for (int i=0; i<TRANSPOSE_TILE_DIM; i+=TRANSPOSE_BLOCK_ROWS)
{
if(xIndex < width && yIndex+i < height){
tile[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width];}
}
__syncthreads();
xIndex = blockIdx.y * TRANSPOSE_TILE_DIM + threadIdx.x;
yIndex = blockIdx.x * TRANSPOSE_TILE_DIM + threadIdx.y;
index_out = xIndex + (yIndex)*height;
for (int i=0; i<TRANSPOSE_TILE_DIM; i+=TRANSPOSE_BLOCK_ROWS)
{
if(yIndex+i < width && xIndex < height){
odata[index_out+i*height] = tile[threadIdx.x][threadIdx.y+i];}
}
}
void cuda_transpose(double* odata, double* idata, const int width, const int height)
{
// Put df/dy in "normal" order
dim3 nBlocks2D,nThreads2D;
nThreads2D.x = TRANSPOSE_TILE_DIM;
nThreads2D.y = TRANSPOSE_BLOCK_ROWS;
nBlocks2D.x = (width+TRANSPOSE_TILE_DIM-1)/TRANSPOSE_TILE_DIM;
nBlocks2D.y = (height+TRANSPOSE_TILE_DIM-1)/TRANSPOSE_TILE_DIM;
hipLaunchKernelGGL(( transposeNoBankConflicts), dim3(nBlocks2D),dim3(nThreads2D), 0, 0, odata,idata,width,height);
#ifdef ZERORK_FULL_DEBUG
cudaErrChk( hipPeekAtLastError() , 1);
cudaErrChk( hipDeviceSynchronize() , 1);
#endif
}
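// Usage sketch (illustrative, buffer names assumed): the wrapper transposes an
// input laid out as `height` rows of `width` doubles into an output laid out as
// `width` rows of `height` doubles; the +1 column padding on the shared tile
// avoids shared-memory bank conflicts when the tile is read back column-wise.
//
// double *d_in, *d_out; // device buffers of width*height doubles each
// cuda_transpose(d_out, d_in, width, height);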
| bdce81884cff5306265633d84c027bdc49aa761a.cu |
#include "cuda_transpose.h"
//The following modified from cuda sdk-5.0
#define TRANSPOSE_TILE_DIM 32
#define TRANSPOSE_BLOCK_ROWS 8
static __global__ void transposeNoBankConflicts(double *odata, const double *idata, const int width, const int height)
{
__shared__ double tile[TRANSPOSE_TILE_DIM][TRANSPOSE_TILE_DIM+1];
int xIndex,yIndex,index_in,index_out;
xIndex = blockIdx.x * TRANSPOSE_TILE_DIM + threadIdx.x;
yIndex = blockIdx.y * TRANSPOSE_TILE_DIM + threadIdx.y;
index_in = xIndex + (yIndex)*width;
for (int i=0; i<TRANSPOSE_TILE_DIM; i+=TRANSPOSE_BLOCK_ROWS)
{
if(xIndex < width && yIndex+i < height){
tile[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width];}
}
__syncthreads();
xIndex = blockIdx.y * TRANSPOSE_TILE_DIM + threadIdx.x;
yIndex = blockIdx.x * TRANSPOSE_TILE_DIM + threadIdx.y;
index_out = xIndex + (yIndex)*height;
for (int i=0; i<TRANSPOSE_TILE_DIM; i+=TRANSPOSE_BLOCK_ROWS)
{
if(yIndex+i < width && xIndex < height){
odata[index_out+i*height] = tile[threadIdx.x][threadIdx.y+i];}
}
}
void cuda_transpose(double* odata, double* idata, const int width, const int height)
{
// Put df/dy in "normal" order
dim3 nBlocks2D,nThreads2D;
nThreads2D.x = TRANSPOSE_TILE_DIM;
nThreads2D.y = TRANSPOSE_BLOCK_ROWS;
nBlocks2D.x = (width+TRANSPOSE_TILE_DIM-1)/TRANSPOSE_TILE_DIM;
nBlocks2D.y = (height+TRANSPOSE_TILE_DIM-1)/TRANSPOSE_TILE_DIM;
transposeNoBankConflicts<<<nBlocks2D,nThreads2D>>>(odata,idata,width,height);
#ifdef ZERORK_FULL_DEBUG
cudaErrChk( cudaPeekAtLastError() , 1);
cudaErrChk( cudaDeviceSynchronize() , 1);
#endif
}
|
523ee2a5b775ceb970ac88d595b18cf706d50a8d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@author Mark Gates
@author Azzam Haidar
@generated from magmablas/zlaset.cu normal z -> d, Tue Feb 9 16:05:31 2016
*/
#include "magma_internal.h"
#include "batched_kernel_param.h"
// To deal with really large matrices, this launchs multiple super blocks,
// each with up to 64K-1 x 64K-1 thread blocks, which is up to 4194240 x 4194240 matrix with BLK=64.
// CUDA architecture 2.0 limits each grid dimension to 64K-1.
// Instances arose for vectors used by sparse matrices with M > 4194240, though N is small.
const magma_int_t max_blocks = 65535;
// BLK_X and BLK_Y need to be equal for dlaset_q to deal with diag & offdiag
// when looping over super blocks.
// Formerly, BLK_X and BLK_Y could be different.
#define BLK_X 64
#define BLK_Y BLK_X
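// Illustrative sizing: with BLK_X = 64 and max_blocks = 65535, one super block
// spans super_NB = 65535 * 64 = 4,194,240 rows or columns, so e.g. a tall
// vector with m = 10,000,000 is processed as ceil(10,000,000 / 4,194,240) = 3
// super blocks along the row dimension, each launched as a separate kernel.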
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to dlaset, dlacpy, dlag2s, clag2z, dgeadd.
*/
static __device__
void dlaset_full_device(
int m, int n,
double offdiag, double diag,
double *A, int lda )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (below diag || above diag || offdiag == diag) */
bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y || ind + BLK_X <= iby || MAGMA_D_EQUAL( offdiag, diag )));
/* do only rows inside matrix */
if ( ind < m ) {
A += ind + iby*lda;
if ( full ) {
// full block-column, off-diagonal block or offdiag == diag
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
A[j*lda] = offdiag;
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( iby+j == ind )
A[j*lda] = diag;
else
A[j*lda] = offdiag;
}
}
}
}
/*
Similar to dlaset_full, but updates only the diagonal and below.
Blocks that are fully above the diagonal exit immediately.
Code similar to dlaset, dlacpy, zlat2c, clat2z.
*/
static __device__
void dlaset_lower_device(
int m, int n,
double offdiag, double diag,
double *A, int lda )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (below diag) */
bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y));
/* do only rows inside matrix, and blocks not above diag */
if ( ind < m && ind + BLK_X > iby ) {
A += ind + iby*lda;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
A[j*lda] = offdiag;
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( iby+j == ind )
A[j*lda] = diag;
else if ( ind > iby+j )
A[j*lda] = offdiag;
}
}
}
}
/*
Similar to dlaset_full, but updates only the diagonal and above.
Blocks that are fully below the diagonal exit immediately.
Code similar to dlaset, dlacpy, zlat2c, clat2z.
*/
static __device__
void dlaset_upper_device(
int m, int n,
double offdiag, double diag,
double *A, int lda )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (above diag) */
bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby));
/* do only rows inside matrix, and blocks not below diag */
if ( ind < m && ind < iby + BLK_Y ) {
A += ind + iby*lda;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
A[j*lda] = offdiag;
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( iby+j == ind )
A[j*lda] = diag;
else if ( ind < iby+j )
A[j*lda] = offdiag;
}
}
}
}
//////////////////////////////////////////////////////////////////////////////////////
/*
kernel wrappers to call the device functions.
*/
__global__
void dlaset_full_kernel(
int m, int n,
double offdiag, double diag,
double *dA, int ldda )
{
dlaset_full_device(m, n, offdiag, diag, dA, ldda);
}
__global__
void dlaset_lower_kernel(
int m, int n,
double offdiag, double diag,
double *dA, int ldda )
{
dlaset_lower_device(m, n, offdiag, diag, dA, ldda);
}
__global__
void dlaset_upper_kernel(
int m, int n,
double offdiag, double diag,
double *dA, int ldda )
{
dlaset_upper_device(m, n, offdiag, diag, dA, ldda);
}
//////////////////////////////////////////////////////////////////////////////////////
/*
kernel wrappers to call the device functions for the batched routine.
*/
__global__
void dlaset_full_kernel_batched(
int m, int n,
double offdiag, double diag,
double **dAarray, int ldda )
{
int batchid = blockIdx.z;
dlaset_full_device(m, n, offdiag, diag, dAarray[batchid], ldda);
}
__global__
void dlaset_lower_kernel_batched(
int m, int n,
double offdiag, double diag,
double **dAarray, int ldda )
{
int batchid = blockIdx.z;
dlaset_lower_device(m, n, offdiag, diag, dAarray[batchid], ldda);
}
__global__
void dlaset_upper_kernel_batched(
int m, int n,
double offdiag, double diag,
double **dAarray, int ldda )
{
int batchid = blockIdx.z;
dlaset_upper_device(m, n, offdiag, diag, dAarray[batchid], ldda);
}
//////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
DLASET initializes a 2-D array A to DIAG on the diagonal and
OFFDIAG on the off-diagonals.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA to be set.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
- = MagmaFull: All of the matrix dA
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
offdiag DOUBLE PRECISION
The scalar OFFDIAG. (In LAPACK this is called ALPHA.)
@param[in]
diag DOUBLE PRECISION
The scalar DIAG. (In LAPACK this is called BETA.)
@param[in]
dA DOUBLE PRECISION array, dimension (LDDA,N)
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
On exit, A(i,j) = OFFDIAG, 1 <= i <= m, 1 <= j <= n, i != j;
and A(i,i) = DIAG, 1 <= i <= min(m,n)
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_daux2
********************************************************************/
extern "C"
void magmablas_dlaset_q(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
double offdiag, double diag,
magmaDouble_ptr dA, magma_int_t ldda,
magma_queue_t queue)
{
#define dA(i_, j_) (dA + (i_) + (j_)*ldda)
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m) )
info = -7;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( m == 0 || n == 0 ) {
return;
}
assert( BLK_X == BLK_Y );
const magma_int_t super_NB = max_blocks*BLK_X;
dim3 super_grid( magma_ceildiv( m, super_NB ), magma_ceildiv( n, super_NB ) );
dim3 threads( BLK_X, 1 );
dim3 grid;
magma_int_t mm, nn;
if (uplo == MagmaLower) {
for( unsigned int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( unsigned int j=0; j < super_grid.y && j <= i; ++j ) { // from left to diagonal
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
if ( i == j ) { // diagonal super block
hipLaunchKernelGGL(( dlaset_lower_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
mm, nn, offdiag, diag, dA(i*super_NB, j*super_NB), ldda );
}
else { // off diagonal super block
hipLaunchKernelGGL(( dlaset_full_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
mm, nn, offdiag, offdiag, dA(i*super_NB, j*super_NB), ldda );
}
}
}
}
else if (uplo == MagmaUpper) {
for( unsigned int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( unsigned int j=i; j < super_grid.y; ++j ) { // from diagonal to right
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
if ( i == j ) { // diagonal super block
hipLaunchKernelGGL(( dlaset_upper_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
mm, nn, offdiag, diag, dA(i*super_NB, j*super_NB), ldda );
}
else { // off diagonal super block
hipLaunchKernelGGL(( dlaset_full_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
mm, nn, offdiag, offdiag, dA(i*super_NB, j*super_NB), ldda );
}
}
}
}
else {
// if contiguous in memory & set to zero, hipMemset is faster.
// TODO: use hipMemset2D ?
if ( m == ldda &&
MAGMA_D_EQUAL( offdiag, MAGMA_D_ZERO ) &&
MAGMA_D_EQUAL( diag, MAGMA_D_ZERO ) )
{
size_t size = m*n;
hipError_t err = hipMemsetAsync( dA, 0, size*sizeof(double), queue->cuda_stream() );
assert( err == hipSuccess );
}
else {
for( unsigned int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( unsigned int j=0; j < super_grid.y; ++j ) { // full row
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
if ( i == j ) { // diagonal super block
hipLaunchKernelGGL(( dlaset_full_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
mm, nn, offdiag, diag, dA(i*super_NB, j*super_NB), ldda );
}
else { // off diagonal super block
hipLaunchKernelGGL(( dlaset_full_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
mm, nn, offdiag, offdiag, dA(i*super_NB, j*super_NB), ldda );
}
}
}
}
}
}
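/* ------------------------------------------------------------------------
   Usage sketch (added for illustration, not part of MAGMA): one way to call
   magmablas_dlaset_q to set a device matrix to the identity.  The helpers
   used here (magma_init, magma_queue_create, magma_dmalloc, magma_free,
   magma_queue_sync, magma_queue_destroy, magma_finalize) are assumed to be
   the usual MAGMA 2.x routines pulled in via magma_internal.h; the block is
   guarded with #if 0 so it does not affect the build.                      */
#if 0
static void example_dlaset_identity( magma_int_t n )
{
    magma_init();
    magma_queue_t queue;
    magma_queue_create( 0, &queue );              // queue on device 0
    magmaDouble_ptr dA;
    magma_dmalloc( &dA, n*n );                    // n-by-n device matrix
    // off-diagonal = 0, diagonal = 1  ->  dA becomes the identity
    magmablas_dlaset_q( MagmaFull, n, n, 0.0, 1.0, dA, n, queue );
    magma_queue_sync( queue );
    magma_free( dA );
    magma_queue_destroy( queue );
    magma_finalize();
}
#endif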
////////////////////////////////////////////////////////////////////////////////////////
extern "C"
void magmablas_dlaset_batched(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
double offdiag, double diag,
magmaDouble_ptr dAarray[], magma_int_t ldda,
magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m) )
info = -7;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( m == 0 || n == 0 ) {
return;
}
dim3 threads( BLK_X, 1, 1 );
dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ), batchCount );
if (uplo == MagmaLower) {
hipLaunchKernelGGL(( dlaset_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, offdiag, diag, dAarray, ldda);
}
else if (uplo == MagmaUpper) {
hipLaunchKernelGGL(( dlaset_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, offdiag, diag, dAarray, ldda);
}
else {
hipLaunchKernelGGL(( dlaset_full_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, offdiag, diag, dAarray, ldda);
}
}
| 523ee2a5b775ceb970ac88d595b18cf706d50a8d.cu | /*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@author Mark Gates
@author Azzam Haidar
@generated from magmablas/zlaset.cu normal z -> d, Tue Feb 9 16:05:31 2016
*/
#include "magma_internal.h"
#include "batched_kernel_param.h"
// To deal with really large matrices, this launchs multiple super blocks,
// each with up to 64K-1 x 64K-1 thread blocks, which is up to 4194240 x 4194240 matrix with BLK=64.
// CUDA architecture 2.0 limits each grid dimension to 64K-1.
// Instances arose for vectors used by sparse matrices with M > 4194240, though N is small.
const magma_int_t max_blocks = 65535;
// BLK_X and BLK_Y need to be equal for dlaset_q to deal with diag & offdiag
// when looping over super blocks.
// Formerly, BLK_X and BLK_Y could be different.
#define BLK_X 64
#define BLK_Y BLK_X
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to dlaset, dlacpy, dlag2s, clag2z, dgeadd.
*/
static __device__
void dlaset_full_device(
int m, int n,
double offdiag, double diag,
double *A, int lda )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (below diag || above diag || offdiag == diag) */
bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y || ind + BLK_X <= iby || MAGMA_D_EQUAL( offdiag, diag )));
/* do only rows inside matrix */
if ( ind < m ) {
A += ind + iby*lda;
if ( full ) {
// full block-column, off-diagonal block or offdiag == diag
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
A[j*lda] = offdiag;
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( iby+j == ind )
A[j*lda] = diag;
else
A[j*lda] = offdiag;
}
}
}
}
/*
Similar to dlaset_full, but updates only the diagonal and below.
Blocks that are fully above the diagonal exit immediately.
Code similar to dlaset, dlacpy, zlat2c, clat2z.
*/
static __device__
void dlaset_lower_device(
int m, int n,
double offdiag, double diag,
double *A, int lda )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (below diag) */
bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y));
/* do only rows inside matrix, and blocks not above diag */
if ( ind < m && ind + BLK_X > iby ) {
A += ind + iby*lda;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
A[j*lda] = offdiag;
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( iby+j == ind )
A[j*lda] = diag;
else if ( ind > iby+j )
A[j*lda] = offdiag;
}
}
}
}
/*
Similar to dlaset_full, but updates only the diagonal and above.
Blocks that are fully below the diagonal exit immediately.
Code similar to dlaset, dlacpy, zlat2c, clat2z.
*/
static __device__
void dlaset_upper_device(
int m, int n,
double offdiag, double diag,
double *A, int lda )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (above diag) */
bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby));
/* do only rows inside matrix, and blocks not below diag */
if ( ind < m && ind < iby + BLK_Y ) {
A += ind + iby*lda;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
A[j*lda] = offdiag;
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( iby+j == ind )
A[j*lda] = diag;
else if ( ind < iby+j )
A[j*lda] = offdiag;
}
}
}
}
//////////////////////////////////////////////////////////////////////////////////////
/*
kernel wrappers to call the device functions.
*/
__global__
void dlaset_full_kernel(
int m, int n,
double offdiag, double diag,
double *dA, int ldda )
{
dlaset_full_device(m, n, offdiag, diag, dA, ldda);
}
__global__
void dlaset_lower_kernel(
int m, int n,
double offdiag, double diag,
double *dA, int ldda )
{
dlaset_lower_device(m, n, offdiag, diag, dA, ldda);
}
__global__
void dlaset_upper_kernel(
int m, int n,
double offdiag, double diag,
double *dA, int ldda )
{
dlaset_upper_device(m, n, offdiag, diag, dA, ldda);
}
//////////////////////////////////////////////////////////////////////////////////////
/*
kernel wrappers to call the device functions for the batched routine.
*/
__global__
void dlaset_full_kernel_batched(
int m, int n,
double offdiag, double diag,
double **dAarray, int ldda )
{
int batchid = blockIdx.z;
dlaset_full_device(m, n, offdiag, diag, dAarray[batchid], ldda);
}
__global__
void dlaset_lower_kernel_batched(
int m, int n,
double offdiag, double diag,
double **dAarray, int ldda )
{
int batchid = blockIdx.z;
dlaset_lower_device(m, n, offdiag, diag, dAarray[batchid], ldda);
}
__global__
void dlaset_upper_kernel_batched(
int m, int n,
double offdiag, double diag,
double **dAarray, int ldda )
{
int batchid = blockIdx.z;
dlaset_upper_device(m, n, offdiag, diag, dAarray[batchid], ldda);
}
//////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
DLASET initializes a 2-D array A to DIAG on the diagonal and
OFFDIAG on the off-diagonals.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA to be set.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
- = MagmaFull: All of the matrix dA
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
offdiag DOUBLE PRECISION
The scalar OFFDIAG. (In LAPACK this is called ALPHA.)
@param[in]
diag DOUBLE PRECISION
The scalar DIAG. (In LAPACK this is called BETA.)
@param[in]
dA DOUBLE PRECISION array, dimension (LDDA,N)
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
On exit, A(i,j) = OFFDIAG, 1 <= i <= m, 1 <= j <= n, i != j;
and A(i,i) = DIAG, 1 <= i <= min(m,n)
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_daux2
********************************************************************/
extern "C"
void magmablas_dlaset_q(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
double offdiag, double diag,
magmaDouble_ptr dA, magma_int_t ldda,
magma_queue_t queue)
{
#define dA(i_, j_) (dA + (i_) + (j_)*ldda)
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m) )
info = -7;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( m == 0 || n == 0 ) {
return;
}
assert( BLK_X == BLK_Y );
const magma_int_t super_NB = max_blocks*BLK_X;
dim3 super_grid( magma_ceildiv( m, super_NB ), magma_ceildiv( n, super_NB ) );
dim3 threads( BLK_X, 1 );
dim3 grid;
magma_int_t mm, nn;
if (uplo == MagmaLower) {
for( unsigned int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( unsigned int j=0; j < super_grid.y && j <= i; ++j ) { // from left to diagonal
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
if ( i == j ) { // diagonal super block
dlaset_lower_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
( mm, nn, offdiag, diag, dA(i*super_NB, j*super_NB), ldda );
}
else { // off diagonal super block
dlaset_full_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
( mm, nn, offdiag, offdiag, dA(i*super_NB, j*super_NB), ldda );
}
}
}
}
else if (uplo == MagmaUpper) {
for( unsigned int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( unsigned int j=i; j < super_grid.y; ++j ) { // from diagonal to right
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
if ( i == j ) { // diagonal super block
dlaset_upper_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
( mm, nn, offdiag, diag, dA(i*super_NB, j*super_NB), ldda );
}
else { // off diagonal super block
dlaset_full_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
( mm, nn, offdiag, offdiag, dA(i*super_NB, j*super_NB), ldda );
}
}
}
}
else {
// if contiguous in memory & set to zero, cudaMemset is faster.
// TODO: use cudaMemset2D ?
if ( m == ldda &&
MAGMA_D_EQUAL( offdiag, MAGMA_D_ZERO ) &&
MAGMA_D_EQUAL( diag, MAGMA_D_ZERO ) )
{
size_t size = m*n;
cudaError_t err = cudaMemsetAsync( dA, 0, size*sizeof(double), queue->cuda_stream() );
assert( err == cudaSuccess );
}
else {
for( unsigned int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( unsigned int j=0; j < super_grid.y; ++j ) { // full row
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
if ( i == j ) { // diagonal super block
dlaset_full_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
( mm, nn, offdiag, diag, dA(i*super_NB, j*super_NB), ldda );
}
else { // off diagonal super block
dlaset_full_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
( mm, nn, offdiag, offdiag, dA(i*super_NB, j*super_NB), ldda );
}
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////
extern "C"
void magmablas_dlaset_batched(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
double offdiag, double diag,
magmaDouble_ptr dAarray[], magma_int_t ldda,
magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m) )
info = -7;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( m == 0 || n == 0 ) {
return;
}
dim3 threads( BLK_X, 1, 1 );
dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ), batchCount );
if (uplo == MagmaLower) {
dlaset_lower_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>> (m, n, offdiag, diag, dAarray, ldda);
}
else if (uplo == MagmaUpper) {
dlaset_upper_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>> (m, n, offdiag, diag, dAarray, ldda);
}
else {
dlaset_full_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>> (m, n, offdiag, diag, dAarray, ldda);
}
}
|
d41b71c45cb997df2eb19d673e59014e1b9a3d06.hip | // !!! This is a file automatically generated by hipify!!!
/*
*
* Authors: Fernando Amat
* test_gpu_elementwiseOp.cpp
*
* Created on : June 5th, 2015
* Author : Fernando Amat
*
* \brief testing GPU kernels to perform pointwise operations
*
*/
#include <cstdint>
#include <stdlib.h> /* srand, rand */
#include <time.h> /* time */
#include <iostream>
#include <algorithm>
#include <fstream>
#include "hip/hip_runtime.h"
#include "book.h"
#include "multiviewDeconvolution.h"
typedef float dataType;
using namespace std;
int main(int argc, const char** argv)
{
std::cout << "testing GPU multi-view deconvolution weights calculation in the GPU running..." << std::endl;
int devCUDA = 0;
//parameters
string filepath("C:/Users/Fernando/matlabProjects/deconvolution/CUDA/test/data/");
if (argc > 1)
filepath = string(argv[1]);
string filePatternImg(filepath + "imReg_?.klb");
string filePatternWeights(filepath + "weightsReg_?.klb");
int numViews = 1;
float anisotropyZ = 5.0;
//=====================================================================
HANDLE_ERROR(hipSetDevice(devCUDA));
//declare object
multiviewDeconvolution<float> J;
J.setNumberOfViews(numViews);
//read images
string filename;
int err;
for (int ii = 0; ii < numViews; ii++)
{
//calculate weights
filename = multiviewImage<float>::recoverFilenamePatternFromString(filePatternImg, ii + 1);
err = J.readImage(filename, ii, std::string("img"));
if (err > 0)
{
cout << "ERROR: reading file " << filename << endl;
return err;
}
filename = multiviewImage<float>::recoverFilenamePatternFromString(filePatternWeights, ii + 1);
err = J.readImage(filename, ii, std::string("weight"));//this function should just read image
if (err > 0)
{
cout << "ERROR: reading file " << filename << endl;
return err;
}
//calculate weights
J.calculateWeights(ii, anisotropyZ);
//compare weights
char buffer[256];
sprintf(buffer, "%sdebug_weightsRef_%d.raw", filepath.c_str(), ii + 1);
J.debug_writeGPUarray_weights(ii, string(buffer));
}
return 0;
}
| d41b71c45cb997df2eb19d673e59014e1b9a3d06.cu | /*
*
* Authors: Fernando Amat
* test_gpu_elementwiseOp.cpp
*
* Created on : June 5th, 2015
* Author : Fernando Amat
*
* \brief testing GPU kernels to perform pointwise operations
*
*/
#include <cstdint>
#include <stdlib.h> /* srand, rand */
#include <time.h> /* time */
#include <iostream>
#include <algorithm>
#include <fstream>
#include "cuda.h"
#include "book.h"
#include "multiviewDeconvolution.h"
typedef float dataType;
using namespace std;
int main(int argc, const char** argv)
{
std::cout << "testing GPU multi-view deconvolution weights calculation in the GPU running..." << std::endl;
int devCUDA = 0;
//parameters
string filepath("C:/Users/Fernando/matlabProjects/deconvolution/CUDA/test/data/");
if (argc > 1)
filepath = string(argv[1]);
string filePatternImg(filepath + "imReg_?.klb");
string filePatternWeights(filepath + "weightsReg_?.klb");
int numViews = 1;
float anisotropyZ = 5.0;
//=====================================================================
HANDLE_ERROR(cudaSetDevice(devCUDA));
//declare object
multiviewDeconvolution<float> J;
J.setNumberOfViews(numViews);
//read images
string filename;
int err;
for (int ii = 0; ii < numViews; ii++)
{
//calculate weights
filename = multiviewImage<float>::recoverFilenamePatternFromString(filePatternImg, ii + 1);
err = J.readImage(filename, ii, std::string("img"));
if (err > 0)
{
cout << "ERROR: reading file " << filename << endl;
return err;
}
filename = multiviewImage<float>::recoverFilenamePatternFromString(filePatternWeights, ii + 1);
err = J.readImage(filename, ii, std::string("weight"));//this function should just read image
if (err > 0)
{
cout << "ERROR: reading file " << filename << endl;
return err;
}
//calculate weights
J.calculateWeights(ii, anisotropyZ);
//compare weights
char buffer[256];
sprintf(buffer, "%sdebug_weightsRef_%d.raw", filepath.c_str(), ii + 1);
J.debug_writeGPUarray_weights(ii, string(buffer));
}
return 0;
}
|
01de9f7669d6d5b0d611925b10d9b9213acbe70c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "solveLS.h"
/**
Gets the reduced mass by checking the isospin channel, which determines the type of NN scattering
@param channel: Scattering channel
@return Reduced mass
*/
double getReducedMass(std::vector<QuantumState> channel) {
double mu = 0;
int TzChannel = channel[0].state["Tz"];
/* Proton-proton scattering */
if (TzChannel == -1)
mu = constants::protonMass / 2;
/* Proton-neutron scattering */
else if (TzChannel == 0)
mu = constants::nucleonReducedMass;
/* Neutron-neutron scattering */
else if (TzChannel == 1)
mu = constants::neutronMass / 2;
return mu;
}
/**
Checks if the quantum state is coupled or not.
@param channel: Scattering channel
@return True if coupled, false if not
*/
bool isCoupled(std::vector<QuantumState> channel) {
/* If there is only one channel the state is uncoupled, otherwise there are four channels and the state is coupled. */
return !(channel.size() == 1);
}
/* Computes, for each on-shell momentum k0[slice] (one per lab energy), the
   quadrature sum over w[j] / (k0^2 - k[j]^2); setupDVector uses this sum for
   the pole-subtraction (on-shell) element of the D vector. */
__global__
void setupDVectorSum(
double* sum,
double* k0,
int quadratureN,
int TLabLength,
double* k,
double* w) {
int slice = blockIdx.x * blockDim.x + threadIdx.x;
if (slice < TLabLength) {
sum[slice] = 0;
for (int column = 0; column < quadratureN; ++column) {
sum[slice] += w[column] / (k0[slice] * k0[slice] - k[column] * k[column]);
}
}
}
/**
Gets the on-shell point k0 for different types of NN scattering
@param k0: On-shell point
@param TLab: Kinetic energy for the projectile in the lab system
@param TLabLength: Size of the TLab array
@param TzChannel: Current Tz channel
@return On-shell point k0
*/
__global__
void getk0(double* k0, double* TLab, int TLabLength, int TzChannel) {
int slice = blockIdx.x * blockDim.x + threadIdx.x;
//Hardcode for Tz=0
if (slice < TLabLength) {
k0[slice] = sqrt(pow(constants::neutronMass, 2) * TLab[slice] * (TLab[slice]
+ 2 * constants::protonMass) / ((pow(constants::protonMass
+ constants::neutronMass, 2) + 2 * TLab[slice] * constants::neutronMass)));
}
}
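/* Sketch (not called anywhere): host-side mirror of the Tz = 0 expression used
   in getk0 above, written out so the kinematics are easier to read.  mp and mn
   stand for the proton and neutron masses in MeV; the real code takes them
   from the constants:: namespace. */
#if 0
static double k0_np_onshell(double TLab, double mp, double mn)
{
    /* k0^2 = mn^2 * TLab * (TLab + 2*mp) / ( (mp + mn)^2 + 2*TLab*mn ) */
    return sqrt( mn*mn * TLab * (TLab + 2.0*mp)
               / ( (mp + mn)*(mp + mn) + 2.0*TLab*mn ) );
}
#endif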
int main() {
using microseconds = std::chrono::microseconds;
auto start = std::chrono::high_resolution_clock::now();
/*We define our parameters here*/
constexpr double TLabMin = 1; // Minimum energy
constexpr double TLabMax = 300; // Threshold energy for pion creation
constexpr int TLabLength = 4096; // Number of energies to generate
constexpr int quadratureN = 40;
/*End of defining parameters*/
/*We initialize CUDA and cuBLAS here*/
// Initialize CUDA
auto startcudafree0 = std::chrono::high_resolution_clock::now();
hipFree(0);
auto stopcudafree0 = std::chrono::high_resolution_clock::now();
// cuBLAS variables
hipblasStatus_t status;
hipblasHandle_t handle;
// Initialize cuBLAS
auto cublasCreate_start = std::chrono::high_resolution_clock::now();
status = hipblasCreate(&handle);
auto cublasCreate_stop = std::chrono::high_resolution_clock::now();
/*End of initializing CUDA and cuBlas*/
/* Set up the quantum states by choosing ranges for the J and Tz quantum numbers*/
int JMin = 0;
int JMax = 2;
int TzMin = 0;
int TzMax = 2;
std::vector<QuantumState> basis = setupBasis(JMin, JMax, TzMin, TzMax);
std::map<std::string, std::vector<QuantumState>> channels = setupNNChannels(basis);
// TODO: Explain
std::string key = "J:0 S:0 Tz:0 pi:1"; // TODO: Looks like "magic numbers" for outside reader, explain this
std::vector<QuantumState> channel = channels[key];
if (channel.size() == 0) {
std::cout << "Invalid key";
abort();
}
int TzChannel = channel[0].state["Tz"];
/* Number of quadrature points, needed for array sizes and later the quadrature setup */
/* All matrices and vectors have the same length/height; the number of quadrature points
 * plus one (because of the on-shell point). Furthermore, in the uncoupled case there is
 * only one phase shift, whereas in the coupled case there are two phase shifts and
* one mixing angle. */
int matLength;
int phasesSize;
bool coupled = isCoupled(channel);
if (!coupled) {
matLength = quadratureN + 1;
phasesSize = 1;
}
else {
/* Let matLength be two times as big to facilitate calculations later */
matLength = 2 * (quadratureN + 1);
phasesSize = 3;
}
/* Prepare to generate TLab [Mev] */
constexpr double TLabIncr = (TLabMax - TLabMin + 1) / TLabLength;
auto startAllocateHost = std::chrono::high_resolution_clock::now();
/* Allocate memory on the host */
double* k_h = new double[quadratureN];
double* k0_h = new double[TLabLength];
hipDoubleComplex* phases_h = new hipDoubleComplex[phasesSize * TLabLength];
double* TLab_h = new double[TLabLength];
hipDoubleComplex* V_h = new hipDoubleComplex[matLength * matLength * TLabLength];
double* w_h = new double[quadratureN];
auto stopAllocateHost = std::chrono::high_resolution_clock::now();
/* Generate different experimental kinetic energies [MeV] */
for (int i = 0; i < TLabLength; i++) {
TLab_h[i] = TLabMin + i * TLabIncr;
//printf("Tlab[%i] = %.4e", i, TLab_h[i]);
}
/* Set up the quadrature points k with weights w */
constexpr double scale = 100; // TODO: Explain how this is chosen
auto startKvadratur = std::chrono::high_resolution_clock::now();
gaussLegendreInfMesh(k_h, w_h, quadratureN, scale);
auto stopKvadratur = std::chrono::high_resolution_clock::now();
/* Declare device variables to be able to allocate them on the device */
hipDoubleComplex* F_d;
hipDoubleComplex* D_d;
double* k_d;
double* k0_d;
hipDoubleComplex* phases_d;
double* sum_d;
hipDoubleComplex* T_d;
double* TLab_d;
hipDoubleComplex* V_d;
hipDoubleComplex* VD_d;
double* w_d;
auto startAllocateDevice = std::chrono::high_resolution_clock::now();
/* Allocate memory on the device */
hipMalloc((void**)&F_d, matLength * matLength * TLabLength * sizeof(hipDoubleComplex));
hipMalloc((void**)&D_d, matLength * TLabLength * sizeof(hipDoubleComplex));
hipMalloc((void**)&k_d, quadratureN * sizeof(double));
hipMalloc((void**)&k0_d, TLabLength * sizeof(double));
hipMalloc((void**)&phases_d, phasesSize * TLabLength * sizeof(hipDoubleComplex));
hipMalloc((void**)&sum_d, TLabLength * sizeof(double));
hipMalloc((void**)&T_d, matLength * TLabLength * sizeof(hipDoubleComplex));
hipMalloc((void**)&TLab_d, TLabLength * sizeof(double));
hipMalloc((void**)&V_d, matLength * matLength * TLabLength * sizeof(hipDoubleComplex));
hipMalloc((void**)&VD_d, matLength * matLength * TLabLength * sizeof(hipDoubleComplex));
hipMalloc((void**)&w_d, quadratureN * sizeof(double));
auto stopAllocateDevice = std::chrono::high_resolution_clock::now();
auto startCopyHostToDevice = std::chrono::high_resolution_clock::now();
/* Copy host variables to device variables */
hipMemcpy(k_d, k_h, quadratureN * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(TLab_d, TLab_h, TLabLength * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(w_d, w_h, quadratureN * sizeof(double), hipMemcpyHostToDevice);
auto stopCopyHostToDevice = std::chrono::high_resolution_clock::now();
// 3D launch configuration: x and y cover the matLength x matLength matrices, z covers the
// TLabLength energies; the thread-block dimensions are overridden to 4 x 4 x 64 just below.
dim3 threadsPerBlock(matLength, matLength, TLabLength); // Block size
dim3 blocksPerGrid(1,1,1); // Grid size
threadsPerBlock.x = 4;
threadsPerBlock.y = 4;
threadsPerBlock.z = 64;
blocksPerGrid.x = ceil(double(matLength) / double(threadsPerBlock.x));
blocksPerGrid.y = ceil(double(matLength) / double(threadsPerBlock.y));
blocksPerGrid.z = ceil(double(TLabLength) / double(threadsPerBlock.z));
int blockSize = 256;
int numBlocks = (TLabLength + blockSize - 1) / blockSize;
auto startGetk0 = std::chrono::high_resolution_clock::now();
/* Get the on-shell points for different TLab with parallellization */
hipLaunchKernelGGL(( getk0) , dim3(numBlocks), dim3(blockSize), 0, 0, k0_d, TLab_d, TLabLength, TzChannel);
auto stopGetk0 = std::chrono::high_resolution_clock::now();
/* Use k0 to generate different potentials on the CPU. The CPU generated potentials are
* then sent to the GPU as an array. */
hipMemcpy(k0_h, k0_d, TLabLength * sizeof(double), hipMemcpyDeviceToHost);
auto startPotential = std::chrono::high_resolution_clock::now();
potential(V_h, channel, k_h, TLab_h, k0_h, quadratureN, TLabLength, coupled, matLength);
auto stopPotential = std::chrono::high_resolution_clock::now();
hipMemcpy(V_d, V_h, matLength * matLength * TLabLength * sizeof(hipDoubleComplex), hipMemcpyHostToDevice);
double mu = getReducedMass(channel);
/* Call kernels on GPU */
auto startDsum = std::chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( setupDVectorSum) , dim3(numBlocks), dim3(blockSize) , 0, 0, sum_d, k0_d, quadratureN, TLabLength, k_d, w_d);
auto stopDsum = std::chrono::high_resolution_clock::now();
auto startSetupD = std::chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( setupDVector) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, D_d, k_d, w_d, k0_d, sum_d, quadratureN, matLength, TLabLength, mu, coupled);
auto stopSetupD = std::chrono::high_resolution_clock::now();
auto startSetupVDKernel = std::chrono::high_resolution_clock::now();
/* Setup the VD kernel and, at the same time, the F matrix */
hipLaunchKernelGGL(( setupVDKernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, T_d, VD_d, V_d, D_d, F_d, k_d, w_d, k0_d, quadratureN, matLength, TLabLength, mu, coupled);
auto stopSetupVDKernel = std::chrono::high_resolution_clock::now();
auto startcomputeTMatrixCUBLAS = std::chrono::high_resolution_clock::now();
/* Solve the equation FT = V with cuBLAS */
computeTMatrixCUBLAS(T_d, F_d, matLength, TLabLength, status, handle);
auto stopcomputeTMatrixCUBLAS = std::chrono::high_resolution_clock::now();
/* TODO: Explain this */
/* Computes the phase shifts for the given T-matrix*/
auto startcomputePhaseShifts = std::chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( computePhaseShifts) , dim3(numBlocks), dim3(blockSize), 0, 0, phases_d, T_d, k0_d, quadratureN, mu, coupled,
TLabLength, matLength);
auto stopcomputePhaseShifts = std::chrono::high_resolution_clock::now();
/* Make sure all kernels are done before accessing device variables from host */
hipDeviceSynchronize();
/* Copy (relevant) device variables to host variables */
hipMemcpy(phases_h, phases_d, phasesSize * TLabLength * sizeof(hipDoubleComplex), hipMemcpyDeviceToHost);
// for (int i = 0; i < TLabLength; i++) {
// if (coupled) {
// for (int j = 0; j < phasesSize; ++j) {
// printf("\nTLab = %f", TLab_h[i]);
// printf("\nReal(phases[%i]) = %.10e", j, cuCreal(phases_h[j + i * phasesSize]));
// printf("\nImag(phases[%i]) = %.10e", j, cuCimag(phases_h[j + i * phasesSize]));
// printf("\n");
// }
// }
// else {
// printf("\nTLab = %f", TLab_h[i]);
// printf("\nReal(phase) = %.10e", cuCreal(phases_h[i]));
// printf("\nImag(phase) = %.10e", cuCimag(phases_h[i]));
// printf("\n");
// }
// }
/**
for (int i = 0; i < TLabLength; i = i+20) {
if (coupled) {
for (int j = 0; j < phasesSize; ++j) {
printf("\nTLab = %f", TLab_h[i]);
printf("\nReal(phases[%i]) = %.10e", j, cuCreal(phases_h[j + i * phasesSize]));
printf("\nImag(phases[%i]) = %.10e", j, cuCimag(phases_h[j + i * phasesSize]));
printf("\n");
}
}
else {
printf("\nTLab = %f", TLab_h[i]);
printf("\nReal(phase) = %.10e", cuCreal(phases_h[i]));
printf("\nImag(phase) = %.10e", cuCimag(phases_h[i]));
printf("\n");
}
}
*/
auto freemem_start = std::chrono::high_resolution_clock::now();
/* Free allocated host memory */
delete[] k_h;
delete[] k0_h;
delete[] phases_h;
delete[] TLab_h;
delete[] V_h;
delete[] w_h;
/* Free allocated device memory */
// hipFree(F_d);
hipFree(D_d);
hipFree(k0_d);
hipFree(k_d);
hipFree(phases_d);
hipFree(sum_d);
hipFree(T_d);
hipFree(TLab_d);
hipFree(V_d);
hipFree(VD_d);
hipFree(w_d);
auto freemem_end = std::chrono::high_resolution_clock::now();
auto finish = std::chrono::high_resolution_clock::now();
/*
std::cout << "total: \t\t\t" << std::chrono::duration_cast<microseconds>(finish - start).count()<<"\n";
std::cout << "cudafree(0): \t\t" << std::chrono::duration_cast<microseconds>(stopcudafree0 - startcudafree0).count() << "\n";
std::cout << "hipblasCreate: " << std::chrono::duration_cast<microseconds>(cublasCreate_stop - cublasCreate_start).count() << "\n";
std::cout << "allocate host: \t\t" << std::chrono::duration_cast<microseconds>(stopAllocateHost - startAllocateHost).count()<<"\n";
std::cout << "kvadratur: \t\t" << std::chrono::duration_cast<microseconds>(stopKvadratur - startKvadratur).count()<<"\n";
std::cout << "allocated device: \t" << std::chrono::duration_cast<microseconds>(stopAllocateDevice - startAllocateDevice).count()<<"\n";
std::cout << "copy host to device: \t" << std::chrono::duration_cast<microseconds>(stopCopyHostToDevice - startCopyHostToDevice).count()<<"\n";
std::cout << "getk0: \t\t\t" << std::chrono::duration_cast<microseconds>(stopGetk0 - startGetk0).count()<<"\n";
std::cout << "potential: \t\t" << std::chrono::duration_cast<microseconds>(stopPotential - startPotential).count()<<"\n";
std::cout << "Dsum: \t\t\t" << std::chrono::duration_cast<microseconds>(stopDsum - startDsum).count()<<"\n";
std::cout << "setupD: \t\t" << std::chrono::duration_cast<microseconds>(stopSetupD - startSetupD).count()<<"\n";
std::cout << "SetupVDKernel: \t\t" << std::chrono::duration_cast<microseconds>(stopSetupVDKernel - startSetupVDKernel).count()<<"\n";
std::cout << "computeTMatrixCUBLAS: \t" << std::chrono::duration_cast<microseconds>(stopcomputeTMatrixCUBLAS - startcomputeTMatrixCUBLAS).count()<<"\n";
std::cout << "computePhaseShifts: \t" << std::chrono::duration_cast<microseconds>(stopcomputePhaseShifts - startcomputePhaseShifts).count()<<"\n";
std::cout << "Free memory: \t" << std::chrono::duration_cast<microseconds>(freemem_end - freemem_start).count() << "\n";
*/
std::cout << std::chrono::duration_cast<microseconds>(finish - start).count() << ", ";
std::cout << std::chrono::duration_cast<microseconds>(stopcudafree0 - startcudafree0).count() << ", ";
std::cout << std::chrono::duration_cast<microseconds>(cublasCreate_stop - cublasCreate_start).count() << ", ";
std::cout << std::chrono::duration_cast<microseconds>(stopAllocateHost - startAllocateHost).count() << ", ";
std::cout << std::chrono::duration_cast<microseconds>(stopKvadratur - startKvadratur).count() << ", ";
std::cout << std::chrono::duration_cast<microseconds>(stopAllocateDevice - startAllocateDevice).count() << ", ";
std::cout << std::chrono::duration_cast<microseconds>(stopCopyHostToDevice - startCopyHostToDevice).count() << ", ";
std::cout << std::chrono::duration_cast<microseconds>(stopGetk0 - startGetk0).count() << ", ";
std::cout << std::chrono::duration_cast<microseconds>(stopPotential - startPotential).count() << ", ";
std::cout << std::chrono::duration_cast<microseconds>(stopDsum - startDsum).count() << ", ";
std::cout << std::chrono::duration_cast<microseconds>(stopSetupD - startSetupD).count() << ", ";
std::cout << std::chrono::duration_cast<microseconds>(stopSetupVDKernel - startSetupVDKernel).count() << ", ";
std::cout << std::chrono::duration_cast<microseconds>(stopcomputeTMatrixCUBLAS - startcomputeTMatrixCUBLAS).count() << ", ";
std::cout << std::chrono::duration_cast<microseconds>(stopcomputePhaseShifts - startcomputePhaseShifts).count() << ", ";
std::cout << std::chrono::duration_cast<microseconds>(freemem_end - freemem_start).count() << "\n";
return 0;
} | 01de9f7669d6d5b0d611925b10d9b9213acbe70c.cu | #include "solveLS.h"
/**
Gets the reduced mass by checking the isospin channel, which determines the type of NN scattering
@param channel: Scattering channel
@return Reduced mass
*/
double getReducedMass(std::vector<QuantumState> channel) {
double mu = 0;
int TzChannel = channel[0].state["Tz"];
/* Proton-proton scattering */
if (TzChannel == -1)
mu = constants::protonMass / 2;
/* Proton-neutron scattering */
else if (TzChannel == 0)
mu = constants::nucleonReducedMass;
/* Neutron-neutron scattering */
else if (TzChannel == 1)
mu = constants::neutronMass / 2;
return mu;
}
/**
Checks if the quantum state is coupled or not.
@param channel: Scattering channel
@return True if coupled, false if not
*/
bool isCoupled(std::vector<QuantumState> channel) {
/* If there is only one channel the state is uncoupled, otherwise there are four channels and the state is coupled. */
return !(channel.size() == 1);
}
/* Computes, for each on-shell momentum k0[slice] (one per lab energy), the
   quadrature sum over w[j] / (k0^2 - k[j]^2); setupDVector uses this sum for
   the pole-subtraction (on-shell) element of the D vector. */
__global__
void setupDVectorSum(
double* sum,
double* k0,
int quadratureN,
int TLabLength,
double* k,
double* w) {
int slice = blockIdx.x * blockDim.x + threadIdx.x;
if (slice < TLabLength) {
sum[slice] = 0;
for (int column = 0; column < quadratureN; ++column) {
sum[slice] += w[column] / (k0[slice] * k0[slice] - k[column] * k[column]);
}
}
}
/**
Gets the on-shell point k0 for different types of NN scattering
@param k0: On-shell point
@param TLab: Kinetic energy for the projectile in the lab system
@param TLabLength: Size of the TLab array
@param TzChannel: Current Tz channel
@return On-shell point k0
*/
__global__
void getk0(double* k0, double* TLab, int TLabLength, int TzChannel) {
int slice = blockIdx.x * blockDim.x + threadIdx.x;
//Hardcode for Tz=0
if (slice < TLabLength) {
k0[slice] = sqrt(pow(constants::neutronMass, 2) * TLab[slice] * (TLab[slice]
+ 2 * constants::protonMass) / ((pow(constants::protonMass
+ constants::neutronMass, 2) + 2 * TLab[slice] * constants::neutronMass)));
}
}
int main() {
using microseconds = std::chrono::microseconds;
auto start = std::chrono::high_resolution_clock::now();
/*We define our parameters here*/
constexpr double TLabMin = 1; // Minimum energy
constexpr double TLabMax = 300; // Threshold energy for pion creation
constexpr int TLabLength = 4096; // Number of energies to generate
constexpr int quadratureN = 40;
/*End of defining parameters*/
/*We initialize CUDA and cuBLAS here*/
// Initialize CUDA
auto startcudafree0 = std::chrono::high_resolution_clock::now();
cudaFree(0);
auto stopcudafree0 = std::chrono::high_resolution_clock::now();
// cuBLAS variables
cublasStatus_t status;
cublasHandle_t handle;
// Initialize cuBLAS
auto cublasCreate_start = std::chrono::high_resolution_clock::now();
status = cublasCreate(&handle);
auto cublasCreate_stop = std::chrono::high_resolution_clock::now();
/*End of initializing CUDA and cuBlas*/
/* Set up the quantum states by choosing ranges for the J and Tz quantum numbers*/
int JMin = 0;
int JMax = 2;
int TzMin = 0;
int TzMax = 2;
std::vector<QuantumState> basis = setupBasis(JMin, JMax, TzMin, TzMax);
std::map<std::string, std::vector<QuantumState>> channels = setupNNChannels(basis);
// TODO: Explain
std::string key = "J:0 S:0 Tz:0 pi:1"; // TODO: Looks like "magic numbers" for outside reader, explain this
std::vector<QuantumState> channel = channels[key];
if (channel.size() == 0) {
std::cout << "Invalid key";
abort();
}
int TzChannel = channel[0].state["Tz"];
/* Number of quadrature points, needed for array sizes and later the quadrature setup */
/* All matrices and vectors have the same length/height; the number of quadrature points
 * plus one (because of the on-shell point). Furthermore, in the uncoupled case there is
 * only one phase shift, whereas in the coupled case there are two phase shifts and
* one mixing angle. */
int matLength;
int phasesSize;
bool coupled = isCoupled(channel);
if (!coupled) {
matLength = quadratureN + 1;
phasesSize = 1;
}
else {
/* Let matLength be two times as big to facilitate calculations later */
matLength = 2 * (quadratureN + 1);
phasesSize = 3;
}
/* Prepare to generate TLab [Mev] */
constexpr double TLabIncr = (TLabMax - TLabMin + 1) / TLabLength;
auto startAllocateHost = std::chrono::high_resolution_clock::now();
/* Allocate memory on the host */
double* k_h = new double[quadratureN];
double* k0_h = new double[TLabLength];
cuDoubleComplex* phases_h = new cuDoubleComplex[phasesSize * TLabLength];
double* TLab_h = new double[TLabLength];
cuDoubleComplex* V_h = new cuDoubleComplex[matLength * matLength * TLabLength];
double* w_h = new double[quadratureN];
auto stopAllocateHost = std::chrono::high_resolution_clock::now();
/* Generate different experimental kinetic energies [MeV] */
for (int i = 0; i < TLabLength; i++) {
TLab_h[i] = TLabMin + i * TLabIncr;
//printf("Tlab[%i] = %.4e", i, TLab_h[i]);
}
/* Set up the quadrature points k with weights w */
constexpr double scale = 100; // TODO: Explain how this is chosen
auto startKvadratur = std::chrono::high_resolution_clock::now();
gaussLegendreInfMesh(k_h, w_h, quadratureN, scale);
auto stopKvadratur = std::chrono::high_resolution_clock::now();
/* Declare device variables to be able to allocate them on the device */
cuDoubleComplex* F_d;
cuDoubleComplex* D_d;
double* k_d;
double* k0_d;
cuDoubleComplex* phases_d;
double* sum_d;
cuDoubleComplex* T_d;
double* TLab_d;
cuDoubleComplex* V_d;
cuDoubleComplex* VD_d;
double* w_d;
auto startAllocateDevice = std::chrono::high_resolution_clock::now();
/* Allocate memory on the device */
cudaMalloc((void**)&F_d, matLength * matLength * TLabLength * sizeof(cuDoubleComplex));
cudaMalloc((void**)&D_d, matLength * TLabLength * sizeof(cuDoubleComplex));
cudaMalloc((void**)&k_d, quadratureN * sizeof(double));
cudaMalloc((void**)&k0_d, TLabLength * sizeof(double));
cudaMalloc((void**)&phases_d, phasesSize * TLabLength * sizeof(cuDoubleComplex));
cudaMalloc((void**)&sum_d, TLabLength * sizeof(double));
cudaMalloc((void**)&T_d, matLength * TLabLength * sizeof(cuDoubleComplex));
cudaMalloc((void**)&TLab_d, TLabLength * sizeof(double));
cudaMalloc((void**)&V_d, matLength * matLength * TLabLength * sizeof(cuDoubleComplex));
cudaMalloc((void**)&VD_d, matLength * matLength * TLabLength * sizeof(cuDoubleComplex));
cudaMalloc((void**)&w_d, quadratureN * sizeof(double));
auto stopAllocateDevice = std::chrono::high_resolution_clock::now();
auto startCopyHostToDevice = std::chrono::high_resolution_clock::now();
/* Copy host variables to device variables */
cudaMemcpy(k_d, k_h, quadratureN * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(TLab_d, TLab_h, TLabLength * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(w_d, w_h, quadratureN * sizeof(double), cudaMemcpyHostToDevice);
auto stopCopyHostToDevice = std::chrono::high_resolution_clock::now();
// 3D launch configuration: x and y cover the matLength x matLength matrices, z covers the
// TLabLength energies; the thread-block dimensions are overridden to 4 x 4 x 64 just below.
dim3 threadsPerBlock(matLength, matLength, TLabLength); // Block size
dim3 blocksPerGrid(1,1,1); // Grid size
threadsPerBlock.x = 4;
threadsPerBlock.y = 4;
threadsPerBlock.z = 64;
blocksPerGrid.x = ceil(double(matLength) / double(threadsPerBlock.x));
blocksPerGrid.y = ceil(double(matLength) / double(threadsPerBlock.y));
blocksPerGrid.z = ceil(double(TLabLength) / double(threadsPerBlock.z));
int blockSize = 256;
int numBlocks = (TLabLength + blockSize - 1) / blockSize;
auto startGetk0 = std::chrono::high_resolution_clock::now();
/* Get the on-shell points for different TLab with parallellization */
getk0 <<<numBlocks, blockSize>>>(k0_d, TLab_d, TLabLength, TzChannel);
auto stopGetk0 = std::chrono::high_resolution_clock::now();
/* Use k0 to generate different potentials on the CPU. The CPU generated potentials are
* then sent to the GPU as an array. */
cudaMemcpy(k0_h, k0_d, TLabLength * sizeof(double), cudaMemcpyDeviceToHost);
auto startPotential = std::chrono::high_resolution_clock::now();
potential(V_h, channel, k_h, TLab_h, k0_h, quadratureN, TLabLength, coupled, matLength);
auto stopPotential = std::chrono::high_resolution_clock::now();
cudaMemcpy(V_d, V_h, matLength * matLength * TLabLength * sizeof(cuDoubleComplex), cudaMemcpyHostToDevice);
double mu = getReducedMass(channel);
/* Call kernels on GPU */
auto startDsum = std::chrono::high_resolution_clock::now();
setupDVectorSum <<<numBlocks, blockSize >>> (sum_d, k0_d, quadratureN, TLabLength, k_d, w_d);
auto stopDsum = std::chrono::high_resolution_clock::now();
auto startSetupD = std::chrono::high_resolution_clock::now();
setupDVector <<<blocksPerGrid, threadsPerBlock>>> (D_d, k_d, w_d, k0_d, sum_d, quadratureN, matLength, TLabLength, mu, coupled);
auto stopSetupD = std::chrono::high_resolution_clock::now();
auto startSetupVDKernel = std::chrono::high_resolution_clock::now();
/* Setup the VD kernel and, at the same time, the F matrix */
setupVDKernel <<<blocksPerGrid, threadsPerBlock>>> (T_d, VD_d, V_d, D_d, F_d, k_d, w_d, k0_d, quadratureN, matLength, TLabLength, mu, coupled);
auto stopSetupVDKernel = std::chrono::high_resolution_clock::now();
auto startcomputeTMatrixCUBLAS = std::chrono::high_resolution_clock::now();
/* Solve the equation FT = V with cuBLAS */
computeTMatrixCUBLAS(T_d, F_d, matLength, TLabLength, status, handle);
auto stopcomputeTMatrixCUBLAS = std::chrono::high_resolution_clock::now();
/* TODO: Explain this */
/* Computes the phase shifts for the given T-matrix*/
auto startcomputePhaseShifts = std::chrono::high_resolution_clock::now();
computePhaseShifts <<<numBlocks, blockSize>>> (phases_d, T_d, k0_d, quadratureN, mu, coupled,
TLabLength, matLength);
auto stopcomputePhaseShifts = std::chrono::high_resolution_clock::now();
/* Make sure all kernels are done before accessing device variables from host */
cudaDeviceSynchronize();
/* Copy (relevant) device variables to host variables */
cudaMemcpy(phases_h, phases_d, phasesSize * TLabLength * sizeof(cuDoubleComplex), cudaMemcpyDeviceToHost);
// for (int i = 0; i < TLabLength; i++) {
// if (coupled) {
// for (int j = 0; j < phasesSize; ++j) {
// printf("\nTLab = %f", TLab_h[i]);
// printf("\nReal(phases[%i]) = %.10e", j, cuCreal(phases_h[j + i * phasesSize]));
// printf("\nImag(phases[%i]) = %.10e", j, cuCimag(phases_h[j + i * phasesSize]));
// printf("\n");
// }
// }
// else {
// printf("\nTLab = %f", TLab_h[i]);
// printf("\nReal(phase) = %.10e", cuCreal(phases_h[i]));
// printf("\nImag(phase) = %.10e", cuCimag(phases_h[i]));
// printf("\n");
// }
// }
/**
for (int i = 0; i < TLabLength; i = i+20) {
if (coupled) {
for (int j = 0; j < phasesSize; ++j) {
printf("\nTLab = %f", TLab_h[i]);
printf("\nReal(phases[%i]) = %.10e", j, cuCreal(phases_h[j + i * phasesSize]));
printf("\nImag(phases[%i]) = %.10e", j, cuCimag(phases_h[j + i * phasesSize]));
printf("\n");
}
}
else {
printf("\nTLab = %f", TLab_h[i]);
printf("\nReal(phase) = %.10e", cuCreal(phases_h[i]));
printf("\nImag(phase) = %.10e", cuCimag(phases_h[i]));
printf("\n");
}
}
*/
auto freemem_start = std::chrono::high_resolution_clock::now();
/* Free allocated host memory */
delete[] k_h;
delete[] k0_h;
delete[] phases_h;
delete[] TLab_h;
delete[] V_h;
delete[] w_h;
/* Free allocated device memory */
// cudaFree(F_d);
cudaFree(D_d);
cudaFree(k0_d);
cudaFree(k_d);
cudaFree(phases_d);
cudaFree(sum_d);
cudaFree(T_d);
cudaFree(TLab_d);
cudaFree(V_d);
cudaFree(VD_d);
cudaFree(w_d);
auto freemem_end = std::chrono::high_resolution_clock::now();
auto finish = std::chrono::high_resolution_clock::now();
/*
std::cout << "total: \t\t\t" << std::chrono::duration_cast<microseconds>(finish - start).count()<<"\n";
std::cout << "cudafree(0): \t\t" << std::chrono::duration_cast<microseconds>(stopcudafree0 - startcudafree0).count() << "\n";
std::cout << "cublasCreate: " << std::chrono::duration_cast<microseconds>(cublasCreate_stop - cublasCreate_start).count() << "\n";
std::cout << "allocate host: \t\t" << std::chrono::duration_cast<microseconds>(stopAllocateHost - startAllocateHost).count()<<"\n";
std::cout << "kvadratur: \t\t" << std::chrono::duration_cast<microseconds>(stopKvadratur - startKvadratur).count()<<"\n";
std::cout << "allocated device: \t" << std::chrono::duration_cast<microseconds>(stopAllocateDevice - startAllocateDevice).count()<<"\n";
std::cout << "copy host to device: \t" << std::chrono::duration_cast<microseconds>(stopCopyHostToDevice - startCopyHostToDevice).count()<<"\n";
std::cout << "getk0: \t\t\t" << std::chrono::duration_cast<microseconds>(stopGetk0 - startGetk0).count()<<"\n";
std::cout << "potential: \t\t" << std::chrono::duration_cast<microseconds>(stopPotential - startPotential).count()<<"\n";
std::cout << "Dsum: \t\t\t" << std::chrono::duration_cast<microseconds>(stopDsum - startDsum).count()<<"\n";
std::cout << "setupD: \t\t" << std::chrono::duration_cast<microseconds>(stopSetupD - startSetupD).count()<<"\n";
std::cout << "SetupVDKernel: \t\t" << std::chrono::duration_cast<microseconds>(stopSetupVDKernel - startSetupVDKernel).count()<<"\n";
std::cout << "computeTMatrixCUBLAS: \t" << std::chrono::duration_cast<microseconds>(stopcomputeTMatrixCUBLAS - startcomputeTMatrixCUBLAS).count()<<"\n";
std::cout << "computePhaseShifts: \t" << std::chrono::duration_cast<microseconds>(stopcomputePhaseShifts - startcomputePhaseShifts).count()<<"\n";
std::cout << "Free memory: \t" << std::chrono::duration_cast<microseconds>(freemem_end - freemem_start).count() << "\n";
*/
std::cout << std::chrono::duration_cast<microseconds>(finish - start).count() << ", ";
std::cout << std::chrono::duration_cast<microseconds>(stopcudafree0 - startcudafree0).count() << ", ";
std::cout << std::chrono::duration_cast<microseconds>(cublasCreate_stop - cublasCreate_start).count() << ", ";
std::cout << std::chrono::duration_cast<microseconds>(stopAllocateHost - startAllocateHost).count() << ", ";
std::cout << std::chrono::duration_cast<microseconds>(stopKvadratur - startKvadratur).count() << ", ";
std::cout << std::chrono::duration_cast<microseconds>(stopAllocateDevice - startAllocateDevice).count() << ", ";
std::cout << std::chrono::duration_cast<microseconds>(stopCopyHostToDevice - startCopyHostToDevice).count() << ", ";
std::cout << std::chrono::duration_cast<microseconds>(stopGetk0 - startGetk0).count() << ", ";
std::cout << std::chrono::duration_cast<microseconds>(stopPotential - startPotential).count() << ", ";
std::cout << std::chrono::duration_cast<microseconds>(stopDsum - startDsum).count() << ", ";
std::cout << std::chrono::duration_cast<microseconds>(stopSetupD - startSetupD).count() << ", ";
std::cout << std::chrono::duration_cast<microseconds>(stopSetupVDKernel - startSetupVDKernel).count() << ", ";
std::cout << std::chrono::duration_cast<microseconds>(stopcomputeTMatrixCUBLAS - startcomputeTMatrixCUBLAS).count() << ", ";
std::cout << std::chrono::duration_cast<microseconds>(stopcomputePhaseShifts - startcomputePhaseShifts).count() << ", ";
std::cout << std::chrono::duration_cast<microseconds>(freemem_end - freemem_start).count() << "\n";
return 0;
} |
LU_Faccterzation.hip | // !!! This is a file automatically generated by hipify!!!
//v9
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
#include <math.h>
#define MAXSIZE 8000
#define nCYCLIC
//#define BLOCK_SIZE 1024
int num_threads;
double rstar[2]={0.5,0.5};
__global__ void LU(double* a,int size, int index, int b_size)
{
extern __shared__ double pivot[]; /* shared copy of the pivot row; double to match the matrix type and the n*sizeof(double) launch size */
int i;
int tid=threadIdx.x;
int bid=blockIdx.x;
int block_size=b_size;
//debugprintf("num_rows is %d\n",num_rows);
//debugprintf("blockDim.x is %d\n",blockDim.x);
int pivot_start=(index*size+index);
int pivot_end=(index*size+size);
int start;
int end;
int pivot_row;
int my_row;
// if(threadIdx.x==1)
// {
//debug printf("Begin of LU\n");
// for(int index=0;index<n*n;index++){
//debugprintf("%f ",A[index]);
//if((index+1)%(n)==0&&index!=0)
//debugprintf("\n");
//}
// }
//printf("num_rows is%d\n",num_rows);
if(tid==0){
for(i=index;i<size;i++) pivot[i]=a[(index*size)+i];
}
__syncthreads();
 /* Offsets of the row handled by this thread: each thread eliminates one row
    below the pivot row (see the commented-out update lines below for the
    equivalent direct indexing into a[]). */
 pivot_row = (index*size);               /* first element of the pivot row      */
 my_row = ((block_size*bid)+tid)*size;   /* first element of this thread's row  */
 start = my_row+index;                   /* position of the multiplier a(row,index) */
 end = my_row+size;                      /* one past the last column of the row */
 if(my_row > pivot_row && my_row < size*size){
for(i=start+1;i<end;i++){
// a[i]=a[i]-(a[start]*a[(index*size)+i]);
// a[i]=a[i]-(a[start]*a[(index*size)+(index+(i-start))]);
a[i]=a[i]-(a[start]*pivot[(i-my_row)]);
}
}
//if(threadIdx.x==1)
//{
//debugprintf("End of LU!!!\n");
//for(int index=0;index<n*n;index++){
//debugprintf("%f ",A[index]);
//if((index+1)%(n)==0&&index!=0)
//debugprintf("\n");
//}
//}
}
__global__ void scale(double *a, int size, int index){
int i;
int start=(index*size+index);
int end=(index*size+size);
for(i=start+1;i<end;i++){
a[i]=(a[i]/a[start]);
}
}
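/* Reference sketch (never called): sequential host version of one elimination
   step, added to document what the scale + LU kernels above compute together
   for a given pivot 'index'.  It mirrors the device code one-to-one. */
#if 0
static void lu_step_reference(double *a, int size, int index)
{
    int r, j;
    double piv = a[index*size + index];
    /* scale kernel: divide the pivot row right of the diagonal by the pivot */
    for (j = index + 1; j < size; j++)
        a[index*size + j] /= piv;
    /* LU kernel: update every row below the pivot row */
    for (r = index + 1; r < size; r++)
        for (j = index + 1; j < size; j++)
            a[r*size + j] -= a[r*size + index] * a[index*size + j];
}
#endif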
/* solve_L: Solve the unit lower triangular system Lx = b
 * Routine is called with x=b
 * Only the strictly lower triangle of L is read (the diagonal of L is taken to be 1)
 */
__global__ void solve_L(int n, double *x, double *L)
{
//printf("enter solveL\n");
int i, j,l,ji_1;
//int num_rows = n / blockDim.x;
int num_rows = n/blockDim.x;
#ifdef CYCLIC
if(threadIdx.x==0)
printf("solve_L Cyclic parallel\n");
for (i=1; i< n; i++)
{
//i=l*blockDim.x+threadIdx.x;
for(l= 0; l< num_rows; l++)
{
j=l*blockDim.x+threadIdx.x;
ji_1=j*n+i-1;
if((i<=j)&&(j<n))
x[j] = x[j] - L[ji_1]*x[i-1];
}
__syncthreads();
}
//}
#else
if(threadIdx.x==0)
printf("solve_L non Cyclic parallel\n");
//code for uncyclic
for (i = 1; i < n; i++) {
for (l= 0; l< num_rows; l++) {
j=threadIdx.x*num_rows + l;
ji_1=j*n+i-1;
if((i<=j)&&(j<n))
x[j] = x[j] - L[ji_1]*x[i-1];
}
__syncthreads();
}
#endif
/*
int i, j;
if(threadIdx.x==1){
//int num_row = n / num_threads;
for (i = 1; i < n; i++) {
for (j = i; j < n; j++) {
x[j] = x[j] - L[j*n+i-1]*x[i-1];
}
}
}
*/
}
/* solve_U: Solve upper triangular system Ux = b
* Routine is called with x=b
* (Lower triangle of U)L is ignored
*/
__global__ void solve_U(int n, double *x, double *U)
{
#ifdef CYCLIC
if(threadIdx.x==0)
printf("solve_U Cyclic parallel\n");
int l,i, k,j,ji;
int num_rows = n/blockDim.x;
for(i=n-1; i>0;i--){
if(threadIdx.x==0)
x[i]=x[i]/ U[i*n+i];
__syncthreads();
for (l = 0; l < num_rows; l++)
{
j = l*blockDim.x+threadIdx.x;
ji = j*n + i;
if(j>=0&&j<i)
x[j] = x[j] - U[ji]*x[i];
}
__syncthreads();
}
if(threadIdx.x==0)
x[0] = x[0]/U[0];
#else
if(threadIdx.x==0)
printf("solve_U non Cyclic parallel\n");
int i, k;
int num_rows = n/blockDim.x;
for (i = n-1; i > 0; i--) {
if(threadIdx.x==0)
x[i] = x[i]/U[i*n+i];
__syncthreads();
for(k=0;k<num_rows;k++){
int j = threadIdx.x*num_rows + k;
int UIdx = j*n + i;
if(j>=0&&j<i)
x[j] = x[j] - U[UIdx]*x[i];
}
__syncthreads();
}
if(threadIdx.x==0)
x[0] = x[0]/U[0];
#endif
/*
int i, j;
if(threadIdx.x==1){
for (i = n-1; i > 0; i--) {
x[i] = x[i]/U[i*n+i];
for (j = 0; j < i; j++) {
x[j] = x[j] - U[j*n+i]*x[i];
}
}
x[0] = x[0]/U[0];
}
*/
}
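/* initialize_XY: fills XY with the n = m*m interior grid points of the unit
   square, XY[idx] = ((i+1)*h, (j+1)*h) with h = 1/(m+1). */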
void initialize_XY(int m,int n, double **XY)
{//checked
int i, j;
double h = 1/((double)m+1);
int idx = 0;
for (i = 0; i < m; i++)
{
for (j = 0; j < m; j++)
{
idx = idx+1;
XY[idx-1][0] = (double)(i+1)*h;
XY[idx-1][1] = (double)(j+1)*h;
//printf("XY%d0 is %f\n",idx-1,XY[idx-1][0]);
//printf("XY%d1 is %f\n",idx-1,XY[idx-1][1]);
}
}
//printf("i reached end of init_XY\n");
}
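/* initialize_f: fills f with noisy samples of 1 - (x-1/2)^2 - (y-1/2)^2 at the
   grid points, the noise being uniform in [-0.05, 0.05]. */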
void initialize_f(int m,int n,double *f,double **XY)
{//
int i,j;
double tempx,tempy;
double x_y[n];
for (i = 0; i < m; i++) {
x_y[i] = 0;
}
for (i = 0; i < n; i++) {
//f[i] = 0.1*(((double)rand()%(double)(1))-0.5);
f[i]=((double)rand()/RAND_MAX*2.0-1.0)*0.5*0.1;
//(double)rand()/RAND_MAX*2.0-1.0;//float in range -1 to 1
//printf("fi is%f\n",f[i]);
}
for(i=0;i<n;i++)
{
tempx=(XY[i][0]-0.5)*(XY[i][0]-0.5);
tempy=(XY[i][1]-0.5)*(XY[i][1]-0.5);
//x_y[i]=(XY[j][0]-0.5)*(XY[i][0]-0.5)+(XY[j][1]-0.5)*(XY[j][1]-0.5);
f[i] = f[i] + 1.0 -tempx-tempy;
}
for (i = 0; i < n; i++) {
//debugprintf("fi is%f\n",f[i]);
}
}
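/* initialize_K: fills the n x n matrix K with the Gaussian (squared-exponential)
   kernel K[i][j] = exp(-|r_i - r_j|^2) evaluated between grid points. */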
void initialize_K(int m, double **K,int n, double **XY)
{//checked
int i,j,idx;
double d[2];
double h ;
h = 1/(m+1);
//debugprintf("initialize_K\n");
for (i = 0; i < n; i++)
{
for (j = 0; j < n; j++)
{
d[0] = XY[i][0]-XY[j][0];
d[1] = XY[i][1]-XY[j][1];
K[i][j] = exp(-(d[0]*d[0])-(d[1]*d[1]));
//printf("Kij is%f\n",K[i][j] );
//printf("i is%d,j is %d\n",i,j);
}
//printf("\n");
}
}
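/* Initialize_ksmall: fills k with the kernel values exp(-|rstar - r_j|^2)
   between the query point rstar and every grid point. */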
void Initialize_ksmall(int n,double *k,double **XY)
{ int j;
double d[2];
//k = zeros(n,1);
for(j = 0; j < n; j++)
{ d[0] = rstar[0]-XY[j][0];
d[1] = rstar[1]-XY[j][1];
k[j] = exp(-(d[0]*d[0])-(d[1]*d[1]));
//printf("k_small is%f\n",k[j] );
}
}
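/* load_value: flattens the n x n host matrix host_K into the row-major
   array K so it can be copied to the device in one hipMemcpy. */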
void load_value(int n,double **host_K,double *K)
{
int i,j;
for (i = 0; i < n; i++)
{
for(j = 0; j < n; j++)
{
K[i*n+j]=host_K[i][j];
}
}
}
//debugprintf("end of load data\n");
// for(int index=0;index<n*n;index++){
//debug printf("%f ",K[index]);
//if(index%16==0&&index!=0)
//debugprintf("\n");
//}
//}
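/* compute_k: adds the regularization t = 0.01 to the diagonal of the n x n
   device matrix (K <- K + t*I); rows are distributed cyclically over the
   threads of the single launched block. */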
__global__ void compute_k(int n ,double *device_K)
{
//double t=0.01;
// t*eye(n)+K
double **eye;
/* for(int i=0;i<n;i++)
{ for(int j=0;j<n;j++)
{
eye[i][j]=0;
if(i==j){
eye[i][j]=0.01;
device_K[i*n+j]+=0.01;
}
}
}*/
int l,i,z,iz;
int num_rows = n / blockDim.x;
for (l= 0; l< num_rows; l++)
{
i=l*blockDim.x+threadIdx.x;
//printf("thread id is %d, my row is%d\n",threadIdx.x,i);
for (z = 0; z < n; z++)
{
iz=i*n+z;
if(i==z)
device_K[iz]+=0.01;
}
}
}
int main(int argc, char *argv[])
{int i1=0,i2=0;
double a1=0.0;
double a2=0.0;
/* parse the command-line arguments only when all four are present;
   otherwise argv[1..4] would be read past argc and the program would crash */
if(argc==5){
i1= atoi(argv[1]);
a1=atof(argv[2]);
a2=atof(argv[3]);
i2=atoi(argv[4]);
}
// printf(" argv1 is%d, argv2 is%f, arv3 is %f",i1,a1,a2);
int num_of_threads;
double **host_XY, **host_K;
double **XY, *K, *z,*k;
double *device_XY, *device_K,*device_f;
double *f;
double t = 0.01;
int blocks;
//int **arr = (int **)malloc(r * sizeof(int *));
int n,m;
double result=0;
if(argc==5&&i1>1&&i2>0&&i2<=1024&&((i1*i1)%i2==0)){ /* i2>0 is checked before the modulo so a zero thread count cannot divide by zero */
m=i1;
num_of_threads=i2;
rstar[0]=a1;
rstar[1]=a2;
printf(" you have input m is%d, rstar is%f %f, threads is %d\n",m, rstar[0],rstar[1],num_of_threads);
}
else{
num_of_threads =1024;
m=64;
rstar[0]=0.5;
rstar[1]=0.5;
    printf(" your input is illegal, falling back to the defaults\n");
printf(" input is m is%d, rstar is%f %f, threads is %d\n",m, rstar[0],rstar[1],num_of_threads);
}
/*= atoi(argv[0]);
if (n > MAXSIZE) {
printf("n must be less than %d ... aborting\n", MAXSIZE);
exit(0);
}
if (n <= 0) {
printf("n must be a positive integer ... aborting\n");
exit(0);
}*/
n=m*m;
//debugprintf("n is%d\n",n);
blocks=((n/512));
//debugprintf("number of threads is%d\n",num_of_threads);
//host_XY=(double **)malloc(sizeof(double)*n*2);
//XY=(double **)malloc(sizeof(double)*n*2);
//hipHostMalloc((void **) &h_a, sizeof(int)*m*n);
XY= new double* [n];
for(int setIndex=0; setIndex<n; setIndex++)
{
XY[ setIndex ] = new double[ n ];
for(int way=0; way<2; way++)
{
// initialize stack position (for true LRU)
XY[ setIndex ][ way ] = 0;
}
}
//K=(double *)malloc(sizeof(double)*n*n);
K= new double [n*n];
for(int setIndex=0; setIndex<n*n; setIndex++)
{
K[ setIndex ] = 0;
}
//host_K=(double **)malloc(sizeof(double)*n*n);
host_K= new double* [n];
for(int setIndex=0; setIndex<n; setIndex++)
{
host_K[ setIndex ] = new double[ n ];
for(int way=0; way<n; way++)
{
// initialize stack position (for true LRU)
host_K[ setIndex ][ way ] = 0;
}
}
//f=(double *)malloc(sizeof(double)*n);
f= new double [n];
for(int setIndex=0; setIndex<n; setIndex++)
{
f[ setIndex ] = 0;
}
//k=(double *)malloc(sizeof(double)*n);
k= new double [n];
for(int setIndex=0; setIndex<n; setIndex++)
{
k[setIndex ] = 0;
}
// z=(double *)malloc(sizeof(double)*n);
z= new double [n];
for(int setIndex=0; setIndex<n; setIndex++)
{
z[setIndex ] = 0;
}
//printf("I had reached end of all for loop\n");
initialize_XY(m, n, XY);
initialize_f(m, n,f, XY);
initialize_K(m, host_K, n, XY);
//printf("I had reached here1\n");
Initialize_ksmall(n,k,XY);
load_value(n,host_K,K);
//printf("I had reached here2\n");
hipMalloc( (void**)&device_K, n*n* sizeof (double) );
hipMemcpy(device_K, K, n*n* sizeof (double) ,hipMemcpyHostToDevice);
hipMalloc( (void**)&device_f, n*sizeof (double) );
hipMemcpy(device_f, f, n*sizeof (double) ,hipMemcpyHostToDevice);
// for(int setIndex=0; setIndex<n*n; setIndex++)
// {
// printf("K[ setIndex ] is %f\n",K[ setIndex ]);
// }
//hipMalloc ( (void**)&dev_b, N*N* sizeof (double) );
//hipMalloc( (void**)&device_K, n*n* sizeof (double) );
//dim3 dimBlock(num_of_threads, 1);
//dim3 dimGrid(m / dimBlock.x, m / dimBlock.y);
//t*eye(n)+K
hipLaunchKernelGGL(( compute_k), dim3(1), dim3(num_of_threads), 0, 0, n,device_K);
hipEvent_t start, stop;
float time1,time2;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord( start, 0 );
for(int i=0;i<n;i++){
hipLaunchKernelGGL(( scale), dim3(1),dim3(1), 0, 0, device_K,n,i);
// blocks= ((N-i-1)/512)+1;
blocks=((n/512));
	//	printf("Number of blocks rxd : %d \n",blocks);
	hipLaunchKernelGGL(( LU), dim3(blocks),dim3(512),n*sizeof(double), 0, device_K,n,i,512);
}
// LU<<<1, num_of_threads>>>(n,device_K);
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
hipEventElapsedTime( &time1, start, stop );
hipEventDestroy( start );
hipEventDestroy( stop );
/*for(int setIndex=0; setIndex<n*n; setIndex++)
{
// printf("device_K[ setIndex ] is %f\n",device_K[ setIndex ]);
}*/
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord( start, 0 );
hipLaunchKernelGGL(( solve_L), dim3(1), dim3(num_of_threads), 0, 0, n,device_f,device_K);
hipLaunchKernelGGL(( solve_U), dim3(1), dim3(num_of_threads), 0, 0, n,device_f,device_K);
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
hipEventElapsedTime( &time2, start, stop );
hipEventDestroy( start );
hipEventDestroy( stop );
hipMemcpy(z, device_f, n*sizeof (double),hipMemcpyDeviceToHost);
hipFree(device_K);
hipFree(device_f);
//hipMemcpy(d_K, K, size, hipMemcpyDeviceToHost);
for(int l=0;l<n;l++)
{
result+=z[l]*k[l];
//debugprintf("z[%d] is %f, k[l] is %f\n",l, z[l],k[l]);
}
  printf("f(x,y) is %f\n", result);
printf("LU time is %f\n",time1 );
printf("solve_L_U time is %f\n",time2 );
return 1;
}
| LU_Faccterzation.cu | //v9
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
#include <math.h>
#define MAXSIZE 8000
#define nCYCLIC
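// note: this defines nCYCLIC, not CYCLIC, so the non-cyclic (#else) branches of solve_L/solve_U are the ones compiled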
//#define BLOCK_SIZE 1024
int num_threads;
double rstar[2]={0.5,0.5};
__global__ void LU(double* a,int size, int index, int b_size)
{
	extern __shared__ double pivot[];	// dynamic shared memory; the launch allocates n*sizeof(double)
int i;
int tid=threadIdx.x;
int bid=blockIdx.x;
int block_size=b_size;
//debugprintf("num_rows is %d\n",num_rows);
//debugprintf("blockDim.x is %d\n",blockDim.x);
int pivot_start=(index*size+index);
int pivot_end=(index*size+size);
	int pivot_row = index*size;                 // flattened offset of the pivot row
	int my_row = (bid*block_size+tid)*size;     // flattened offset of the row this thread eliminates
	int start = my_row+index;                   // this row's element in the pivot column
	int end = my_row+size;                      // one past this row's last element
// if(threadIdx.x==1)
// {
//debug printf("Begin of LU\n");
// for(int index=0;index<n*n;index++){
//debugprintf("%f ",A[index]);
//if((index+1)%(n)==0&&index!=0)
//debugprintf("\n");
//}
// }
//printf("num_rows is%d\n",num_rows);
if(tid==0){
for(i=index;i<size;i++) pivot[i]=a[(index*size)+i];
}
__syncthreads();
if(my_row >pivot_row){
for(i=start+1;i<end;i++){
// a[i]=a[i]-(a[start]*a[(index*size)+i]);
// a[i]=a[i]-(a[start]*a[(index*size)+(index+(i-start))]);
a[i]=a[i]-(a[start]*pivot[(i-my_row)]);
}
}
//if(threadIdx.x==1)
//{
//debugprintf("End of LU!!!\n");
//for(int index=0;index<n*n;index++){
//debugprintf("%f ",A[index]);
//if((index+1)%(n)==0&&index!=0)
//debugprintf("\n");
//}
//}
}
__global__ void scale(double *a, int size, int index){
int i;
int start=(index*size+index);
int end=(index*size+size);
for(i=start+1;i<end;i++){
a[i]=(a[i]/a[start]);
}
}
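// Taken together, scale and LU perform one elimination step for pivot `index`:
// scale divides the entries of pivot row `index` to the right of the diagonal by the pivot
// a[index*size+index], and LU then subtracts a[r*size+index] times that (already scaled) pivot
// row, cached in shared memory, from every row r below the pivot, touching only columns > index.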
/* solve_L: Forward substitution for the lower-triangular system Lx = b.
 * Routine is called with x = b; no division by the diagonal is performed,
 * i.e. L is treated as unit lower triangular.
 */
__global__ void solve_L(int n, double *x, double *L)
{
//printf("enter solveL\n");
int i, j,l,ji_1;
//int num_rows = n / blockDim.x;
int num_rows = n/blockDim.x;
#ifdef CYCLIC
if(threadIdx.x==0)
printf("solve_L Cyclic parallel\n");
for (i=1; i< n; i++)
{
//i=l*blockDim.x+threadIdx.x;
for(l= 0; l< num_rows; l++)
{
j=l*blockDim.x+threadIdx.x;
ji_1=j*n+i-1;
if((i<=j)&&(j<n))
x[j] = x[j] - L[ji_1]*x[i-1];
}
__syncthreads();
}
//}
#else
if(threadIdx.x==0)
printf("solve_L non Cyclic parallel\n");
//code for uncyclic
for (i = 1; i < n; i++) {
for (l= 0; l< num_rows; l++) {
j=threadIdx.x*num_rows + l;
ji_1=j*n+i-1;
if((i<=j)&&(j<n))
x[j] = x[j] - L[ji_1]*x[i-1];
}
__syncthreads();
}
#endif
/*
int i, j;
if(threadIdx.x==1){
//int num_row = n / num_threads;
for (i = 1; i < n; i++) {
for (j = i; j < n; j++) {
x[j] = x[j] - L[j*n+i-1]*x[i-1];
}
}
}
*/
}
/* solve_U: Back substitution for the upper triangular system Ux = b.
 * Routine is called with x = b.
 * The lower triangle of U is ignored.
 */
__global__ void solve_U(int n, double *x, double *U)
{
#ifdef CYCLIC
if(threadIdx.x==0)
printf("solve_U Cyclic parallel\n");
int l,i, k,j,ji;
int num_rows = n/blockDim.x;
for(i=n-1; i>0;i--){
if(threadIdx.x==0)
x[i]=x[i]/ U[i*n+i];
__syncthreads();
for (l = 0; l < num_rows; l++)
{
j = l*blockDim.x+threadIdx.x;
ji = j*n + i;
if(j>=0&&j<i)
x[j] = x[j] - U[ji]*x[i];
}
__syncthreads();
}
if(threadIdx.x==0)
x[0] = x[0]/U[0];
#else
if(threadIdx.x==0)
printf("solve_U non Cyclic parallel\n");
int i, k;
int num_rows = n/blockDim.x;
for (i = n-1; i > 0; i--) {
if(threadIdx.x==0)
x[i] = x[i]/U[i*n+i];
__syncthreads();
for(k=0;k<num_rows;k++){
int j = threadIdx.x*num_rows + k;
int UIdx = j*n + i;
if(j>=0&&j<i)
x[j] = x[j] - U[UIdx]*x[i];
}
__syncthreads();
}
if(threadIdx.x==0)
x[0] = x[0]/U[0];
#endif
/*
int i, j;
if(threadIdx.x==1){
for (i = n-1; i > 0; i--) {
x[i] = x[i]/U[i*n+i];
for (j = 0; j < i; j++) {
x[j] = x[j] - U[j*n+i]*x[i];
}
}
x[0] = x[0]/U[0];
}
*/
}
void initialize_XY(int m,int n, double **XY)
{//checked
int i, j;
double h = 1/((double)m+1);
int idx = 0;
for (i = 0; i < m; i++)
{
for (j = 0; j < m; j++)
{
idx = idx+1;
XY[idx-1][0] = (double)(i+1)*h;
XY[idx-1][1] = (double)(j+1)*h;
//printf("XY%d0 is %f\n",idx-1,XY[idx-1][0]);
//printf("XY%d1 is %f\n",idx-1,XY[idx-1][1]);
}
}
//printf("i reached end of init_XY\n");
}
void initialize_f(int m,int n,double *f,double **XY)
{//
int i,j;
double tempx,tempy;
double x_y[n];
  for (i = 0; i < n; i++) {
x_y[i] = 0;
}
for (i = 0; i < n; i++) {
//f[i] = 0.1*(((double)rand()%(double)(1))-0.5);
f[i]=((double)rand()/RAND_MAX*2.0-1.0)*0.5*0.1;
//(double)rand()/RAND_MAX*2.0-1.0;//float in range -1 to 1
//printf("fi is%f\n",f[i]);
}
for(i=0;i<n;i++)
{
tempx=(XY[i][0]-0.5)*(XY[i][0]-0.5);
tempy=(XY[i][1]-0.5)*(XY[i][1]-0.5);
//x_y[i]=(XY[j][0]-0.5)*(XY[i][0]-0.5)+(XY[j][1]-0.5)*(XY[j][1]-0.5);
f[i] = f[i] + 1.0 -tempx-tempy;
}
for (i = 0; i < n; i++) {
//debugprintf("fi is%f\n",f[i]);
}
}
void initialize_K(int m, double **K,int n, double **XY)
{//checked
int i,j,idx;
double d[2];
  double h;
  h = 1/((double)m+1);	/* grid spacing; unused below, kept for reference */
//debugprintf("initialize_K\n");
for (i = 0; i < n; i++)
{
for (j = 0; j < n; j++)
{
d[0] = XY[i][0]-XY[j][0];
d[1] = XY[i][1]-XY[j][1];
K[i][j] = exp(-(d[0]*d[0])-(d[1]*d[1]));
//printf("Kij is%f\n",K[i][j] );
//printf("i is%d,j is %d\n",i,j);
}
//printf("\n");
}
}
void Initialize_ksmall(int n,double *k,double **XY)
{ int j;
double d[2];
//k = zeros(n,1);
for(j = 0; j < n; j++)
{ d[0] = rstar[0]-XY[j][0];
d[1] = rstar[1]-XY[j][1];
k[j] = exp(-(d[0]*d[0])-(d[1]*d[1]));
//printf("k_small is%f\n",k[j] );
}
}
void load_value(int n,double **host_K,double *K)
{
int i,j;
for (i = 0; i < n; i++)
{
for(j = 0; j < n; j++)
{
K[i*n+j]=host_K[i][j];
}
}
}
//debugprintf("end of load data\n");
// for(int index=0;index<n*n;index++){
//debug printf("%f ",K[index]);
//if(index%16==0&&index!=0)
//debugprintf("\n");
//}
//}
__global__ void compute_k(int n ,double *device_K)
{
//double t=0.01;
// t*eye(n)+K
double **eye;
/* for(int i=0;i<n;i++)
{ for(int j=0;j<n;j++)
{
eye[i][j]=0;
if(i==j){
eye[i][j]=0.01;
device_K[i*n+j]+=0.01;
}
}
}*/
int l,i,z,iz;
int num_rows = n / blockDim.x;
for (l= 0; l< num_rows; l++)
{
i=l*blockDim.x+threadIdx.x;
//printf("thread id is %d, my row is%d\n",threadIdx.x,i);
for (z = 0; z < n; z++)
{
iz=i*n+z;
if(i==z)
device_K[iz]+=0.01;
}
}
}
int main(int argc, char *argv[])
{int i1,i2;
i1= atoi(argv[1]);
double a1;
double a2;
a1=atof(argv[2]);
a2=atof(argv[3]);
i2=atoi(argv[4]);
// printf(" argv1 is%d, argv2 is%f, arv3 is %f",i1,a1,a2);
int num_of_threads;
double **host_XY, **host_K;
double **XY, *K, *z,*k;
double *device_XY, *device_K,*device_f;
double *f;
double t = 0.01;
int blocks;
//int **arr = (int **)malloc(r * sizeof(int *));
int n,m;
double result=0;
if(argc==5&&((i1*i1)%i2==0)&&i1>1&&i2>0&&i2<=1024){
m=i1;
num_of_threads=i2;
rstar[0]=a1;
rstar[1]=a2;
printf(" you have input m is%d, rstar is%f %f, threads is %d\n",m, rstar[0],rstar[1],num_of_threads);
}
else{
num_of_threads =1024;
m=64;
rstar[0]=0.5;
rstar[1]=0.5;
    printf(" your input is illegal, falling back to the defaults\n");
printf(" input is m is%d, rstar is%f %f, threads is %d\n",m, rstar[0],rstar[1],num_of_threads);
}
/*= atoi(argv[0]);
if (n > MAXSIZE) {
printf("n must be less than %d ... aborting\n", MAXSIZE);
exit(0);
}
if (n <= 0) {
printf("n must be a positive integer ... aborting\n");
exit(0);
}*/
n=m*m;
//debugprintf("n is%d\n",n);
blocks=((n/512));
//debugprintf("number of threads is%d\n",num_of_threads);
//host_XY=(double **)malloc(sizeof(double)*n*2);
//XY=(double **)malloc(sizeof(double)*n*2);
//cudaMallocHost((void **) &h_a, sizeof(int)*m*n);
XY= new double* [n];
for(int setIndex=0; setIndex<n; setIndex++)
{
XY[ setIndex ] = new double[ n ];
for(int way=0; way<2; way++)
{
// initialize stack position (for true LRU)
XY[ setIndex ][ way ] = 0;
}
}
//K=(double *)malloc(sizeof(double)*n*n);
K= new double [n*n];
for(int setIndex=0; setIndex<n*n; setIndex++)
{
K[ setIndex ] = 0;
}
//host_K=(double **)malloc(sizeof(double)*n*n);
host_K= new double* [n];
for(int setIndex=0; setIndex<n; setIndex++)
{
host_K[ setIndex ] = new double[ n ];
for(int way=0; way<n; way++)
{
// initialize stack position (for true LRU)
host_K[ setIndex ][ way ] = 0;
}
}
//f=(double *)malloc(sizeof(double)*n);
f= new double [n];
for(int setIndex=0; setIndex<n; setIndex++)
{
f[ setIndex ] = 0;
}
//k=(double *)malloc(sizeof(double)*n);
k= new double [n];
for(int setIndex=0; setIndex<n; setIndex++)
{
k[setIndex ] = 0;
}
// z=(double *)malloc(sizeof(double)*n);
z= new double [n];
for(int setIndex=0; setIndex<n; setIndex++)
{
z[setIndex ] = 0;
}
//printf("I had reached end of all for loop\n");
initialize_XY(m, n, XY);
initialize_f(m, n,f, XY);
initialize_K(m, host_K, n, XY);
//printf("I had reached here1\n");
Initialize_ksmall(n,k,XY);
load_value(n,host_K,K);
//printf("I had reached here2\n");
cudaMalloc( (void**)&device_K, n*n* sizeof (double) );
cudaMemcpy(device_K, K, n*n* sizeof (double) ,cudaMemcpyHostToDevice);
cudaMalloc( (void**)&device_f, n*sizeof (double) );
cudaMemcpy(device_f, f, n*sizeof (double) ,cudaMemcpyHostToDevice);
// for(int setIndex=0; setIndex<n*n; setIndex++)
// {
// printf("K[ setIndex ] is %f\n",K[ setIndex ]);
// }
//cudaMalloc ( (void**)&dev_b, N*N* sizeof (double) );
//cudaMalloc( (void**)&device_K, n*n* sizeof (double) );
//dim3 dimBlock(num_of_threads, 1);
//dim3 dimGrid(m / dimBlock.x, m / dimBlock.y);
//t*eye(n)+K
compute_k<<<1, num_of_threads>>>(n,device_K);
cudaEvent_t start, stop;
float time1,time2;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
for(int i=0;i<n;i++){
scale<<<1,1>>>(device_K,n,i);
// blocks= ((N-i-1)/512)+1;
blocks=((n/512));
// printf("Number of blocks rxd : %d \n",blocks);
LU<<<blocks,512,n*sizeof(double)>>>(device_K,n,i,512);
}
// LU<<<1, num_of_threads>>>(n,device_K);
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &time1, start, stop );
cudaEventDestroy( start );
cudaEventDestroy( stop );
/*for(int setIndex=0; setIndex<n*n; setIndex++)
{
// printf("device_K[ setIndex ] is %f\n",device_K[ setIndex ]);
}*/
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
solve_L<<<1, num_of_threads>>>(n,device_f,device_K);
solve_U<<<1, num_of_threads>>>(n,device_f,device_K);
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &time2, start, stop );
cudaEventDestroy( start );
cudaEventDestroy( stop );
cudaMemcpy(z, device_f, n*sizeof (double),cudaMemcpyDeviceToHost);
cudaFree(device_K);
cudaFree(device_f);
//cudaMemcpy(d_K, K, size, cudaMemcpyDeviceToHost);
for(int l=0;l<n;l++)
{
result+=z[l]*k[l];
//debugprintf("z[%d] is %f, k[l] is %f\n",l, z[l],k[l]);
}
  printf("f(x,y) is %f\n", result);
printf("LU time is %f\n",time1 );
printf("solve_L_U time is %f\n",time2 );
return 1;
}
|
1cf57f2ce5b39dc4f75e95033c3f7b36165934e4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gn_solver.h"
__inline__ __device__ float warpReduce(float val) {
int offset = 32 >> 1;
while (offset > 0) {
val = val + __shfl_down_sync(FULL_MASK, val, offset, 32);
// val = val + __shfl_down(val, offset, 32);
offset = offset >> 1;
}
return val;
}
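// After the shuffle loop, lane 0 of each warp holds the sum of `val` over all 32 lanes
// (callers therefore guard the subsequent atomicAdd with `threadIdx.x % WARP_SIZE == 0`).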
//! compute a rotation exponential using the Rodrigues Formula.
// rotation axis w (theta = |w|); A = sin(theta) / theta; B = (1 - cos(theta)) / theta^2
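// i.e. R = I + A*[w]_x + B*[w]_x^2, where [w]_x is the skew-symmetric (cross-product) matrix of w.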
__inline__ __device__ void rodrigues_so3_exp(const Vec3f &w, float A, float B, Mat3f &R) {
{
const float wx2 = w(0) * w(0);
const float wy2 = w(1) * w(1);
const float wz2 = w(2) * w(2);
R(0, 0) = 1.0f - B * (wy2 + wz2);
R(1, 1) = 1.0f - B * (wx2 + wz2);
R(2, 2) = 1.0f - B * (wx2 + wy2);
}
{
const float a = A * w(2);
const float b = B * (w(0) * w(1));
R(0, 1) = b - a;
R(1, 0) = b + a;
}
{
const float a = A * w(1);
const float b = B * (w(0) * w(2));
R(0, 2) = b + a;
R(2, 0) = b - a;
}
{
const float a = A * w(0);
const float b = B * (w(1) * w(2));
R(1, 2) = b - a;
R(2, 1) = b + a;
}
}
__inline__ __device__ void poseToMatrix(const Vec3f &rot, const Vec3f &trans, Mat4f &matrix) {
matrix.setIdentity();
Vec3f translation;
Mat3f rotation;
const float theta_sq = rot.dot(rot);
const float theta = std::sqrt(theta_sq);
float A, B;
Vec3f cr = rot.cross(trans);
if (theta_sq < 1e-8) {
A = 1.0f - ONE_SIXTH * theta_sq;
B = 0.5f;
translation = trans + 0.5f * cr;
} else {
float C;
if (theta_sq < 1e-6) {
C = ONE_SIXTH * (1.0f - ONE_TWENTIETH * theta_sq);
A = 1.0f - theta_sq * C;
B = 0.5f - 0.25f * ONE_SIXTH * theta_sq;
} else {
const float inv_theta = 1.0f / theta;
A = sinf(theta) * inv_theta;
B = (1 - cosf(theta)) * (inv_theta * inv_theta);
C = (1 - A) * (inv_theta * inv_theta);
}
Vec3f w_cross = rot.cross(cr);
translation = trans + B * cr + C * w_cross;
}
// 3x3 rotation part:
rodrigues_so3_exp(rot, A, B, rotation);
//set rotation
matrix.topLeftCorner(3, 3) = rotation;
//set translation
matrix.topRightCorner(3, 1) = translation;
}
__inline__ __device__ Mat4f poseToMatrix(const Vec3f &rot, const Vec3f &trans) {
Mat4f res;
poseToMatrix(rot, trans, res);
return res;
}
//! exponentiate a vector in the Lie algebra to generate a new SO3(a 3x3 rotation matrix).
__inline__ __device__ Mat3f exp_rotation(const Vec3f &w) {
const float theta_sq = w.dot(w);
const float theta = std::sqrt(theta_sq);
float A, B;
//Use a Taylor series expansion near zero. This is required for
//accuracy, since sin t / t and (1-cos t)/t^2 are both 0/0.
if (theta_sq < 1e-8) {
A = 1.0f - ONE_SIXTH * theta_sq;
B = 0.5f;
} else {
if (theta_sq < 1e-6) {
B = 0.5f - 0.25f * ONE_SIXTH * theta_sq;
A = 1.0f - theta_sq * ONE_SIXTH * (1.0f - ONE_TWENTIETH * theta_sq);
} else {
const float inv_theta = 1.0f / theta;
A = sinf(theta) * inv_theta;
B = (1 - cosf(theta)) * (inv_theta * inv_theta);
}
}
Mat3f result;
rodrigues_so3_exp(w, A, B, result);
return result;
}
//! logarithm of the 3x3 rotation matrix, generating the corresponding vector in the Lie Algebra
__inline__ __device__ Vec3f ln_rotation(const Mat3f &rotation) {
Vec3f result; // skew symm matrix = (R - R^T) * angle / (2 * sin(angle))
const float cos_angle = (rotation.trace() - 1.0f) * 0.5f;
//(R - R^T) / 2
result(0) = (rotation(2, 1) - rotation(1, 2)) * 0.5f;
result(1) = (rotation(0, 2) - rotation(2, 0)) * 0.5f;
result(2) = (rotation(1, 0) - rotation(0, 1)) * 0.5f;
float sin_angle_abs = result.norm(); //sqrt(result*result);
if (cos_angle > (float) 0.70710678118654752440) { // [0 - Pi/4[ use asin
if (sin_angle_abs > 0) {
result *= asinf(sin_angle_abs) / sin_angle_abs;
}
} else if (cos_angle > -(float) 0.70710678118654752440) { // [Pi/4 - 3Pi/4[ use acos, but antisymmetric part
float angle = acosf(cos_angle);
result *= angle / sin_angle_abs;
} else { // rest use symmetric part
// antisymmetric part vanishes, but still large rotation, need information from symmetric part
const float angle = CUDART_PI_F - asinf(sin_angle_abs);
const float
d0 = rotation(0, 0) - cos_angle,
d1 = rotation(1, 1) - cos_angle,
d2 = rotation(2, 2) - cos_angle;
Vec3f r2;
if (fabsf(d0) > fabsf(d1) && fabsf(d0) > fabsf(d2)) { // first is largest, fill with first column
r2(0) = d0;
r2(1) = (rotation(1, 0) + rotation(0, 1)) * 0.5f;
r2(2) = (rotation(0, 2) + rotation(2, 0)) * 0.5f;
} else if (fabsf(d1) > fabsf(d2)) { // second is largest, fill with second column
r2(0) = (rotation(1, 0) + rotation(0, 1)) * 0.5f;
r2(1) = d1;
r2(2) = (rotation(2, 1) + rotation(1, 2)) * 0.5f;
} else { // third is largest, fill with third column
r2(0) = (rotation(0, 2) + rotation(2, 0)) * 0.5f;
r2(1) = (rotation(2, 1) + rotation(1, 2)) * 0.5f;
r2(2) = d2;
}
// flip, if we point in the wrong direction!
if (r2.dot(result) < 0)
r2 *= -1;
result = r2;
result *= (angle / r2.norm());
}
return result;
}
__inline__ __device__ void matrixToPose(const Mat4f &matrix, Vec3f &rot, Vec3f &trans) {
const Mat3f R = matrix.topLeftCorner(3, 3);
const Vec3f t = matrix.topRightCorner(3, 1);
rot = ln_rotation(R);
const float theta = rot.norm();
float shtot = 0.5f;
if (theta > 0.00001f)
shtot = sinf(theta * 0.5f) / theta;
// now do the rotation
Vec3f rot_half = rot;
rot_half *= -0.5f;
const Mat3f halfrotator = exp_rotation(rot_half);
trans = halfrotator * t;
if (theta > 0.001f)
trans -= rot * (t.dot(rot) * (1 - 2 * shtot) / rot.dot(rot));
else
trans -= rot * (t.dot(rot) / 24);
trans *= 1.0f / (2 * shtot);
}
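// matrixToPose is the inverse of poseToMatrix: it recovers the (rotation, translation)
// twist coordinates whose exponential is the given rigid-body matrix.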
__inline__ __device__ void
evalMinusJTFDevice(unsigned int variableIdx, SolverInput &input, SolverState &state, SolverParameters ¶meters,
Vec3f &resRot, Vec3f &resTrans) {
// Reset linearized update vector
state.d_deltaRot[variableIdx].setZero();
state.d_deltaTrans[variableIdx].setZero();
//// transrot
uint3 transIndices = make_uint3(variableIdx * 6 + 0, variableIdx * 6 + 1, variableIdx * 6 + 2);
uint3 rotIndices = make_uint3(variableIdx * 6 + 3, variableIdx * 6 + 4, variableIdx * 6 + 5);
resRot = -Vec3f(state.d_denseJtr[rotIndices.x], state.d_denseJtr[rotIndices.y],
state.d_denseJtr[rotIndices.z]); //minus since -Jtf, weight already built in
resTrans = -Vec3f(state.d_denseJtr[transIndices.x], state.d_denseJtr[transIndices.y],
state.d_denseJtr[transIndices.z]); //minus since -Jtf, weight already built in
//// preconditioner
Vec3f pRot(state.d_denseJtJ[rotIndices.x * input.numberOfImages * 6 + rotIndices.x],
state.d_denseJtJ[rotIndices.y * input.numberOfImages * 6 + rotIndices.y],
state.d_denseJtJ[rotIndices.z * input.numberOfImages * 6 + rotIndices.z]);
Vec3f pTrans(state.d_denseJtJ[transIndices.x * input.numberOfImages * 6 + transIndices.x],
state.d_denseJtJ[transIndices.y * input.numberOfImages * 6 + transIndices.y],
state.d_denseJtJ[transIndices.z * input.numberOfImages * 6 + transIndices.z]);
// Preconditioner depends on last solution P(input.d_x)
if (pRot(0) > FLOAT_EPSILON) state.d_precondionerRot[variableIdx](0) = 1.0f / pRot(0);
else state.d_precondionerRot[variableIdx](0) = 1.0f;
if (pRot(1) > FLOAT_EPSILON) state.d_precondionerRot[variableIdx](1) = 1.0f / pRot(1);
else state.d_precondionerRot[variableIdx](1) = 1.0f;
if (pRot(2) > FLOAT_EPSILON) state.d_precondionerRot[variableIdx](2) = 1.0f / pRot(2);
else state.d_precondionerRot[variableIdx](2) = 1.0f;
if (pTrans(0) > FLOAT_EPSILON) state.d_precondionerTrans[variableIdx](0) = 1.0f / pTrans(0);
else state.d_precondionerTrans[variableIdx](0) = 1.0f;
if (pTrans(1) > FLOAT_EPSILON) state.d_precondionerTrans[variableIdx](1) = 1.0f / pTrans(1);
else state.d_precondionerTrans[variableIdx](1) = 1.0f;
if (pTrans(2) > FLOAT_EPSILON) state.d_precondionerTrans[variableIdx](2) = 1.0f / pTrans(2);
else state.d_precondionerTrans[variableIdx](2) = 1.0f;
}
__inline__ __device__ void
applyJTJDenseDevice(unsigned int variableIdx, SolverState &state, float *d_JtJ, unsigned int N, Vec3f &outRot,
Vec3f &outTrans, unsigned int threadIdx) {
// Compute J^T*d_Jp here
outRot.setZero();
outTrans.setZero();
const unsigned int dim = 6 * N;
unsigned int baseVarIdx = variableIdx * 6;
unsigned int i = (threadIdx > 0) ? threadIdx : THREADS_PER_BLOCK_JT_DENSE;
for (; i < N; i += THREADS_PER_BLOCK_JT_DENSE) // iterate through (6) row(s) of JtJ
{
// (row, col) = vars, i
unsigned int baseIdx = 6 * i;
Mat3f block00, block01, block10, block11;
block00 << d_JtJ[(baseVarIdx + 0) * dim + baseIdx + 0], d_JtJ[(baseVarIdx + 0) * dim + baseIdx + 1],
d_JtJ[(baseVarIdx + 0) * dim + baseIdx + 2],
d_JtJ[(baseVarIdx + 1) * dim + baseIdx + 0], d_JtJ[(baseVarIdx + 1) * dim + baseIdx + 1],
d_JtJ[(baseVarIdx + 1) * dim + baseIdx + 2],
d_JtJ[(baseVarIdx + 2) * dim + baseIdx + 0], d_JtJ[(baseVarIdx + 2) * dim + baseIdx + 1],
d_JtJ[(baseVarIdx + 2) * dim + baseIdx + 2];
block01 << d_JtJ[(baseVarIdx + 0) * dim + baseIdx + 3], d_JtJ[(baseVarIdx + 0) * dim + baseIdx + 4],
d_JtJ[(baseVarIdx + 0) * dim + baseIdx + 5],
d_JtJ[(baseVarIdx + 1) * dim + baseIdx + 3], d_JtJ[(baseVarIdx + 1) * dim + baseIdx + 4],
d_JtJ[(baseVarIdx + 1) * dim + baseIdx + 5],
d_JtJ[(baseVarIdx + 2) * dim + baseIdx + 3], d_JtJ[(baseVarIdx + 2) * dim + baseIdx + 4],
d_JtJ[(baseVarIdx + 2) * dim + baseIdx + 5];
block10 << d_JtJ[(baseVarIdx + 3) * dim + baseIdx + 0], d_JtJ[(baseVarIdx + 3) * dim + baseIdx + 1],
d_JtJ[(baseVarIdx + 3) * dim + baseIdx + 2],
d_JtJ[(baseVarIdx + 4) * dim + baseIdx + 0], d_JtJ[(baseVarIdx + 4) * dim + baseIdx + 1],
d_JtJ[(baseVarIdx + 4) * dim + baseIdx + 2],
d_JtJ[(baseVarIdx + 5) * dim + baseIdx + 0], d_JtJ[(baseVarIdx + 5) * dim + baseIdx + 1],
d_JtJ[(baseVarIdx + 5) * dim + baseIdx + 2];
block11 << d_JtJ[(baseVarIdx + 3) * dim + baseIdx + 3], d_JtJ[(baseVarIdx + 3) * dim + baseIdx + 4],
d_JtJ[(baseVarIdx + 3) * dim + baseIdx + 5],
d_JtJ[(baseVarIdx + 4) * dim + baseIdx + 3], d_JtJ[(baseVarIdx + 4) * dim + baseIdx + 4],
d_JtJ[(baseVarIdx + 4) * dim + baseIdx + 5],
d_JtJ[(baseVarIdx + 5) * dim + baseIdx + 3], d_JtJ[(baseVarIdx + 5) * dim + baseIdx + 4],
d_JtJ[(baseVarIdx + 5) * dim + baseIdx + 5];
//// transrot
outTrans += (block00 * state.d_pTrans[i] + block01 * state.d_pRot[i]);
outRot += (block10 * state.d_pTrans[i] + block11 * state.d_pRot[i]);
}
outRot(0) = warpReduce(outRot(0));
outRot(1) = warpReduce(outRot(1));
outRot(2) = warpReduce(outRot(2));
outTrans(0) = warpReduce(outTrans(0));
outTrans(1) = warpReduce(outTrans(1));
outTrans(2) = warpReduce(outTrans(2));
}
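// Each thread accumulates a partial product of this variable's 6-row band of J^T J with the
// current search direction p; warpReduce leaves the per-warp sums in lane 0, which the calling
// kernel (PCGStep_Kernel_Dense) atomically adds into d_Ap_XRot / d_Ap_XTrans.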
__inline__ __device__ void
computeLieUpdate(const Vec3f &updateW, const Vec3f &updateT, const Vec3f &curW, const Vec3f &curT,
Vec3f &newW, Vec3f &newT) {
Mat4f update = poseToMatrix(updateW, updateT);
Mat4f cur = poseToMatrix(curW, curT);
Mat4f n = update * cur;
matrixToPose(n, newW, newT);
}
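// Applies the solved increment on the left in matrix form (update * cur) and maps the
// composed transform back to Lie-algebra (twist) coordinates.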
__global__ void PCGInit_Kernel1(SolverInput input, SolverState state, SolverParameters parameters) {
const unsigned int N = input.numberOfImages;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
float d = 0.0f;
if (x > 0 && x < N) {
Vec3f resRot, resTrans;
evalMinusJTFDevice(x, input, state, parameters, resRot,
resTrans); // residuum = J^T x -F - A x delta_0 => J^T x -F, since A x x_0 == 0
state.d_rRot[x] = resRot; // store for next iteration
state.d_rTrans[x] = resTrans; // store for next iteration
const Vec3f pRot = state.d_precondionerRot[x].cwiseProduct(resRot); // apply preconditioner M^-1
state.d_pRot[x] = pRot;
const Vec3f pTrans = state.d_precondionerTrans[x].cwiseProduct(resTrans); // apply preconditioner M^-1
state.d_pTrans[x] = pTrans;
        d = resRot.dot(pRot) + resTrans.dot(
                pTrans); // x-th term of the numerator for computing alpha and of the denominator for computing beta
state.d_Ap_XRot[x].setZero();
state.d_Ap_XTrans[x].setZero();
}
d = warpReduce(d);
if (threadIdx.x % WARP_SIZE == 0) {
atomicAdd(state.d_scanAlpha, d);
}
}
__global__ void PCGInit_Kernel2(unsigned int N, SolverState state) {
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x > 0 && x < N) state.d_rDotzOld[x] = state.d_scanAlpha[0]; // store result for next kernel call
}
__global__ void PCGStep_Kernel_Dense(SolverInput input, SolverState state, SolverParameters parameters) {
const unsigned int N = input.numberOfImages; // Number of block variables
const unsigned int x = blockIdx.x;
const unsigned int lane = threadIdx.x % WARP_SIZE;
if (x > 0 && x < N) {
Vec3f rot, trans;
applyJTJDenseDevice(x, state, state.d_denseJtJ, input.numberOfImages, rot, trans,
threadIdx.x); // A x p_k => J^T x J x p_k
if (lane == 0) {
atomicAdd(&state.d_Ap_XRot[x].data()[0], rot(0));//TODO
atomicAdd(&state.d_Ap_XRot[x].data()[1], rot(1));
atomicAdd(&state.d_Ap_XRot[x].data()[2], rot(2));
atomicAdd(&state.d_Ap_XTrans[x].data()[0], trans(0));
atomicAdd(&state.d_Ap_XTrans[x].data()[1], trans(1));
atomicAdd(&state.d_Ap_XTrans[x].data()[2], trans(2));
}
}
}
__global__ void PCGStep_Kernel1b(SolverInput input, SolverState state, SolverParameters parameters) {
const unsigned int N = input.numberOfImages; // Number of block variables
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
float d = 0.0f;
if (x > 0 && x < N) {
d = state.d_pRot[x].dot(state.d_Ap_XRot[x]) +
state.d_pTrans[x].dot(state.d_Ap_XTrans[x]); // x-th term of denominator of alpha
}
d = warpReduce(d);
if (threadIdx.x % WARP_SIZE == 0) {
atomicAdd(state.d_scanAlpha, d);
}
}
__global__ void PCGStep_Kernel2(SolverInput input, SolverState state) {
const unsigned int N = input.numberOfImages;
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
const float dotProduct = state.d_scanAlpha[0];
float b = 0.0f;
if (x > 0 && x < N) {
float alpha = 0.0f;
if (dotProduct > FLOAT_EPSILON) alpha = state.d_rDotzOld[x] / dotProduct; // update step size alpha
        state.d_deltaRot[x] = state.d_deltaRot[x] + alpha * state.d_pRot[x];        // take a descent step
        state.d_deltaTrans[x] = state.d_deltaTrans[x] + alpha * state.d_pTrans[x];    // take a descent step
Vec3f rRot = state.d_rRot[x] - alpha * state.d_Ap_XRot[x]; // update residuum
state.d_rRot[x] = rRot; // store for next kernel call
Vec3f rTrans = state.d_rTrans[x] - alpha * state.d_Ap_XTrans[x]; // update residuum
state.d_rTrans[x] = rTrans; // store for next kernel call
Vec3f zRot = state.d_precondionerRot[x].cwiseProduct(rRot); // apply preconditioner M^-1
state.d_zRot[x] = zRot; // save for next kernel call
Vec3f zTrans = state.d_precondionerTrans[x].cwiseProduct(rTrans); // apply preconditioner M^-1
state.d_zTrans[x] = zTrans; // save for next kernel call
        b = zRot.dot(rRot) + zTrans.dot(rTrans);        // compute x-th term of the numerator of beta
}
b = warpReduce(b);
if (threadIdx.x % WARP_SIZE == 0) {
atomicAdd(&state.d_scanAlpha[1], b);
}
}
template<bool lastIteration>
__global__ void PCGStep_Kernel3(SolverInput input, SolverState state) {
const unsigned int N = input.numberOfImages;
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x > 0 && x < N) {
        const float rDotzNew = state.d_scanAlpha[1];                                // get new numerator
const float rDotzOld = state.d_rDotzOld[x]; // get old denominator
float beta = 0.0f;
if (rDotzOld > FLOAT_EPSILON) beta = rDotzNew / rDotzOld; // update step size beta
state.d_rDotzOld[x] = rDotzNew; // save new rDotz for next iteration
        state.d_pRot[x] = state.d_zRot[x] + beta * state.d_pRot[x];        // update descent direction
        state.d_pTrans[x] = state.d_zTrans[x] + beta * state.d_pTrans[x];    // update descent direction
state.d_Ap_XRot[x].setZero();
state.d_Ap_XTrans[x].setZero();
if (lastIteration) {
Vec3f rot, trans;
computeLieUpdate(state.d_deltaRot[x], state.d_deltaTrans[x], state.d_xRot[x], state.d_xTrans[x],
rot, trans);
state.d_xRot[x] = rot;
state.d_xTrans[x] = trans;
}
}
}
void Initialization(SolverInput &input, SolverState &state, SolverParameters ¶meters) {
const unsigned int N = input.numberOfImages;
const int blocksPerGrid = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
if (blocksPerGrid > THREADS_PER_BLOCK) {
std::cout << "Too many variables for this block size. Maximum number of variables for two kernel scan: "
<< THREADS_PER_BLOCK * THREADS_PER_BLOCK << std::endl;
while (1);
}
CUDA_SAFE_CALL(hipMemset(state.d_scanAlpha, 0, sizeof(float)));
PCGInit_Kernel1 << < blocksPerGrid, THREADS_PER_BLOCK >> > (input, state, parameters);
PCGInit_Kernel2 << < blocksPerGrid, THREADS_PER_BLOCK >> > (N, state);
}
bool PCGIteration(SolverInput &input, SolverState &state, SolverParameters ¶meters, bool lastIteration) {
const unsigned int N = input.numberOfImages; // Number of block variables
// Do PCG step
const int blocksPerGrid = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
if (blocksPerGrid > THREADS_PER_BLOCK) {
std::cout << "Too many variables for this block size. Maximum number of variables for two kernel scan: "
<< THREADS_PER_BLOCK * THREADS_PER_BLOCK << std::endl;
while (1);
}
CUDA_SAFE_CALL(hipMemset(state.d_scanAlpha, 0, sizeof(float) * 2));
PCGStep_Kernel_Dense << < N, THREADS_PER_BLOCK_JT_DENSE >> > (input, state, parameters);
PCGStep_Kernel1b << < blocksPerGrid, THREADS_PER_BLOCK >> > (input, state, parameters);
PCGStep_Kernel2 << < blocksPerGrid, THREADS_PER_BLOCK >> > (input, state);
float scanAlpha;
CUDA_SAFE_CALL(hipMemcpy(&scanAlpha, state.d_scanAlpha, sizeof(float), hipMemcpyDeviceToHost));
if (fabs(scanAlpha) < 5e-7) {
lastIteration = true;
} //todo check this part
if (lastIteration)
PCGStep_Kernel3<true> << < blocksPerGrid, THREADS_PER_BLOCK >> > (input, state);
else
PCGStep_Kernel3<false> << < blocksPerGrid, THREADS_PER_BLOCK >> > (input, state);
return lastIteration;
}
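// A minimal host-side driver sketch (assumption: the outer Gauss-Newton loop and the iteration
// counts `numGNIterations` / `numPCGIterations` live elsewhere and are not part of this file):
//
//   for (unsigned int gn = 0; gn < numGNIterations; gn++) {
//       convertLiePosesToMatrices(state.d_xRot, state.d_xTrans, input.numberOfImages,
//                                 state.d_xTransforms, state.d_xTransformInverses);
//       if (!BuildDenseSystem(input, state, parameters)) break;   // no overlapping image pairs
//       Initialization(input, state, parameters);                 // PCG init: residual, direction, preconditioner
//       for (unsigned int it = 0; it < numPCGIterations; it++)
//           if (PCGIteration(input, state, parameters, it == numPCGIterations - 1)) break;
//   }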
inline __device__ float
bilinearInterpolation(float x, float y, const float *d_input, unsigned int imageWidth, unsigned int imageHeight) {
const Vec2i p00(floorf(x), floorf(y));
const Vec2i p01 = p00 + Vec2i(0.0f, 1.0f);
const Vec2i p10 = p00 + Vec2i(1.0f, 0.0f);
const Vec2i p11 = p00 + Vec2i(1.0f, 1.0f);
const float alpha = x - p00(0);
const float beta = y - p00(1);
float s0 = 0.0f;
float w0 = 0.0f;
if (p00(0) < imageWidth && p00(1) < imageHeight) {
float v00 = d_input[p00(1) * imageWidth + p00(0)];
if (v00 != FLT_MAX) {
s0 += (1.0f - alpha) * v00;
w0 += (1.0f - alpha);
}
}
if (p10(0) < imageWidth && p10(1) < imageHeight) {
float v10 = d_input[p10(1) * imageWidth + p10(0)];
if (v10 != FLT_MAX) {
s0 += alpha * v10;
w0 += alpha;
}
}
float s1 = 0.0f;
float w1 = 0.0f;
if (p01(0) < imageWidth && p01(1) < imageHeight) {
float v01 = d_input[p01(1) * imageWidth + p01(0)];
if (v01 != FLT_MAX) {
s1 += (1.0f - alpha) * v01;
w1 += (1.0f - alpha);
}
}
if (p11(0) < imageWidth && p11(1) < imageHeight) {
float v11 = d_input[p11(1) * imageWidth + p11(0)];
if (v11 != FLT_MAX) {
s1 += alpha * v11;
w1 += alpha;
}
}
const float p0 = s0 / w0;
const float p1 = s1 / w1;
float ss = 0.0f;
float ww = 0.0f;
if (w0 > 0.0f) {
ss += (1.0f - beta) * p0;
ww += (1.0f - beta);
}
if (w1 > 0.0f) {
ss += beta * p1;
ww += beta;
}
if (ww > 0.0f) return ss / ww;
else return FLT_MAX;
}
inline __device__ Vec2f
bilinearInterpolation(float x, float y, const Vec2f *d_input, unsigned int imageWidth, unsigned int imageHeight) {
const Vec2i p00(floorf(x), floorf(y));
const Vec2i p01 = p00 + Vec2i(0.0f, 1.0f);
const Vec2i p10 = p00 + Vec2i(1.0f, 0.0f);
const Vec2i p11 = p00 + Vec2i(1.0f, 1.0f);
const float alpha = x - p00(0);
const float beta = y - p00(1);
Vec2f s0(0.0f, 0.0f);
float w0 = 0.0f;
if (p00(0) < imageWidth && p00(1) < imageHeight) {
Vec2f v00 = d_input[p00(1) * imageWidth + p00(0)];
if (v00(0) != FLT_MAX) {
s0 += (1.0f - alpha) * v00;
w0 += (1.0f - alpha);
}
}
if (p10(0) < imageWidth && p10(1) < imageHeight) {
Vec2f v10 = d_input[p10(1) * imageWidth + p10(0)];
if (v10(0) != FLT_MAX) {
s0 += alpha * v10;
w0 += alpha;
}
}
Vec2f s1(0.0f, 0.0f);
float w1 = 0.0f;
if (p01(0) < imageWidth && p01(1) < imageHeight) {
Vec2f v01 = d_input[p01(1) * imageWidth + p01(0)];
if (v01(0) != FLT_MAX) {
s1 += (1.0f - alpha) * v01;
w1 += (1.0f - alpha);
}
}
if (p11(0) < imageWidth && p11(1) < imageHeight) {
Vec2f v11 = d_input[p11(1) * imageWidth + p11(0)];
if (v11(0) != FLT_MAX) {
s1 += alpha * v11;
w1 += alpha;
}
}
const Vec2f p0 = s0 / w0;
const Vec2f p1 = s1 / w1;
Vec2f ss(0.0f, 0.0f);
float ww = 0.0f;
if (w0 > 0.0f) {
ss += (1.0f - beta) * p0;
ww += (1.0f - beta);
}
if (w1 > 0.0f) {
ss += beta * p1;
ww += beta;
}
if (ww > 0.0f) return ss / ww;
else return Vec2f(FLT_MAX, FLT_MAX);
}
inline __device__ Vec4f
bilinearInterpolation(float x, float y, const Vec4f *d_input, unsigned int imageWidth, unsigned int imageHeight) {
const Vec2i p00(floorf(x), floorf(y));
const Vec2i p01 = p00 + Vec2i(0.0f, 1.0f);
const Vec2i p10 = p00 + Vec2i(1.0f, 0.0f);
const Vec2i p11 = p00 + Vec2i(1.0f, 1.0f);
const float alpha = x - p00(0);
const float beta = y - p00(1);
Vec4f s0(0.f, 0.f, 0.f, 0.f);
float w0 = 0.0f;
if (p00(0) < imageWidth && p00(1) < imageHeight) {
Vec4f v00 = d_input[p00(1) * imageWidth + p00(0)];
if (v00(0) != FLT_MAX) {
s0 += (1.0f - alpha) * v00;
w0 += (1.0f - alpha);
}
}
if (p10(0) < imageWidth && p10(1) < imageHeight) {
Vec4f v10 = d_input[p10(1) * imageWidth + p10(0)];
if (v10(0) != FLT_MAX) {
s0 += alpha * v10;
w0 += alpha;
}
}
Vec4f s1(0.f, 0.f, 0.f, 0.f);
float w1 = 0.0f;
if (p01(0) < imageWidth && p01(1) < imageHeight) {
Vec4f v01 = d_input[p01(1) * imageWidth + p01(0)];
if (v01(0) != FLT_MAX) {
s1 += (1.0f - alpha) * v01;
w1 += (1.0f - alpha);
}
}
if (p11(0) < imageWidth && p11(1) < imageHeight) {
Vec4f v11 = d_input[p11(1) * imageWidth + p11(0)];
if (v11(0) != FLT_MAX) {
s1 += alpha * v11;
w1 += alpha;
}
}
const Vec4f p0 = s0 / w0;
const Vec4f p1 = s1 / w1;
Vec4f ss(0.f, 0.f, 0.f, 0.f);
float ww = 0.0f;
if (w0 > 0.0f) {
ss += (1.0f - beta) * p0;
ww += (1.0f - beta);
}
if (w1 > 0.0f) {
ss += beta * p1;
ww += beta;
}
if (ww > 0.0f) return ss / ww;
else return Vec4f(FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX);
}
__inline__ __device__ Vec2f cameraToDepth(float fx, float fy, float cx, float cy, const Vec4f &pos) {
return Vec2f(pos(0) * fx / pos(2) + cx, pos(1) * fy / pos(2) + cy);
}
__inline__ __device__ Vec4f depthToCamera(float fx, float fy, float cx, float cy, const Vec2i &loc, float depth) {
const float x = ((float) loc(0) - cx) / fx;
const float y = ((float) loc(1) - cy) / fy;
return Vec4f(depth * x, depth * y, depth, 1.0f);
}
__global__ void convertLiePosesToMatrices_Kernel(const Vec3f *d_rot, const Vec3f *d_trans, unsigned int numTransforms,
Mat4f *d_transforms, Mat4f *d_transformInvs) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < numTransforms) {
poseToMatrix(d_rot[idx], d_trans[idx], d_transforms[idx]);
d_transformInvs[idx] = d_transforms[idx].inverse();
}
}
void
convertLiePosesToMatrices(const Vec3f *d_rot, const Vec3f *d_trans, unsigned int numTransforms, Mat4f *d_transforms,
Mat4f *d_transformInvs) {
convertLiePosesToMatrices_Kernel << < (numTransforms + 8 - 1) / 8, 8 >> > (d_rot, d_trans, numTransforms,
d_transforms, d_transformInvs);
}
__global__ void convertMatricesToLiePoses_Kernel(const Mat4f *d_transforms, unsigned int numTransforms,
Vec3f *d_rot, Vec3f *d_trans) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < numTransforms) {
matrixToPose(d_transforms[idx], d_rot[idx], d_trans[idx]);
}
}
void convertMatricesToLiePoses(const Mat4f *d_transforms, unsigned int numTransforms, Vec3f *d_rot, Vec3f *d_trans) {
convertMatricesToLiePoses_Kernel << < (numTransforms + 8 - 1) / 8, 8 >> > (d_transforms, numTransforms,
d_rot, d_trans);
}
__inline__ __device__ bool computeAngleDiff(const Mat4f &transform, float angleThresh) {
Vec3f x(1.0f, 1.0f, 1.0f);
x.normalize();
Vec3f v = transform.topLeftCorner(3, 3) * x;
float angle = acosf(fmaxf(fminf(x.dot(v), 1.0f), -1.0f));
return fabsf(angle) < angleThresh;
}
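// Returns true when the rotation part of `transform` moves the reference direction
// (1,1,1)/sqrt(3) by less than angleThresh radians, a cheap proxy for the relative
// rotation angle between the two poses.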
__inline__ __device__ bool isInBoundingBox(const Vec4f cpos, const Mat4f &c2g_transform,
const Vec3f &boundingMin, const Vec3f &boundingMax) {
Vec4f gpos = c2g_transform * cpos;
for (int i = 0; i < 3; ++i) {
if (gpos(i) > boundingMax(i) || gpos(i) < boundingMin(i))
return false;
}
return true;
}
//for pre-filter, no need for normal threshold
__inline__ __device__ bool findDenseCorr(unsigned int idx, unsigned int imageWidth, unsigned int imageHeight,
float distThresh, const Mat4f &transformi_inv, const Mat4f &transformj,
const Vec4f &intrinsics,
const float *tgtDepth, const float *srcDepth,
float depthMin, float depthMax,
Vec3f &boundingMin, Vec3f &boundingMax) {
unsigned int x = idx % imageWidth;
unsigned int y = idx / imageWidth;
Vec2i loc(x, y);
const Vec4f cposj = depthToCamera(intrinsics(0), intrinsics(1), intrinsics(2), intrinsics(3), loc, srcDepth[idx]);
if (cposj(2) > depthMin && cposj(2) < depthMax && isInBoundingBox(cposj, transformj, boundingMin, boundingMax)) {
Vec4f camPosSrcToTgt = transformi_inv * transformj * cposj;
Vec2f tgtScreenPosf = cameraToDepth(intrinsics(0), intrinsics(1), intrinsics(2), intrinsics(3),
camPosSrcToTgt);
Vec2i tgtScreenPos((int) roundf(tgtScreenPosf(0)), (int) roundf(tgtScreenPosf(1)));
if (tgtScreenPos(0) >= 0 && tgtScreenPos(1) >= 0 && tgtScreenPos(0) < (int) imageWidth &&
tgtScreenPos(1) < (int) imageHeight) {
Vec4f camPosTgt = depthToCamera(intrinsics(0), intrinsics(1), intrinsics(2), intrinsics(3),
tgtScreenPos,
tgtDepth[tgtScreenPos(1) * imageWidth + tgtScreenPos(0)]);
if (camPosTgt(2) > depthMin && camPosTgt(2) < depthMax) {
Vec4f diff = camPosSrcToTgt - camPosTgt;
if (diff.norm() <= distThresh) {
return true;
}
}
} // valid projection
} // valid src camera position
return false;
}
__inline__ __device__ bool findDenseCorr(unsigned int idx, unsigned int imageWidth, unsigned int imageHeight,
float distThresh, float normalThresh, const Mat4f &transformi_inv,
const Mat4f &transformj, const Vec4f &intrinsics,
const float *tgtDepth, const Vec4f *tgtNormals,
const float *srcDepth, const Vec4f *srcNormals,
float depthMin, float depthMax,
Vec3f &boundingMin, Vec3f &boundingMax) {
unsigned int x = idx % imageWidth;
unsigned int y = idx / imageWidth;
Vec2i loc(x, y);
const Vec4f cposj = depthToCamera(intrinsics(0), intrinsics(1), intrinsics(2), intrinsics(3), loc, srcDepth[idx]);
if (cposj(2) > depthMin && cposj(2) < depthMax && isInBoundingBox(cposj, transformj, boundingMin, boundingMax)) {
Vec4f nrmj = srcNormals[idx];
if (nrmj(0) != FLT_MAX) {
nrmj = transformi_inv * transformj * nrmj;
Vec4f camPosSrcToTgt = transformi_inv * transformj * cposj;
Vec2f tgtScreenPosf = cameraToDepth(intrinsics(0), intrinsics(1), intrinsics(2), intrinsics(3),
camPosSrcToTgt);
Vec2i tgtScreenPos((int) roundf(tgtScreenPosf(0)), (int) roundf(tgtScreenPosf(1)));
if (tgtScreenPos(0) >= 0 && tgtScreenPos(1) >= 0 && tgtScreenPos(0) < (int) imageWidth &&
tgtScreenPos(1) < (int) imageHeight) {
Vec4f camPosTgt = depthToCamera(intrinsics(0), intrinsics(1), intrinsics(2), intrinsics(3),
tgtScreenPos,
tgtDepth[tgtScreenPos(1) * imageWidth + tgtScreenPos(0)]);
if (camPosTgt(2) > depthMin && camPosTgt(2) < depthMax) {
Vec4f normalTgt = tgtNormals[tgtScreenPos(1) * imageWidth + tgtScreenPos(0)];
if (normalTgt(0) != FLT_MAX) {
Vec4f diff = camPosSrcToTgt - camPosTgt;
float dist = diff.norm();
float dNormal = nrmj.dot(normalTgt);
if (dNormal >= normalThresh && dist <= distThresh) {
return true;
}
}
}
} // valid projection
} // valid src normal
} // valid src camera position
return false;
}
//using camera positions
__device__ bool findDenseCorr(unsigned int idx, unsigned int imageWidth, unsigned int imageHeight,
float distThresh, float normalThresh, const Mat4f &transformi_inv,
const Mat4f &transformj, const Vec4f &intrinsics,
const Vec4f *tgtCamPos, const Vec4f *tgtNormals,
const Vec4f *srcCamPos, const Vec4f *srcNormals,
float depthMin, float depthMax,
Vec3f &boundingMin, Vec3f &boundingMax,
Vec4f &camPosSrc, Vec4f &camPosSrcToTgt,
Vec2f &tgtScreenPosf, Vec4f &camPosTgt, Vec4f &normalTgt) {
const Vec4f cposj = srcCamPos[idx];
if (cposj(2) > depthMin && cposj(2) < depthMax && isInBoundingBox(cposj, transformj, boundingMin, boundingMax)) {
camPosSrc = cposj;
Vec4f nrmj = srcNormals[idx];
if (nrmj(0) != FLT_MAX) {
nrmj = transformi_inv * transformj * nrmj;
camPosSrcToTgt = transformi_inv * transformj * camPosSrc;
tgtScreenPosf = cameraToDepth(intrinsics(0), intrinsics(1), intrinsics(2), intrinsics(3), camPosSrcToTgt);
Vec2i tgtScreenPos((int) roundf(tgtScreenPosf(0)), (int) roundf(tgtScreenPosf(1)));
if (tgtScreenPos(0) >= 0 && tgtScreenPos(1) >= 0 && tgtScreenPos(0) < (int) imageWidth &&
tgtScreenPos(1) < (int) imageHeight) {
Vec4f cposi = bilinearInterpolation(tgtScreenPosf(0), tgtScreenPosf(1), tgtCamPos,
imageWidth, imageHeight);
if (cposi(2) > depthMin && cposi(2) < depthMax) {
camPosTgt = cposi;
Vec4f nrmi = bilinearInterpolation(tgtScreenPosf(0), tgtScreenPosf(1), tgtNormals,
imageWidth, imageHeight);
if (nrmi(0) != FLT_MAX) {
normalTgt = nrmi;
Vec4f diff = camPosSrcToTgt - camPosTgt;
float dist = diff.norm();
float dNormal = nrmj.dot(nrmi);
if (dNormal >= normalThresh && dist <= distThresh) {
return true;
}
}
}
} // valid projection
} // valid src normal
} // valid src camera position
return false;
}
__inline__ __device__ Mat3f VectorToSkewSymmetricMatrix(const Vec3f &v) {
Mat3f res;
res.setZero();
res(1, 0) = v(2);
res(2, 0) = -v(1);
res(2, 1) = v(0);
res(0, 1) = -v(2);
res(0, 2) = v(1);
res(1, 2) = -v(0);
return res;
}
inline __device__ Mat23f dCameraToScreen(const Vec3f &p, float fx, float fy) {
Mat23f res;
res.setZero();
const float wSquared = p(2) * p(2);
res(0, 0) = fx / p(2);
res(1, 1) = fy / p(2);
res(0, 2) = -fx * p(0) / wSquared;
res(1, 2) = -fy * p(1) / wSquared;
return res;
}
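// Jacobian of the pinhole projection (x,y,z) -> (fx*x/z, fy*y/z):
//   [ fx/z    0    -fx*x/z^2 ]
//   [  0    fy/z   -fy*y/z^2 ]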
/////////////////////////////////////////////////////////////////////////
// deriv for Ti: (A * e^e * D)^{-1} * p; A = Tj^{-1}; D = Ti
/////////////////////////////////////////////////////////////////////////
__inline__ __device__ Mat36f evalLie_derivI(const Mat4f &A, const Mat4f &D, const Vec3f &p) {
Mat312f j0;
Mat126f j1;
const Mat4f transform = A * D;
Vec3f pt = p - transform.topRightCorner(3, 1);
j0.setZero();
j1.setZero();
j0(0, 0) = pt(0);
j0(0, 1) = pt(1);
j0(0, 2) = pt(2);
j0(1, 3) = pt(0);
j0(1, 4) = pt(1);
j0(1, 5) = pt(2);
j0(2, 6) = pt(0);
j0(2, 7) = pt(1);
j0(2, 8) = pt(2);
for (unsigned int r = 0; r < 3; r++) {
for (unsigned int c = 0; c < 3; c++) {
j0(r, c + 9) = -transform(c, r); //-R(AD)^T
j1(r + 9, c) = A(r, c); // R(A)
}
}
Mat3f RA = A.topLeftCorner(3, 3);
for (unsigned int k = 0; k < 4; k++) {
Vec3f v(D(0, k), D(1, k), D(2, k));
Mat3f ss = VectorToSkewSymmetricMatrix(v);
Mat3f m = RA * ss * -1.0f; //RA * col k of D
for (unsigned int r = 0; r < 3; r++) {
for (unsigned int c = 0; c < 3; c++)
j1(3 * k + r, 3 + c) = m(r, c);
}
}
return j0 * j1;
}
/////////////////////////////////////////////////////////////////////////
// deriv for Tj: (A * e^e * D) * p; A = Ti^{-1}; D = Tj
/////////////////////////////////////////////////////////////////////////
__inline__ __device__ Mat36f evalLie_derivJ(const Mat4f &A, const Mat4f &D, const Vec3f &p) {
Vec3f dr1(D(0, 0), D(0, 1), D(0, 2)); //rows of D (rotation part)
Vec3f dr2(D(1, 0), D(1, 1), D(1, 2));
Vec3f dr3(D(2, 0), D(2, 1), D(2, 2));
float dtx = D(0, 3); //translation of D
float dty = D(1, 3);
float dtz = D(2, 3);
Mat36f jac;
jac(0, 0) = 1.0f;
jac(0, 1) = 0.0f;
jac(0, 2) = 0.0f;
jac(1, 0) = 0.0f;
jac(1, 1) = 1.0f;
jac(1, 2) = 0.0f;
jac(2, 0) = 0.0f;
jac(2, 1) = 0.0f;
jac(2, 2) = 1.0f;
jac(0, 3) = 0.0f;
jac(0, 4) = p.dot(dr3) + dtz;
jac(0, 5) = -(p.dot(dr2) + dty);
jac(1, 3) = -(p.dot(dr3) + dtz);
jac(1, 4) = 0.0f;
jac(1, 5) = p.dot(dr1) + dtx;
jac(2, 3) = p.dot(dr2) + dty;
jac(2, 4) = -(p.dot(dr1) + dtx);
jac(2, 5) = 0.0f;
jac = A.topLeftCorner(3, 3) * jac;
return jac;
}
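// Equivalently: jac = R_A * [ I_3 | -[D*p + t_D]_x ], with the translation derivative in
// columns 0-2 and the rotation derivative in columns 3-5 (evaluated at epsilon = 0).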
__inline__ __device__ void computeJacobianBlockRow_i(Vec6f &jacBlockRow, const Mat4f &transform_i,
const Mat4f &invTransform_j, const Vec4f &camPosSrc,
const Vec4f &normalTgt) {
Vec3f camPosSrc_ = camPosSrc.head(3);
Vec3f normalTgt_ = normalTgt.head(3);
Mat36f jac = evalLie_derivI(invTransform_j, transform_i, camPosSrc_);
for (unsigned int i = 0; i < 6; i++) {
Vec3f v(jac(0, i), jac(1, i), jac(2, i));
jacBlockRow(i) = -v.dot(normalTgt_); //rot
}
}
__inline__ __device__ void computeJacobianBlockRow_j(Vec6f &jacBlockRow, const Mat4f &invTransform_i,
const Mat4f &transform_j, const Vec4f &camPosSrc,
const Vec4f &normalTgt) {
Vec3f camPosSrc_ = camPosSrc.head(3);
Vec3f normalTgt_ = normalTgt.head(3);
Mat36f jac = evalLie_derivJ(invTransform_i, transform_j, camPosSrc_);
for (unsigned int i = 0; i < 6; i++) {
Vec3f v(jac(0, i), jac(1, i), jac(2, i));
jacBlockRow(i) = -v.dot(normalTgt_); //rot
}
}
__inline__ __device__ void
computeJacobianBlockIntensityRow_i(Vec6f &jacBlockRow, const Vec2f &colorFocal, const Mat4f &transform_i,
const Mat4f &invTransform_j, const Vec4f &camPosSrc, const Vec4f &camPosSrcToTgt,
const Vec2f &intensityDerivTgt) {
Vec3f camPosSrc_ = camPosSrc.head(3);
Vec3f camPosSrcToTgt_ = camPosSrcToTgt.head(3);
Mat36f jac = evalLie_derivI(invTransform_j, transform_i, camPosSrc_);
Mat23f dProj = dCameraToScreen(camPosSrcToTgt_, colorFocal(0), colorFocal(1));
Vec2f dColorB = intensityDerivTgt;
jacBlockRow = jac.transpose() * dProj.transpose() * dColorB;
}
__inline__ __device__ void
computeJacobianBlockIntensityRow_j(Vec6f &jacBlockRow, const Vec2f &colorFocal, const Mat4f &invTransform_i,
const Mat4f &transform_j, const Vec4f &camPosSrc, const Vec4f &camPosSrcToTgt,
const Vec2f &intensityDerivTgt) {
Vec3f camPosSrc_ = camPosSrc.head(3);
Vec3f camPosSrcToTgt_ = camPosSrcToTgt.head(3);
Mat36f jac = evalLie_derivJ(invTransform_i, transform_j, camPosSrc_);
Mat23f dProj = dCameraToScreen(camPosSrcToTgt_, colorFocal(0), colorFocal(1));
Vec2f dColorB = intensityDerivTgt;
jacBlockRow = jac.transpose() * dProj.transpose() * dColorB;
}
////////////////////////////////////////
// build jtj/jtr
////////////////////////////////////////
__inline__ __device__ void
addToLocalSystem(bool isValidCorr, float *d_JtJ, float *d_Jtr, float *d_J, unsigned int dim,
const Vec6f &jacobianBlockRow_i, const Vec6f &jacobianBlockRow_j,
unsigned int vi, unsigned int vj, float residual, float weight, unsigned int tidx) {
//fill in bottom half (vi < vj) -> x < y
for (unsigned int i = 0; i < 6; i++) {
for (unsigned int j = i; j < 6; j++) {
float dii = 0.0f;
float djj = 0.0f;
float dij = 0.0f;
float dji = 0.0f;
__shared__ float s_partJtJ[4];
if (tidx == 0) {
for (unsigned int k = 0; k < 4; k++)
s_partJtJ[k] = 0;
} //TODO try with first 4 threads for all tidx == 0
if (isValidCorr) {
if (vi > 0) {
dii = jacobianBlockRow_i(i) * jacobianBlockRow_i(j) * weight;
}
if (vj > 0) {
djj = jacobianBlockRow_j(i) * jacobianBlockRow_j(j) * weight;
}
if (vi > 0 && vj > 0) {
dij = jacobianBlockRow_i(i) * jacobianBlockRow_j(j) * weight;
if (i != j) {
dji = jacobianBlockRow_i(j) * jacobianBlockRow_j(i) * weight;
}
}
}
dii = warpReduce(dii);
djj = warpReduce(djj);
dij = warpReduce(dij);
dji = warpReduce(dji);
__syncthreads();
if (tidx % WARP_SIZE == 0) {
atomicAdd(&s_partJtJ[0], dii);
atomicAdd(&s_partJtJ[1], djj);
atomicAdd(&s_partJtJ[2], dij);
atomicAdd(&s_partJtJ[3], dji);
}
__syncthreads();
if (tidx == 0) {
atomicAdd(&d_JtJ[(vi * 6 + j) * dim + (vi * 6 + i)], s_partJtJ[0]);
atomicAdd(&d_JtJ[(vj * 6 + j) * dim + (vj * 6 + i)], s_partJtJ[1]);
////JitJj JjtJi
atomicAdd(&d_JtJ[(vj * 6 + j) * dim + (vi * 6 + i)], s_partJtJ[2]);
atomicAdd(&d_JtJ[(vj * 6 + i) * dim + (vi * 6 + j)], s_partJtJ[3]);
}
}
float jtri = 0.0f;
float jtrj = 0.0f;
__shared__ float s_partJtr[2];
if (tidx == 0) { for (unsigned int k = 0; k < 2; k++) s_partJtr[k] = 0; }
if (isValidCorr) {
if (vi > 0) jtri = jacobianBlockRow_i(i) * residual * weight;
if (vj > 0) jtrj = jacobianBlockRow_j(i) * residual * weight;
}
jtri = warpReduce(jtri);
jtrj = warpReduce(jtrj);
__syncthreads();
if (tidx % WARP_SIZE == 0) {
atomicAdd(&s_partJtr[0], jtri);
atomicAdd(&s_partJtr[1], jtrj);
}
__syncthreads();
if (tidx == 0) {
atomicAdd(&d_Jtr[vi * 6 + i], s_partJtr[0]);
atomicAdd(&d_Jtr[vj * 6 + i], s_partJtr[1]);
}
#ifdef DEBUG
float Ji = 0.f;
float Jj = 0.f;
if (isValidCorr) {
if (vi > 0)
Ji = jacobianBlockRow_i(i);
if (vj > 0)
Jj = jacobianBlockRow_j(i);
}
Ji = warpReduce(Ji);
Jj = warpReduce(Jj);
if (tidx % WARP_SIZE == 0) {
atomicAdd(&d_J[vi * 6 + i], Ji);
atomicAdd(&d_J[vj * 6 + i], Jj);
}
#endif
}
}
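// Accumulates the weighted 6x6 blocks J_i^T J_i, J_j^T J_j and the J_i^T J_j coupling block
// into the dense JtJ (lower triangle; FlipJtJ_Kernel mirrors it afterwards) and the weighted
// J^T r entries into d_Jtr, one scalar at a time using warp and shared-memory reductions.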
//
__global__ void FindImageImageCorr_Kernel(SolverInput input, SolverState state, SolverParameters parameters) {
// image indices
unsigned int i, j; // project from j to i
i = blockIdx.x;
j = blockIdx.y; // all pairwise
if (i >= j) return;
const unsigned int tidx = threadIdx.x;
const unsigned int subWidth = input.denseDepthWidth / parameters.denseOverlapCheckSubsampleFactor;
const unsigned int x = (tidx % subWidth) * parameters.denseOverlapCheckSubsampleFactor;
const unsigned int y = (tidx / subWidth) * parameters.denseOverlapCheckSubsampleFactor;
const unsigned int idx = y * input.denseDepthWidth + x;
if (idx < (input.denseDepthWidth * input.denseDepthHeight)) {
Mat4f transform = state.d_xTransformInverses[i] * state.d_xTransforms[j];
//if (!computeAngleDiff(transform, 1.0f)) return; //~60 degrees
if (!computeAngleDiff(transform, 0.52f)) return; //TODO ~30 degrees
// find correspondence
__shared__ int foundCorr[1];
foundCorr[0] = 0;
__syncthreads();
if (findDenseCorr(idx, input.denseDepthWidth, input.denseDepthHeight,
parameters.denseDistThresh, state.d_xTransformInverses[i],
state.d_xTransforms[j], input.intrinsics,
input.d_cacheFrames[i].d_depthDownsampled, input.d_cacheFrames[j].d_depthDownsampled,
parameters.denseDepthMin, parameters.denseDepthMax,
parameters.boundingMin, parameters.boundingMax)) { //i tgt, j src
atomicAdd(foundCorr, 1);
} // found correspondence
__syncthreads();
if (tidx == 0) {
if (foundCorr[0] > parameters.minNumOverlapCorr) {
int addr = atomicAdd(state.d_numDenseOverlappingImages, 1);
state.d_denseOverlappingImages[addr] = make_uint2(i, j);
}
}
} // valid image pixel
}
__global__ void FindDenseCorrespondences_Kernel(SolverInput input, SolverState state, SolverParameters parameters) {
const int imPairIdx = blockIdx.x; //should not go out of bounds, no need to check
uint2 imageIndices = state.d_denseOverlappingImages[imPairIdx];
unsigned int i = imageIndices.x;
unsigned int j = imageIndices.y;
const unsigned int tidx = threadIdx.x;
const unsigned int gidx = tidx * gridDim.y + blockIdx.y;
if (gidx < (input.denseDepthWidth * input.denseDepthHeight)) {
// find correspondence
const int numWarps = THREADS_PER_BLOCK_DENSE_DEPTH / WARP_SIZE;
__shared__ int s_count[numWarps];
s_count[0] = 0;
int count = 0;
if (findDenseCorr(gidx, input.denseDepthWidth, input.denseDepthHeight,
parameters.denseDistThresh, parameters.denseNormalThresh, state.d_xTransformInverses[i],
state.d_xTransforms[j], input.intrinsics,
input.d_cacheFrames[i].d_depthDownsampled, input.d_cacheFrames[i].d_normalsDownsampled,
input.d_cacheFrames[j].d_depthDownsampled, input.d_cacheFrames[j].d_normalsDownsampled,
parameters.denseDepthMin, parameters.denseDepthMax,
parameters.boundingMin, parameters.boundingMax)) { //i tgt, j src
count++;
} // found correspondence
count = warpReduce(count);
__syncthreads();
if (tidx % WARP_SIZE == 0) {
s_count[tidx / WARP_SIZE] = count;
}
__syncthreads();
for (unsigned int stride = numWarps / 2; stride > 0; stride /= 2) {
if (tidx < stride) s_count[tidx] = s_count[tidx] + s_count[tidx + stride];
__syncthreads();
}
if (tidx == 0) {
atomicAdd(&state.d_denseCorrCounts[imPairIdx], s_count[0]);
}
} // valid image pixel
}
__global__ void WeightDenseCorrespondences_Kernel(unsigned int N, SolverState state, SolverParameters parameters) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N) {
// apply ln to weights
float x = state.d_denseCorrCounts[idx];
if (x > 0) {
if (x < parameters.minNumDenseCorr) {//TODO change
state.d_denseCorrCounts[idx] = 0; //don't consider too small #corr
} else {
state.d_denseCorrCounts[idx] = 1.0f / fminf(logf(x), 9.0f); // natural log
}
}
}
}
template<bool useDepth, bool useColor>
__global__ void BuildDenseSystem_Kernel(SolverInput input, SolverState state, SolverParameters parameters) {
const int imPairIdx = blockIdx.x;
uint2 imageIndices = state.d_denseOverlappingImages[imPairIdx];
unsigned int i = imageIndices.x;
unsigned int j = imageIndices.y;
float imPairWeight = state.d_denseCorrCounts[imPairIdx];
if (imPairWeight == 0.0f) return;
const unsigned int idx = threadIdx.x;
const unsigned int srcIdx = idx * gridDim.y + blockIdx.y;
if (srcIdx < (input.denseDepthWidth * input.denseDepthHeight)) {
Mat4f transform_i = state.d_xTransforms[i];
Mat4f transform_j = state.d_xTransforms[j];
Mat4f invTransform_i = state.d_xTransformInverses[i];
Mat4f invTransform_j = state.d_xTransformInverses[j];
// point-to-plane term
Vec6f depthJacBlockRow_i, depthJacBlockRow_j;
depthJacBlockRow_i.setZero();
depthJacBlockRow_j.setZero();
float depthRes = 0.0f;
float depthWeight = 0.0f;
// color term
Vec6f colorJacBlockRow_i, colorJacBlockRow_j;
colorJacBlockRow_i.setZero();
colorJacBlockRow_j.setZero();
float colorRes = 0.0f;
float colorWeight = 0.0f;
// find correspondence
Vec4f camPosSrc;
Vec4f camPosSrcToTgt;
Vec4f camPosTgt;
Vec4f normalTgt;
Vec2f tgtScreenPos;
bool foundCorr = findDenseCorr(srcIdx, input.denseDepthWidth, input.denseDepthHeight,
parameters.denseDistThresh, parameters.denseNormalThresh,
invTransform_i, transform_j, input.intrinsics,
input.d_cacheFrames[i].d_cameraposDownsampled,
input.d_cacheFrames[i].d_normalsDownsampled,
input.d_cacheFrames[j].d_cameraposDownsampled,
input.d_cacheFrames[j].d_normalsDownsampled,
parameters.denseDepthMin, parameters.denseDepthMax,
parameters.boundingMin, parameters.boundingMax,
camPosSrc, camPosSrcToTgt,
tgtScreenPos, camPosTgt, normalTgt); //i tgt, j src
if (useDepth) {
if (foundCorr) {
// point-to-plane residual
Vec4f diff = camPosTgt - camPosSrcToTgt;
depthRes = diff.dot(normalTgt);
depthWeight = parameters.weightDenseDepth * imPairWeight *
(powf(fmaxf(0.0f, 1.0f - camPosTgt(2) / (2.0f * parameters.denseDepthMax)),
2.5f)); //fr2_xyz_half
if (i > 0)
computeJacobianBlockRow_i(depthJacBlockRow_i, transform_i, invTransform_j, camPosSrc, normalTgt);
if (j > 0)
computeJacobianBlockRow_j(depthJacBlockRow_j, invTransform_i, transform_j, camPosSrc, normalTgt);
}
#ifdef DEBUG
float res = 0.f;
int num = 0;
if (foundCorr) {
res = depthRes;
num = 1;
}
res = warpReduce(res);
num = warpReduce(num);
if (idx % WARP_SIZE == 0) {
atomicAdd(&state.d_sumResidualDEBUG[imPairIdx], res);
atomicAdd(&state.d_numCorrDEBUG[imPairIdx], num);
}
#endif
addToLocalSystem(foundCorr, state.d_denseJtJ, state.d_denseJtr, state.d_J, input.numberOfImages * 6,
depthJacBlockRow_i, depthJacBlockRow_j, i, j, depthRes, depthWeight, idx);
}
if (useColor) {
bool foundCorrColor = false;
if (foundCorr) {
const Vec2f intensityDerivTgt = bilinearInterpolation(tgtScreenPos(0), tgtScreenPos(1),
input.d_cacheFrames[i].d_intensityDerivsDownsampled,
input.denseDepthWidth,
input.denseDepthHeight);
const float intensityTgt = bilinearInterpolation(tgtScreenPos(0), tgtScreenPos(1),
input.d_cacheFrames[i].d_intensityDownsampled,
input.denseDepthWidth, input.denseDepthHeight);
colorRes = intensityTgt - input.d_cacheFrames[j].d_intensityDownsampled[srcIdx];
foundCorrColor = (intensityTgt != FLT_MAX && intensityDerivTgt(0) != FLT_MAX &&
abs(colorRes) < parameters.denseColorThresh &&
intensityDerivTgt.norm() > parameters.denseColorGradientMin);
if (foundCorrColor) {
const Vec2f focalLength(input.intrinsics(0), input.intrinsics(1));
if (i > 0)
computeJacobianBlockIntensityRow_i(colorJacBlockRow_i, focalLength, transform_i, invTransform_j,
camPosSrc, camPosSrcToTgt, intensityDerivTgt);
if (j > 0)
computeJacobianBlockIntensityRow_j(colorJacBlockRow_j, focalLength, invTransform_i, transform_j,
camPosSrc, camPosSrcToTgt, intensityDerivTgt);
colorWeight = parameters.weightDenseColor * imPairWeight *
fmaxf(0.0f, 1.0f - abs(colorRes) / (1.15f * parameters.denseColorThresh));
}
}
#ifdef DEBUG
float res_c = 0.f;
int num_c = 0;
if (foundCorrColor) {
res_c = colorRes;
num_c = 1;
}
res_c = warpReduce(res_c);
num_c = warpReduce(num_c);
if (idx % WARP_SIZE == 0) {
atomicAdd(&state.d_sumResidualColorDEBUG[imPairIdx], res_c);
atomicAdd(&state.d_numCorrColorDEBUG[imPairIdx], num_c);
}
#endif
addToLocalSystem(foundCorrColor, state.d_denseJtJ, state.d_denseJtr, state.d_J, input.numberOfImages * 6,
colorJacBlockRow_i, colorJacBlockRow_j, i, j, colorRes, colorWeight, idx);
}
} // valid image pixel
}
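// Symmetrize the dense JtJ: only the lower triangle is filled during assembly, so mirror it into the upper triangle.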
__global__ void FlipJtJ_Kernel(unsigned int total, unsigned int dim, float *d_JtJ) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < total) {
const unsigned int x = idx % dim;
const unsigned int y = idx / dim;
if (x > y) {
d_JtJ[y * dim + x] = d_JtJ[x * dim + y];
}
}
}
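// Host-side driver for the dense system: find overlapping image pairs, count dense correspondences
// per pair, turn the counts into pair weights, assemble JtJ/Jtr from the depth (and optionally color)
// terms, and finally symmetrize JtJ. Returns false if no overlapping pairs exist or the dense weights
// are invalid.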
bool BuildDenseSystem(const SolverInput &input, SolverState &state, SolverParameters ¶meters) {
const unsigned int N = input.numberOfImages;
const int sizeJtr = 6 * N;
const int sizeJtJ = sizeJtr * sizeJtr;
const unsigned int maxDenseImPairs = input.numberOfImages * (input.numberOfImages - 1) / 2;
CUDA_SAFE_CALL(hipMemset(state.d_denseCorrCounts, 0, sizeof(float) * maxDenseImPairs));
CUDA_SAFE_CALL(hipMemset(state.d_denseJtJ, 0, sizeof(float) * sizeJtJ));
CUDA_SAFE_CALL(hipMemset(state.d_denseJtr, 0, sizeof(float) * sizeJtr));
CUDA_SAFE_CALL(hipMemset(state.d_numDenseOverlappingImages, 0, sizeof(int)));
#ifdef DEBUG
CUDA_SAFE_CALL(hipMemset(state.d_sumResidualDEBUG, 0, sizeof(float) * maxDenseImPairs));
CUDA_SAFE_CALL(hipMemset(state.d_numCorrDEBUG, 0, sizeof(int) * maxDenseImPairs));
CUDA_SAFE_CALL(hipMemset(state.d_sumResidualColorDEBUG, 0, sizeof(float) * maxDenseImPairs));
CUDA_SAFE_CALL(hipMemset(state.d_numCorrColorDEBUG, 0, sizeof(int) * maxDenseImPairs));
CUDA_SAFE_CALL(hipMemset(state.d_J, 0, sizeof(float) * sizeJtr));
#endif
dim3 gridImImOverlap(N, N, 1);
FindImageImageCorr_Kernel << < gridImImOverlap, THREADS_PER_BLOCK_DENSE_OVERLAP >> > (input, state, parameters);
int numOverlapImagePairs;
CUDA_SAFE_CALL(hipMemcpy(&numOverlapImagePairs, state.d_numDenseOverlappingImages,
sizeof(int), hipMemcpyDeviceToHost));
if (numOverlapImagePairs == 0) {
printf("warning: no overlapping images for dense solve\n");
return false;
}
const int reductionGlobal = (input.denseDepthWidth * input.denseDepthHeight + THREADS_PER_BLOCK_DENSE_DEPTH - 1) /
THREADS_PER_BLOCK_DENSE_DEPTH;
dim3 grid(numOverlapImagePairs, reductionGlobal);
FindDenseCorrespondences_Kernel << < grid, THREADS_PER_BLOCK_DENSE_DEPTH >> > (input, state, parameters);
int wgrid = (numOverlapImagePairs + THREADS_PER_BLOCK_DENSE_DEPTH_FLIP - 1) / THREADS_PER_BLOCK_DENSE_DEPTH_FLIP;
WeightDenseCorrespondences_Kernel << < wgrid, THREADS_PER_BLOCK_DENSE_DEPTH_FLIP >> >
(maxDenseImPairs, state, parameters);
bool useDepth = parameters.weightDenseDepth > 0.0f;
bool useColor = parameters.weightDenseColor > 0.0f;
if (useDepth && useColor)
BuildDenseSystem_Kernel<true, true> << < grid, THREADS_PER_BLOCK_DENSE_DEPTH >> >
(input, state, parameters);
else if (useDepth)
BuildDenseSystem_Kernel<true, false> << < grid, THREADS_PER_BLOCK_DENSE_DEPTH >> >
(input, state, parameters);
else {
printf("useDepth and useColor error!\n");
return false;
}
const unsigned int flipgrid =
(sizeJtJ + THREADS_PER_BLOCK_DENSE_DEPTH_FLIP - 1) / THREADS_PER_BLOCK_DENSE_DEPTH_FLIP;
FlipJtJ_Kernel << < flipgrid, THREADS_PER_BLOCK_DENSE_DEPTH_FLIP >> > (sizeJtJ, sizeJtr, state.d_denseJtJ);
#ifdef DEBUG
uint2 *denseOverlappingImages = new uint2[numOverlapImagePairs];
CUDA_SAFE_CALL(hipMemcpy(denseOverlappingImages, state.d_denseOverlappingImages,
sizeof(uint2) * numOverlapImagePairs, hipMemcpyDeviceToHost));
float *denseCorrCounts = new float[numOverlapImagePairs];
CUDA_SAFE_CALL(hipMemcpy(denseCorrCounts, state.d_denseCorrCounts,
sizeof(float) * numOverlapImagePairs, hipMemcpyDeviceToHost));
float *sumResidualDEBUG = new float[numOverlapImagePairs];
int *numCorrDEBUG = new int[numOverlapImagePairs];
CUDA_SAFE_CALL(hipMemcpy(sumResidualDEBUG, state.d_sumResidualDEBUG, sizeof(float) * numOverlapImagePairs,
hipMemcpyDeviceToHost));
    CUDA_SAFE_CALL(hipMemcpy(numCorrDEBUG, state.d_numCorrDEBUG, sizeof(int) * numOverlapImagePairs,
                             hipMemcpyDeviceToHost));
float *sumResidualColorDEBUG = new float[numOverlapImagePairs];
int *numCorrColorDEBUG = new int[numOverlapImagePairs];
CUDA_SAFE_CALL(hipMemcpy(sumResidualColorDEBUG, state.d_sumResidualColorDEBUG,
sizeof(float) * numOverlapImagePairs,
hipMemcpyDeviceToHost));
    CUDA_SAFE_CALL(hipMemcpy(numCorrColorDEBUG, state.d_numCorrColorDEBUG, sizeof(int) * numOverlapImagePairs,
                             hipMemcpyDeviceToHost));
// float *J = new float[sizeJtr];
// CUDA_SAFE_CALL(hipMemcpy(J, state.d_J, sizeof(float) * sizeJtr, hipMemcpyDeviceToHost));
printf("image pair num: %d\n", numOverlapImagePairs);
for (int i = 0; i < numOverlapImagePairs; ++i) {
printf("image pair (%d, %d): %f %d %f %d %f\n", denseOverlappingImages[i].x, denseOverlappingImages[i].y,
denseCorrCounts[i], numCorrDEBUG[i], sumResidualDEBUG[i],
numCorrColorDEBUG[i], sumResidualColorDEBUG[i]);
}
// printf("J:\n");
// for (int i = 0; i < sizeJtr; ++i) {
// printf("%f ", J[i]);
// }
printf("\n");
    delete[] denseOverlappingImages;
    delete[] denseCorrCounts;
    delete[] sumResidualDEBUG;
    delete[] numCorrDEBUG;
    delete[] sumResidualColorDEBUG;
    delete[] numCorrColorDEBUG;
    // delete[] J;
#endif
return true;
}
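// Copies the last Gauss-Newton update (deltaRot/deltaTrans) to the host and returns its largest
// absolute component; used by solve() for the early-out test.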
float EvalGNConvergence(SolverInput &input, SolverState &state) {
const unsigned int N = input.numberOfImages;
Vec3f *deltaRot = new Vec3f[N];
Vec3f *deltaTrans = new Vec3f[N];
CUDA_SAFE_CALL(hipMemcpy(deltaRot, state.d_deltaRot, sizeof(Vec3f) * N, hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipMemcpy(deltaTrans, state.d_deltaTrans, sizeof(Vec3f) * N, hipMemcpyDeviceToHost));
float maxVal = 0.f;
for (int i = 0; i < N; ++i) {
float r1 = deltaRot[i].cwiseAbs().maxCoeff();
float r2 = deltaTrans[i].cwiseAbs().maxCoeff();
maxVal = fmaxf(maxVal, fmaxf(r1, r2));
}
delete[] deltaRot;
delete[] deltaTrans;
return maxVal;
}
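// Outer Gauss-Newton loop: per iteration, re-linearize at the current poses, rebuild the dense
// system, run PCG on the normal equations, and stop early once the update becomes small.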
void solve(SolverInput &input, SolverState &state, SolverParameters ¶meters) {
for (unsigned int nIter = 0; nIter < parameters.nNonLinearIterations; nIter++) {
parameters.weightDenseDepth = input.weightsDenseDepth[nIter];
parameters.weightDenseColor = input.weightsDenseColor[nIter];
convertLiePosesToMatrices(state.d_xRot, state.d_xTrans, input.numberOfImages, state.d_xTransforms,
state.d_xTransformInverses);
bool ok = BuildDenseSystem(input, state, parameters);
if (!ok) {
printf("solve failed!\n");
break;
}
#ifdef DEBUG
float *denseJtJ = new float[36 * input.numberOfImages * input.numberOfImages];
float *denseJtr = new float[6 * input.numberOfImages];
        hipMemcpy(denseJtJ, state.d_denseJtJ, sizeof(float) * 36 * input.numberOfImages * input.numberOfImages,
                  hipMemcpyDeviceToHost);
        hipMemcpy(denseJtr, state.d_denseJtr, sizeof(float) * 6 * input.numberOfImages, hipMemcpyDeviceToHost);
// printf("denseJtJ:\n");
// for (int k = 0; k < input.numberOfImages * 6; ++k) {
// for (int m = 0; m < input.numberOfImages * 6; ++m) {
// printf("%f ", denseJtJ[k * input.numberOfImages * 6 + m]);
// }
// printf("\n");
// }
printf("denseJtr:\n");
for (int m = 0; m < input.numberOfImages * 6; ++m) {
printf("%f ", denseJtr[m]);
}
printf("\n");
delete[] denseJtJ;
delete[] denseJtr;
#endif
Initialization(input, state, parameters);
#ifdef DEBUG
Vec3f *rRot = new Vec3f[input.numberOfImages];
Vec3f *rTrans = new Vec3f[input.numberOfImages];
Vec3f *zRot = new Vec3f[input.numberOfImages];
Vec3f *zTrans = new Vec3f[input.numberOfImages];
Vec3f *pRot = new Vec3f[input.numberOfImages];
Vec3f *pTrans = new Vec3f[input.numberOfImages];
hipMemcpy(rRot, state.d_rRot, sizeof(Vec3f) * input.numberOfImages, hipMemcpyDeviceToHost);
hipMemcpy(rTrans, state.d_rTrans, sizeof(Vec3f) * input.numberOfImages, hipMemcpyDeviceToHost);
hipMemcpy(zRot, state.d_zRot, sizeof(Vec3f) * input.numberOfImages, hipMemcpyDeviceToHost);
hipMemcpy(zTrans, state.d_zTrans, sizeof(Vec3f) * input.numberOfImages, hipMemcpyDeviceToHost);
hipMemcpy(pRot, state.d_pRot, sizeof(Vec3f) * input.numberOfImages, hipMemcpyDeviceToHost);
hipMemcpy(pTrans, state.d_pTrans, sizeof(Vec3f) * input.numberOfImages, hipMemcpyDeviceToHost);
for (int k = 0; k < input.numberOfImages; ++k) {
std::cout << rRot[k] << std::endl;
std::cout << rTrans[k] << std::endl;
// std::cout << zRot[k] << std::endl;
// std::cout << zTrans[k] << std::endl;
// std::cout << pRot[k] << std::endl;
// std::cout << pTrans[k] << std::endl;
}
delete[] rRot;
delete[] rTrans;
delete[] zRot;
delete[] zTrans;
delete[] pRot;
delete[] pTrans;
#endif
for (unsigned int linIter = 0; linIter < parameters.nLinIterations; linIter++) {
if (PCGIteration(input, state, parameters, linIter == parameters.nLinIterations - 1)) {
break;
}
}
if (nIter < parameters.nNonLinearIterations - 1 && EvalGNConvergence(input, state) < 0.005f) {
printf("EARLY OUT\n");
break;
}
}
convertLiePosesToMatrices(state.d_xRot, state.d_xTrans, input.numberOfImages, state.d_xTransforms,
state.d_xTransformInverses);
hipDeviceSynchronize();
}
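// Utility: widen an array of Vec3f to Vec4f, filling the fourth component with the constant w.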
__global__ void copyVec3ToVec4_kernel(Vec4f *dst, Vec3f *src, int num, float w) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < num) {
dst[id] = Vec4f(src[id](0), src[id](1), src[id](2), w);
}
}
void copyVec3ToVec4(Vec4f *dst, Vec3f *src, int num, float w) {
copyVec3ToVec4_kernel << < (num + 8 - 1) / 8, 8 >> > (dst, src, num, w);
} | 1cf57f2ce5b39dc4f75e95033c3f7b36165934e4.cu | #include "gn_solver.h"
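// Warp-level sum reduction using __shfl_down_sync; after the loop, lane 0 of each warp holds the sum over its 32 lanes.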
__inline__ __device__ float warpReduce(float val) {
int offset = 32 >> 1;
while (offset > 0) {
val = val + __shfl_down_sync(FULL_MASK, val, offset, 32);
// val = val + __shfl_down(val, offset, 32);
offset = offset >> 1;
}
return val;
}
//! compute a rotation exponential using the Rodrigues Formula.
// rotation axis w (theta = |w|); A = sin(theta) / theta; B = (1 - cos(theta)) / theta^2
__inline__ __device__ void rodrigues_so3_exp(const Vec3f &w, float A, float B, Mat3f &R) {
{
const float wx2 = w(0) * w(0);
const float wy2 = w(1) * w(1);
const float wz2 = w(2) * w(2);
R(0, 0) = 1.0f - B * (wy2 + wz2);
R(1, 1) = 1.0f - B * (wx2 + wz2);
R(2, 2) = 1.0f - B * (wx2 + wy2);
}
{
const float a = A * w(2);
const float b = B * (w(0) * w(1));
R(0, 1) = b - a;
R(1, 0) = b + a;
}
{
const float a = A * w(1);
const float b = B * (w(0) * w(2));
R(0, 2) = b + a;
R(2, 0) = b - a;
}
{
const float a = A * w(0);
const float b = B * (w(1) * w(2));
R(1, 2) = b - a;
R(2, 1) = b + a;
}
}
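//! exponential map: convert a Lie-algebra pose (rotation vector and translational twist part) into a 4x4 rigid-body transform.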
__inline__ __device__ void poseToMatrix(const Vec3f &rot, const Vec3f &trans, Mat4f &matrix) {
matrix.setIdentity();
Vec3f translation;
Mat3f rotation;
const float theta_sq = rot.dot(rot);
const float theta = std::sqrt(theta_sq);
float A, B;
Vec3f cr = rot.cross(trans);
if (theta_sq < 1e-8) {
A = 1.0f - ONE_SIXTH * theta_sq;
B = 0.5f;
translation = trans + 0.5f * cr;
} else {
float C;
if (theta_sq < 1e-6) {
C = ONE_SIXTH * (1.0f - ONE_TWENTIETH * theta_sq);
A = 1.0f - theta_sq * C;
B = 0.5f - 0.25f * ONE_SIXTH * theta_sq;
} else {
const float inv_theta = 1.0f / theta;
A = sinf(theta) * inv_theta;
B = (1 - cosf(theta)) * (inv_theta * inv_theta);
C = (1 - A) * (inv_theta * inv_theta);
}
Vec3f w_cross = rot.cross(cr);
translation = trans + B * cr + C * w_cross;
}
// 3x3 rotation part:
rodrigues_so3_exp(rot, A, B, rotation);
//set rotation
matrix.topLeftCorner(3, 3) = rotation;
//set translation
matrix.topRightCorner(3, 1) = translation;
}
__inline__ __device__ Mat4f poseToMatrix(const Vec3f &rot, const Vec3f &trans) {
Mat4f res;
poseToMatrix(rot, trans, res);
return res;
}
//! exponentiate a vector in the Lie algebra to generate a new SO3(a 3x3 rotation matrix).
__inline__ __device__ Mat3f exp_rotation(const Vec3f &w) {
const float theta_sq = w.dot(w);
const float theta = std::sqrt(theta_sq);
float A, B;
//Use a Taylor series expansion near zero. This is required for
//accuracy, since sin t / t and (1-cos t)/t^2 are both 0/0.
if (theta_sq < 1e-8) {
A = 1.0f - ONE_SIXTH * theta_sq;
B = 0.5f;
} else {
if (theta_sq < 1e-6) {
B = 0.5f - 0.25f * ONE_SIXTH * theta_sq;
A = 1.0f - theta_sq * ONE_SIXTH * (1.0f - ONE_TWENTIETH * theta_sq);
} else {
const float inv_theta = 1.0f / theta;
A = sinf(theta) * inv_theta;
B = (1 - cosf(theta)) * (inv_theta * inv_theta);
}
}
Mat3f result;
rodrigues_so3_exp(w, A, B, result);
return result;
}
//! logarithm of the 3x3 rotation matrix, generating the corresponding vector in the Lie Algebra
__inline__ __device__ Vec3f ln_rotation(const Mat3f &rotation) {
Vec3f result; // skew symm matrix = (R - R^T) * angle / (2 * sin(angle))
const float cos_angle = (rotation.trace() - 1.0f) * 0.5f;
//(R - R^T) / 2
result(0) = (rotation(2, 1) - rotation(1, 2)) * 0.5f;
result(1) = (rotation(0, 2) - rotation(2, 0)) * 0.5f;
result(2) = (rotation(1, 0) - rotation(0, 1)) * 0.5f;
float sin_angle_abs = result.norm(); //sqrt(result*result);
if (cos_angle > (float) 0.70710678118654752440) { // [0 - Pi/4[ use asin
if (sin_angle_abs > 0) {
result *= asinf(sin_angle_abs) / sin_angle_abs;
}
} else if (cos_angle > -(float) 0.70710678118654752440) { // [Pi/4 - 3Pi/4[ use acos, but antisymmetric part
float angle = acosf(cos_angle);
result *= angle / sin_angle_abs;
} else { // rest use symmetric part
// antisymmetric part vanishes, but still large rotation, need information from symmetric part
const float angle = CUDART_PI_F - asinf(sin_angle_abs);
const float
d0 = rotation(0, 0) - cos_angle,
d1 = rotation(1, 1) - cos_angle,
d2 = rotation(2, 2) - cos_angle;
Vec3f r2;
if (fabsf(d0) > fabsf(d1) && fabsf(d0) > fabsf(d2)) { // first is largest, fill with first column
r2(0) = d0;
r2(1) = (rotation(1, 0) + rotation(0, 1)) * 0.5f;
r2(2) = (rotation(0, 2) + rotation(2, 0)) * 0.5f;
} else if (fabsf(d1) > fabsf(d2)) { // second is largest, fill with second column
r2(0) = (rotation(1, 0) + rotation(0, 1)) * 0.5f;
r2(1) = d1;
r2(2) = (rotation(2, 1) + rotation(1, 2)) * 0.5f;
} else { // third is largest, fill with third column
r2(0) = (rotation(0, 2) + rotation(2, 0)) * 0.5f;
r2(1) = (rotation(2, 1) + rotation(1, 2)) * 0.5f;
r2(2) = d2;
}
// flip, if we point in the wrong direction!
if (r2.dot(result) < 0)
r2 *= -1;
result = r2;
result *= (angle / r2.norm());
}
return result;
}
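//! logarithm map: recover the Lie-algebra pose (rotation vector and translational twist part) from a 4x4 rigid-body transform; inverse of poseToMatrix.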
__inline__ __device__ void matrixToPose(const Mat4f &matrix, Vec3f &rot, Vec3f &trans) {
const Mat3f R = matrix.topLeftCorner(3, 3);
const Vec3f t = matrix.topRightCorner(3, 1);
rot = ln_rotation(R);
const float theta = rot.norm();
float shtot = 0.5f;
if (theta > 0.00001f)
shtot = sinf(theta * 0.5f) / theta;
// now do the rotation
Vec3f rot_half = rot;
rot_half *= -0.5f;
const Mat3f halfrotator = exp_rotation(rot_half);
trans = halfrotator * t;
if (theta > 0.001f)
trans -= rot * (t.dot(rot) * (1 - 2 * shtot) / rot.dot(rot));
else
trans -= rot * (t.dot(rot) / 24);
trans *= 1.0f / (2 * shtot);
}
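// Per-pose setup for PCG: clears the delta, reads the residual -Jtr for this pose (translation and
// rotation parts) and builds a Jacobi preconditioner from the corresponding diagonal entries of the
// dense JtJ.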
__inline__ __device__ void
evalMinusJTFDevice(unsigned int variableIdx, SolverInput &input, SolverState &state, SolverParameters ¶meters,
Vec3f &resRot, Vec3f &resTrans) {
// Reset linearized update vector
state.d_deltaRot[variableIdx].setZero();
state.d_deltaTrans[variableIdx].setZero();
    //// translation indices come first, rotation indices second in each 6-dof parameter block
uint3 transIndices = make_uint3(variableIdx * 6 + 0, variableIdx * 6 + 1, variableIdx * 6 + 2);
uint3 rotIndices = make_uint3(variableIdx * 6 + 3, variableIdx * 6 + 4, variableIdx * 6 + 5);
resRot = -Vec3f(state.d_denseJtr[rotIndices.x], state.d_denseJtr[rotIndices.y],
state.d_denseJtr[rotIndices.z]); //minus since -Jtf, weight already built in
resTrans = -Vec3f(state.d_denseJtr[transIndices.x], state.d_denseJtr[transIndices.y],
state.d_denseJtr[transIndices.z]); //minus since -Jtf, weight already built in
//// preconditioner
Vec3f pRot(state.d_denseJtJ[rotIndices.x * input.numberOfImages * 6 + rotIndices.x],
state.d_denseJtJ[rotIndices.y * input.numberOfImages * 6 + rotIndices.y],
state.d_denseJtJ[rotIndices.z * input.numberOfImages * 6 + rotIndices.z]);
Vec3f pTrans(state.d_denseJtJ[transIndices.x * input.numberOfImages * 6 + transIndices.x],
state.d_denseJtJ[transIndices.y * input.numberOfImages * 6 + transIndices.y],
state.d_denseJtJ[transIndices.z * input.numberOfImages * 6 + transIndices.z]);
// Preconditioner depends on last solution P(input.d_x)
if (pRot(0) > FLOAT_EPSILON) state.d_precondionerRot[variableIdx](0) = 1.0f / pRot(0);
else state.d_precondionerRot[variableIdx](0) = 1.0f;
if (pRot(1) > FLOAT_EPSILON) state.d_precondionerRot[variableIdx](1) = 1.0f / pRot(1);
else state.d_precondionerRot[variableIdx](1) = 1.0f;
if (pRot(2) > FLOAT_EPSILON) state.d_precondionerRot[variableIdx](2) = 1.0f / pRot(2);
else state.d_precondionerRot[variableIdx](2) = 1.0f;
if (pTrans(0) > FLOAT_EPSILON) state.d_precondionerTrans[variableIdx](0) = 1.0f / pTrans(0);
else state.d_precondionerTrans[variableIdx](0) = 1.0f;
if (pTrans(1) > FLOAT_EPSILON) state.d_precondionerTrans[variableIdx](1) = 1.0f / pTrans(1);
else state.d_precondionerTrans[variableIdx](1) = 1.0f;
if (pTrans(2) > FLOAT_EPSILON) state.d_precondionerTrans[variableIdx](2) = 1.0f / pTrans(2);
else state.d_precondionerTrans[variableIdx](2) = 1.0f;
}
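// Applies this pose's 6-dof block row of the dense JtJ to the current search direction p (J^T J p).
// The loop over the other poses is strided across THREADS_PER_BLOCK_JT_DENSE threads and the
// 3-vector partial results are combined with warp reductions.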
__inline__ __device__ void
applyJTJDenseDevice(unsigned int variableIdx, SolverState &state, float *d_JtJ, unsigned int N, Vec3f &outRot,
Vec3f &outTrans, unsigned int threadIdx) {
// Compute J^T*d_Jp here
outRot.setZero();
outTrans.setZero();
const unsigned int dim = 6 * N;
unsigned int baseVarIdx = variableIdx * 6;
unsigned int i = (threadIdx > 0) ? threadIdx : THREADS_PER_BLOCK_JT_DENSE;
for (; i < N; i += THREADS_PER_BLOCK_JT_DENSE) // iterate through (6) row(s) of JtJ
{
// (row, col) = vars, i
unsigned int baseIdx = 6 * i;
Mat3f block00, block01, block10, block11;
block00 << d_JtJ[(baseVarIdx + 0) * dim + baseIdx + 0], d_JtJ[(baseVarIdx + 0) * dim + baseIdx + 1],
d_JtJ[(baseVarIdx + 0) * dim + baseIdx + 2],
d_JtJ[(baseVarIdx + 1) * dim + baseIdx + 0], d_JtJ[(baseVarIdx + 1) * dim + baseIdx + 1],
d_JtJ[(baseVarIdx + 1) * dim + baseIdx + 2],
d_JtJ[(baseVarIdx + 2) * dim + baseIdx + 0], d_JtJ[(baseVarIdx + 2) * dim + baseIdx + 1],
d_JtJ[(baseVarIdx + 2) * dim + baseIdx + 2];
block01 << d_JtJ[(baseVarIdx + 0) * dim + baseIdx + 3], d_JtJ[(baseVarIdx + 0) * dim + baseIdx + 4],
d_JtJ[(baseVarIdx + 0) * dim + baseIdx + 5],
d_JtJ[(baseVarIdx + 1) * dim + baseIdx + 3], d_JtJ[(baseVarIdx + 1) * dim + baseIdx + 4],
d_JtJ[(baseVarIdx + 1) * dim + baseIdx + 5],
d_JtJ[(baseVarIdx + 2) * dim + baseIdx + 3], d_JtJ[(baseVarIdx + 2) * dim + baseIdx + 4],
d_JtJ[(baseVarIdx + 2) * dim + baseIdx + 5];
block10 << d_JtJ[(baseVarIdx + 3) * dim + baseIdx + 0], d_JtJ[(baseVarIdx + 3) * dim + baseIdx + 1],
d_JtJ[(baseVarIdx + 3) * dim + baseIdx + 2],
d_JtJ[(baseVarIdx + 4) * dim + baseIdx + 0], d_JtJ[(baseVarIdx + 4) * dim + baseIdx + 1],
d_JtJ[(baseVarIdx + 4) * dim + baseIdx + 2],
d_JtJ[(baseVarIdx + 5) * dim + baseIdx + 0], d_JtJ[(baseVarIdx + 5) * dim + baseIdx + 1],
d_JtJ[(baseVarIdx + 5) * dim + baseIdx + 2];
block11 << d_JtJ[(baseVarIdx + 3) * dim + baseIdx + 3], d_JtJ[(baseVarIdx + 3) * dim + baseIdx + 4],
d_JtJ[(baseVarIdx + 3) * dim + baseIdx + 5],
d_JtJ[(baseVarIdx + 4) * dim + baseIdx + 3], d_JtJ[(baseVarIdx + 4) * dim + baseIdx + 4],
d_JtJ[(baseVarIdx + 4) * dim + baseIdx + 5],
d_JtJ[(baseVarIdx + 5) * dim + baseIdx + 3], d_JtJ[(baseVarIdx + 5) * dim + baseIdx + 4],
d_JtJ[(baseVarIdx + 5) * dim + baseIdx + 5];
        //// translation block first, rotation block second (matches the Jtr/JtJ layout above)
outTrans += (block00 * state.d_pTrans[i] + block01 * state.d_pRot[i]);
outRot += (block10 * state.d_pTrans[i] + block11 * state.d_pRot[i]);
}
outRot(0) = warpReduce(outRot(0));
outRot(1) = warpReduce(outRot(1));
outRot(2) = warpReduce(outRot(2));
outTrans(0) = warpReduce(outTrans(0));
outTrans(1) = warpReduce(outTrans(1));
outTrans(2) = warpReduce(outTrans(2));
}
__inline__ __device__ void
computeLieUpdate(const Vec3f &updateW, const Vec3f &updateT, const Vec3f &curW, const Vec3f &curT,
Vec3f &newW, Vec3f &newT) {
Mat4f update = poseToMatrix(updateW, updateT);
Mat4f cur = poseToMatrix(curW, curT);
Mat4f n = update * cur;
matrixToPose(n, newW, newT);
}
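// PCG initialization: since delta_0 = 0, the residual is r = -Jtr; z = M^-1 r is used as the first
// search direction p, and the partial sums of r·z are accumulated into d_scanAlpha.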
__global__ void PCGInit_Kernel1(SolverInput input, SolverState state, SolverParameters parameters) {
const unsigned int N = input.numberOfImages;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
float d = 0.0f;
if (x > 0 && x < N) {
Vec3f resRot, resTrans;
evalMinusJTFDevice(x, input, state, parameters, resRot,
resTrans); // residuum = J^T x -F - A x delta_0 => J^T x -F, since A x x_0 == 0
state.d_rRot[x] = resRot; // store for next iteration
state.d_rTrans[x] = resTrans; // store for next iteration
const Vec3f pRot = state.d_precondionerRot[x].cwiseProduct(resRot); // apply preconditioner M^-1
state.d_pRot[x] = pRot;
const Vec3f pTrans = state.d_precondionerTrans[x].cwiseProduct(resTrans); // apply preconditioner M^-1
state.d_pTrans[x] = pTrans;
        d = resRot.dot(pRot) + resTrans.dot(
                pTrans); // x-th term of numerator for computing alpha and denominator for computing beta
state.d_Ap_XRot[x].setZero();
state.d_Ap_XTrans[x].setZero();
}
d = warpReduce(d);
if (threadIdx.x % WARP_SIZE == 0) {
atomicAdd(state.d_scanAlpha, d);
}
}
__global__ void PCGInit_Kernel2(unsigned int N, SolverState state) {
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x > 0 && x < N) state.d_rDotzOld[x] = state.d_scanAlpha[0]; // store result for next kernel call
}
__global__ void PCGStep_Kernel_Dense(SolverInput input, SolverState state, SolverParameters parameters) {
const unsigned int N = input.numberOfImages; // Number of block variables
const unsigned int x = blockIdx.x;
const unsigned int lane = threadIdx.x % WARP_SIZE;
if (x > 0 && x < N) {
Vec3f rot, trans;
applyJTJDenseDevice(x, state, state.d_denseJtJ, input.numberOfImages, rot, trans,
threadIdx.x); // A x p_k => J^T x J x p_k
if (lane == 0) {
            atomicAdd(&state.d_Ap_XRot[x].data()[0], rot(0));//TODO to be verified
atomicAdd(&state.d_Ap_XRot[x].data()[1], rot(1));
atomicAdd(&state.d_Ap_XRot[x].data()[2], rot(2));
atomicAdd(&state.d_Ap_XTrans[x].data()[0], trans(0));
atomicAdd(&state.d_Ap_XTrans[x].data()[1], trans(1));
atomicAdd(&state.d_Ap_XTrans[x].data()[2], trans(2));
}
}
}
__global__ void PCGStep_Kernel1b(SolverInput input, SolverState state, SolverParameters parameters) {
const unsigned int N = input.numberOfImages; // Number of block variables
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
float d = 0.0f;
if (x > 0 && x < N) {
d = state.d_pRot[x].dot(state.d_Ap_XRot[x]) +
state.d_pTrans[x].dot(state.d_Ap_XTrans[x]); // x-th term of denominator of alpha
}
d = warpReduce(d);
if (threadIdx.x % WARP_SIZE == 0) {
atomicAdd(state.d_scanAlpha, d);
}
}
__global__ void PCGStep_Kernel2(SolverInput input, SolverState state) {
const unsigned int N = input.numberOfImages;
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
const float dotProduct = state.d_scanAlpha[0];
float b = 0.0f;
if (x > 0 && x < N) {
float alpha = 0.0f;
if (dotProduct > FLOAT_EPSILON) alpha = state.d_rDotzOld[x] / dotProduct; // update step size alpha
        state.d_deltaRot[x] = state.d_deltaRot[x] + alpha * state.d_pRot[x]; // take a descent step
        state.d_deltaTrans[x] = state.d_deltaTrans[x] + alpha * state.d_pTrans[x]; // take a descent step
Vec3f rRot = state.d_rRot[x] - alpha * state.d_Ap_XRot[x]; // update residuum
state.d_rRot[x] = rRot; // store for next kernel call
Vec3f rTrans = state.d_rTrans[x] - alpha * state.d_Ap_XTrans[x]; // update residuum
state.d_rTrans[x] = rTrans; // store for next kernel call
Vec3f zRot = state.d_precondionerRot[x].cwiseProduct(rRot); // apply preconditioner M^-1
state.d_zRot[x] = zRot; // save for next kernel call
Vec3f zTrans = state.d_precondionerTrans[x].cwiseProduct(rTrans); // apply preconditioner M^-1
state.d_zTrans[x] = zTrans; // save for next kernel call
        b = zRot.dot(rRot) + zTrans.dot(rTrans); // compute x-th term of the numerator of beta
}
b = warpReduce(b);
if (threadIdx.x % WARP_SIZE == 0) {
atomicAdd(&state.d_scanAlpha[1], b);
}
}
template<bool lastIteration>
__global__ void PCGStep_Kernel3(SolverInput input, SolverState state) {
const unsigned int N = input.numberOfImages;
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x > 0 && x < N) {
        const float rDotzNew = state.d_scanAlpha[1]; // get new numerator
const float rDotzOld = state.d_rDotzOld[x]; // get old denominator
float beta = 0.0f;
if (rDotzOld > FLOAT_EPSILON) beta = rDotzNew / rDotzOld; // update step size beta
state.d_rDotzOld[x] = rDotzNew; // save new rDotz for next iteration
        state.d_pRot[x] = state.d_zRot[x] + beta * state.d_pRot[x]; // update descent direction
        state.d_pTrans[x] = state.d_zTrans[x] + beta * state.d_pTrans[x]; // update descent direction
state.d_Ap_XRot[x].setZero();
state.d_Ap_XTrans[x].setZero();
if (lastIteration) {
Vec3f rot, trans;
computeLieUpdate(state.d_deltaRot[x], state.d_deltaTrans[x], state.d_xRot[x], state.d_xTrans[x],
rot, trans);
state.d_xRot[x] = rot;
state.d_xTrans[x] = trans;
}
}
}
void Initialization(SolverInput &input, SolverState &state, SolverParameters ¶meters) {
const unsigned int N = input.numberOfImages;
const int blocksPerGrid = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
if (blocksPerGrid > THREADS_PER_BLOCK) {
std::cout << "Too many variables for this block size. Maximum number of variables for two kernel scan: "
<< THREADS_PER_BLOCK * THREADS_PER_BLOCK << std::endl;
while (1);
}
CUDA_SAFE_CALL(cudaMemset(state.d_scanAlpha, 0, sizeof(float)));
PCGInit_Kernel1 << < blocksPerGrid, THREADS_PER_BLOCK >> > (input, state, parameters);
PCGInit_Kernel2 << < blocksPerGrid, THREADS_PER_BLOCK >> > (N, state);
}
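// One PCG iteration on the dense normal equations: compute Ap = JtJ * p, the step size alpha, update
// delta/r/z, then beta and the new search direction; the pose update is applied on the last iteration.
// Returns true when this was the final iteration (either requested or because p·Ap became negligible).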
bool PCGIteration(SolverInput &input, SolverState &state, SolverParameters ¶meters, bool lastIteration) {
const unsigned int N = input.numberOfImages; // Number of block variables
// Do PCG step
const int blocksPerGrid = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
if (blocksPerGrid > THREADS_PER_BLOCK) {
std::cout << "Too many variables for this block size. Maximum number of variables for two kernel scan: "
<< THREADS_PER_BLOCK * THREADS_PER_BLOCK << std::endl;
while (1);
}
CUDA_SAFE_CALL(cudaMemset(state.d_scanAlpha, 0, sizeof(float) * 2));
PCGStep_Kernel_Dense << < N, THREADS_PER_BLOCK_JT_DENSE >> > (input, state, parameters);
PCGStep_Kernel1b << < blocksPerGrid, THREADS_PER_BLOCK >> > (input, state, parameters);
PCGStep_Kernel2 << < blocksPerGrid, THREADS_PER_BLOCK >> > (input, state);
float scanAlpha;
CUDA_SAFE_CALL(cudaMemcpy(&scanAlpha, state.d_scanAlpha, sizeof(float), cudaMemcpyDeviceToHost));
if (fabs(scanAlpha) < 5e-7) {
lastIteration = true;
} //todo check this part
if (lastIteration)
PCGStep_Kernel3<true> << < blocksPerGrid, THREADS_PER_BLOCK >> > (input, state);
else
PCGStep_Kernel3<false> << < blocksPerGrid, THREADS_PER_BLOCK >> > (input, state);
return lastIteration;
}
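// Bilinear interpolation that ignores invalid (FLT_MAX) samples; the remaining contributions are
// renormalized by their accumulated weight, and FLT_MAX is returned if no valid sample exists.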
inline __device__ float
bilinearInterpolation(float x, float y, const float *d_input, unsigned int imageWidth, unsigned int imageHeight) {
const Vec2i p00(floorf(x), floorf(y));
const Vec2i p01 = p00 + Vec2i(0.0f, 1.0f);
const Vec2i p10 = p00 + Vec2i(1.0f, 0.0f);
const Vec2i p11 = p00 + Vec2i(1.0f, 1.0f);
const float alpha = x - p00(0);
const float beta = y - p00(1);
float s0 = 0.0f;
float w0 = 0.0f;
if (p00(0) < imageWidth && p00(1) < imageHeight) {
float v00 = d_input[p00(1) * imageWidth + p00(0)];
if (v00 != FLT_MAX) {
s0 += (1.0f - alpha) * v00;
w0 += (1.0f - alpha);
}
}
if (p10(0) < imageWidth && p10(1) < imageHeight) {
float v10 = d_input[p10(1) * imageWidth + p10(0)];
if (v10 != FLT_MAX) {
s0 += alpha * v10;
w0 += alpha;
}
}
float s1 = 0.0f;
float w1 = 0.0f;
if (p01(0) < imageWidth && p01(1) < imageHeight) {
float v01 = d_input[p01(1) * imageWidth + p01(0)];
if (v01 != FLT_MAX) {
s1 += (1.0f - alpha) * v01;
w1 += (1.0f - alpha);
}
}
if (p11(0) < imageWidth && p11(1) < imageHeight) {
float v11 = d_input[p11(1) * imageWidth + p11(0)];
if (v11 != FLT_MAX) {
s1 += alpha * v11;
w1 += alpha;
}
}
const float p0 = s0 / w0;
const float p1 = s1 / w1;
float ss = 0.0f;
float ww = 0.0f;
if (w0 > 0.0f) {
ss += (1.0f - beta) * p0;
ww += (1.0f - beta);
}
if (w1 > 0.0f) {
ss += beta * p1;
ww += beta;
}
if (ww > 0.0f) return ss / ww;
else return FLT_MAX;
}
inline __device__ Vec2f
bilinearInterpolation(float x, float y, const Vec2f *d_input, unsigned int imageWidth, unsigned int imageHeight) {
const Vec2i p00(floorf(x), floorf(y));
const Vec2i p01 = p00 + Vec2i(0.0f, 1.0f);
const Vec2i p10 = p00 + Vec2i(1.0f, 0.0f);
const Vec2i p11 = p00 + Vec2i(1.0f, 1.0f);
const float alpha = x - p00(0);
const float beta = y - p00(1);
Vec2f s0(0.0f, 0.0f);
float w0 = 0.0f;
if (p00(0) < imageWidth && p00(1) < imageHeight) {
Vec2f v00 = d_input[p00(1) * imageWidth + p00(0)];
if (v00(0) != FLT_MAX) {
s0 += (1.0f - alpha) * v00;
w0 += (1.0f - alpha);
}
}
if (p10(0) < imageWidth && p10(1) < imageHeight) {
Vec2f v10 = d_input[p10(1) * imageWidth + p10(0)];
if (v10(0) != FLT_MAX) {
s0 += alpha * v10;
w0 += alpha;
}
}
Vec2f s1(0.0f, 0.0f);
float w1 = 0.0f;
if (p01(0) < imageWidth && p01(1) < imageHeight) {
Vec2f v01 = d_input[p01(1) * imageWidth + p01(0)];
if (v01(0) != FLT_MAX) {
s1 += (1.0f - alpha) * v01;
w1 += (1.0f - alpha);
}
}
if (p11(0) < imageWidth && p11(1) < imageHeight) {
Vec2f v11 = d_input[p11(1) * imageWidth + p11(0)];
if (v11(0) != FLT_MAX) {
s1 += alpha * v11;
w1 += alpha;
}
}
const Vec2f p0 = s0 / w0;
const Vec2f p1 = s1 / w1;
Vec2f ss(0.0f, 0.0f);
float ww = 0.0f;
if (w0 > 0.0f) {
ss += (1.0f - beta) * p0;
ww += (1.0f - beta);
}
if (w1 > 0.0f) {
ss += beta * p1;
ww += beta;
}
if (ww > 0.0f) return ss / ww;
else return Vec2f(FLT_MAX, FLT_MAX);
}
inline __device__ Vec4f
bilinearInterpolation(float x, float y, const Vec4f *d_input, unsigned int imageWidth, unsigned int imageHeight) {
const Vec2i p00(floorf(x), floorf(y));
const Vec2i p01 = p00 + Vec2i(0.0f, 1.0f);
const Vec2i p10 = p00 + Vec2i(1.0f, 0.0f);
const Vec2i p11 = p00 + Vec2i(1.0f, 1.0f);
const float alpha = x - p00(0);
const float beta = y - p00(1);
Vec4f s0(0.f, 0.f, 0.f, 0.f);
float w0 = 0.0f;
if (p00(0) < imageWidth && p00(1) < imageHeight) {
Vec4f v00 = d_input[p00(1) * imageWidth + p00(0)];
if (v00(0) != FLT_MAX) {
s0 += (1.0f - alpha) * v00;
w0 += (1.0f - alpha);
}
}
if (p10(0) < imageWidth && p10(1) < imageHeight) {
Vec4f v10 = d_input[p10(1) * imageWidth + p10(0)];
if (v10(0) != FLT_MAX) {
s0 += alpha * v10;
w0 += alpha;
}
}
Vec4f s1(0.f, 0.f, 0.f, 0.f);
float w1 = 0.0f;
if (p01(0) < imageWidth && p01(1) < imageHeight) {
Vec4f v01 = d_input[p01(1) * imageWidth + p01(0)];
if (v01(0) != FLT_MAX) {
s1 += (1.0f - alpha) * v01;
w1 += (1.0f - alpha);
}
}
if (p11(0) < imageWidth && p11(1) < imageHeight) {
Vec4f v11 = d_input[p11(1) * imageWidth + p11(0)];
if (v11(0) != FLT_MAX) {
s1 += alpha * v11;
w1 += alpha;
}
}
const Vec4f p0 = s0 / w0;
const Vec4f p1 = s1 / w1;
Vec4f ss(0.f, 0.f, 0.f, 0.f);
float ww = 0.0f;
if (w0 > 0.0f) {
ss += (1.0f - beta) * p0;
ww += (1.0f - beta);
}
if (w1 > 0.0f) {
ss += beta * p1;
ww += beta;
}
if (ww > 0.0f) return ss / ww;
else return Vec4f(FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX);
}
__inline__ __device__ Vec2f cameraToDepth(float fx, float fy, float cx, float cy, const Vec4f &pos) {
return Vec2f(pos(0) * fx / pos(2) + cx, pos(1) * fy / pos(2) + cy);
}
__inline__ __device__ Vec4f depthToCamera(float fx, float fy, float cx, float cy, const Vec2i &loc, float depth) {
const float x = ((float) loc(0) - cx) / fx;
const float y = ((float) loc(1) - cy) / fy;
return Vec4f(depth * x, depth * y, depth, 1.0f);
}
__global__ void convertLiePosesToMatrices_Kernel(const Vec3f *d_rot, const Vec3f *d_trans, unsigned int numTransforms,
Mat4f *d_transforms, Mat4f *d_transformInvs) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < numTransforms) {
poseToMatrix(d_rot[idx], d_trans[idx], d_transforms[idx]);
d_transformInvs[idx] = d_transforms[idx].inverse();
}
}
void
convertLiePosesToMatrices(const Vec3f *d_rot, const Vec3f *d_trans, unsigned int numTransforms, Mat4f *d_transforms,
Mat4f *d_transformInvs) {
convertLiePosesToMatrices_Kernel << < (numTransforms + 8 - 1) / 8, 8 >> > (d_rot, d_trans, numTransforms,
d_transforms, d_transformInvs);
}
__global__ void convertMatricesToLiePoses_Kernel(const Mat4f *d_transforms, unsigned int numTransforms,
Vec3f *d_rot, Vec3f *d_trans) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < numTransforms) {
matrixToPose(d_transforms[idx], d_rot[idx], d_trans[idx]);
}
}
void convertMatricesToLiePoses(const Mat4f *d_transforms, unsigned int numTransforms, Vec3f *d_rot, Vec3f *d_trans) {
convertMatricesToLiePoses_Kernel << < (numTransforms + 8 - 1) / 8, 8 >> > (d_transforms, numTransforms,
d_rot, d_trans);
}
__inline__ __device__ bool computeAngleDiff(const Mat4f &transform, float angleThresh) {
Vec3f x(1.0f, 1.0f, 1.0f);
x.normalize();
Vec3f v = transform.topLeftCorner(3, 3) * x;
float angle = acosf(fmaxf(fminf(x.dot(v), 1.0f), -1.0f));
return fabsf(angle) < angleThresh;
}
__inline__ __device__ bool isInBoundingBox(const Vec4f cpos, const Mat4f &c2g_transform,
const Vec3f &boundingMin, const Vec3f &boundingMax) {
Vec4f gpos = c2g_transform * cpos;
for (int i = 0; i < 3; ++i) {
if (gpos(i) > boundingMax(i) || gpos(i) < boundingMin(i))
return false;
}
return true;
}
//for pre-filter, no need for normal threshold
__inline__ __device__ bool findDenseCorr(unsigned int idx, unsigned int imageWidth, unsigned int imageHeight,
float distThresh, const Mat4f &transformi_inv, const Mat4f &transformj,
const Vec4f &intrinsics,
const float *tgtDepth, const float *srcDepth,
float depthMin, float depthMax,
Vec3f &boundingMin, Vec3f &boundingMax) {
unsigned int x = idx % imageWidth;
unsigned int y = idx / imageWidth;
Vec2i loc(x, y);
const Vec4f cposj = depthToCamera(intrinsics(0), intrinsics(1), intrinsics(2), intrinsics(3), loc, srcDepth[idx]);
if (cposj(2) > depthMin && cposj(2) < depthMax && isInBoundingBox(cposj, transformj, boundingMin, boundingMax)) {
Vec4f camPosSrcToTgt = transformi_inv * transformj * cposj;
Vec2f tgtScreenPosf = cameraToDepth(intrinsics(0), intrinsics(1), intrinsics(2), intrinsics(3),
camPosSrcToTgt);
Vec2i tgtScreenPos((int) roundf(tgtScreenPosf(0)), (int) roundf(tgtScreenPosf(1)));
if (tgtScreenPos(0) >= 0 && tgtScreenPos(1) >= 0 && tgtScreenPos(0) < (int) imageWidth &&
tgtScreenPos(1) < (int) imageHeight) {
Vec4f camPosTgt = depthToCamera(intrinsics(0), intrinsics(1), intrinsics(2), intrinsics(3),
tgtScreenPos,
tgtDepth[tgtScreenPos(1) * imageWidth + tgtScreenPos(0)]);
if (camPosTgt(2) > depthMin && camPosTgt(2) < depthMax) {
Vec4f diff = camPosSrcToTgt - camPosTgt;
if (diff.norm() <= distThresh) {
return true;
}
}
} // valid projection
} // valid src camera position
return false;
}
__inline__ __device__ bool findDenseCorr(unsigned int idx, unsigned int imageWidth, unsigned int imageHeight,
float distThresh, float normalThresh, const Mat4f &transformi_inv,
const Mat4f &transformj, const Vec4f &intrinsics,
const float *tgtDepth, const Vec4f *tgtNormals,
const float *srcDepth, const Vec4f *srcNormals,
float depthMin, float depthMax,
Vec3f &boundingMin, Vec3f &boundingMax) {
unsigned int x = idx % imageWidth;
unsigned int y = idx / imageWidth;
Vec2i loc(x, y);
const Vec4f cposj = depthToCamera(intrinsics(0), intrinsics(1), intrinsics(2), intrinsics(3), loc, srcDepth[idx]);
if (cposj(2) > depthMin && cposj(2) < depthMax && isInBoundingBox(cposj, transformj, boundingMin, boundingMax)) {
Vec4f nrmj = srcNormals[idx];
if (nrmj(0) != FLT_MAX) {
nrmj = transformi_inv * transformj * nrmj;
Vec4f camPosSrcToTgt = transformi_inv * transformj * cposj;
Vec2f tgtScreenPosf = cameraToDepth(intrinsics(0), intrinsics(1), intrinsics(2), intrinsics(3),
camPosSrcToTgt);
Vec2i tgtScreenPos((int) roundf(tgtScreenPosf(0)), (int) roundf(tgtScreenPosf(1)));
if (tgtScreenPos(0) >= 0 && tgtScreenPos(1) >= 0 && tgtScreenPos(0) < (int) imageWidth &&
tgtScreenPos(1) < (int) imageHeight) {
Vec4f camPosTgt = depthToCamera(intrinsics(0), intrinsics(1), intrinsics(2), intrinsics(3),
tgtScreenPos,
tgtDepth[tgtScreenPos(1) * imageWidth + tgtScreenPos(0)]);
if (camPosTgt(2) > depthMin && camPosTgt(2) < depthMax) {
Vec4f normalTgt = tgtNormals[tgtScreenPos(1) * imageWidth + tgtScreenPos(0)];
if (normalTgt(0) != FLT_MAX) {
Vec4f diff = camPosSrcToTgt - camPosTgt;
float dist = diff.norm();
float dNormal = nrmj.dot(normalTgt);
if (dNormal >= normalThresh && dist <= distThresh) {
return true;
}
}
}
} // valid projection
} // valid src normal
} // valid src camera position
return false;
}
//using camera positions
__device__ bool findDenseCorr(unsigned int idx, unsigned int imageWidth, unsigned int imageHeight,
float distThresh, float normalThresh, const Mat4f &transformi_inv,
const Mat4f &transformj, const Vec4f &intrinsics,
const Vec4f *tgtCamPos, const Vec4f *tgtNormals,
const Vec4f *srcCamPos, const Vec4f *srcNormals,
float depthMin, float depthMax,
Vec3f &boundingMin, Vec3f &boundingMax,
Vec4f &camPosSrc, Vec4f &camPosSrcToTgt,
Vec2f &tgtScreenPosf, Vec4f &camPosTgt, Vec4f &normalTgt) {
const Vec4f cposj = srcCamPos[idx];
if (cposj(2) > depthMin && cposj(2) < depthMax && isInBoundingBox(cposj, transformj, boundingMin, boundingMax)) {
camPosSrc = cposj;
Vec4f nrmj = srcNormals[idx];
if (nrmj(0) != FLT_MAX) {
nrmj = transformi_inv * transformj * nrmj;
camPosSrcToTgt = transformi_inv * transformj * camPosSrc;
tgtScreenPosf = cameraToDepth(intrinsics(0), intrinsics(1), intrinsics(2), intrinsics(3), camPosSrcToTgt);
Vec2i tgtScreenPos((int) roundf(tgtScreenPosf(0)), (int) roundf(tgtScreenPosf(1)));
if (tgtScreenPos(0) >= 0 && tgtScreenPos(1) >= 0 && tgtScreenPos(0) < (int) imageWidth &&
tgtScreenPos(1) < (int) imageHeight) {
Vec4f cposi = bilinearInterpolation(tgtScreenPosf(0), tgtScreenPosf(1), tgtCamPos,
imageWidth, imageHeight);
if (cposi(2) > depthMin && cposi(2) < depthMax) {
camPosTgt = cposi;
Vec4f nrmi = bilinearInterpolation(tgtScreenPosf(0), tgtScreenPosf(1), tgtNormals,
imageWidth, imageHeight);
if (nrmi(0) != FLT_MAX) {
normalTgt = nrmi;
Vec4f diff = camPosSrcToTgt - camPosTgt;
float dist = diff.norm();
float dNormal = nrmj.dot(nrmi);
if (dNormal >= normalThresh && dist <= distThresh) {
return true;
}
}
}
} // valid projection
} // valid src normal
} // valid src camera position
return false;
}
__inline__ __device__ Mat3f VectorToSkewSymmetricMatrix(const Vec3f &v) {
Mat3f res;
res.setZero();
res(1, 0) = v(2);
res(2, 0) = -v(1);
res(2, 1) = v(0);
res(0, 1) = -v(2);
res(0, 2) = v(1);
res(1, 2) = -v(0);
return res;
}
inline __device__ Mat23f dCameraToScreen(const Vec3f &p, float fx, float fy) {
Mat23f res;
res.setZero();
const float wSquared = p(2) * p(2);
res(0, 0) = fx / p(2);
res(1, 1) = fy / p(2);
res(0, 2) = -fx * p(0) / wSquared;
res(1, 2) = -fy * p(1) / wSquared;
return res;
}
/////////////////////////////////////////////////////////////////////////
// deriv for Ti: (A * e^e * D)^{-1} * p; A = Tj^{-1}; D = Ti
/////////////////////////////////////////////////////////////////////////
__inline__ __device__ Mat36f evalLie_derivI(const Mat4f &A, const Mat4f &D, const Vec3f &p) {
Mat312f j0;
Mat126f j1;
const Mat4f transform = A * D;
Vec3f pt = p - transform.topRightCorner(3, 1);
j0.setZero();
j1.setZero();
j0(0, 0) = pt(0);
j0(0, 1) = pt(1);
j0(0, 2) = pt(2);
j0(1, 3) = pt(0);
j0(1, 4) = pt(1);
j0(1, 5) = pt(2);
j0(2, 6) = pt(0);
j0(2, 7) = pt(1);
j0(2, 8) = pt(2);
for (unsigned int r = 0; r < 3; r++) {
for (unsigned int c = 0; c < 3; c++) {
j0(r, c + 9) = -transform(c, r); //-R(AD)^T
j1(r + 9, c) = A(r, c); // R(A)
}
}
Mat3f RA = A.topLeftCorner(3, 3);
for (unsigned int k = 0; k < 4; k++) {
Vec3f v(D(0, k), D(1, k), D(2, k));
Mat3f ss = VectorToSkewSymmetricMatrix(v);
Mat3f m = RA * ss * -1.0f; //RA * col k of D
for (unsigned int r = 0; r < 3; r++) {
for (unsigned int c = 0; c < 3; c++)
j1(3 * k + r, 3 + c) = m(r, c);
}
}
return j0 * j1;
}
/////////////////////////////////////////////////////////////////////////
// deriv for Tj: (A * e^e * D) * p; A = Ti^{-1}; D = Tj
/////////////////////////////////////////////////////////////////////////
__inline__ __device__ Mat36f evalLie_derivJ(const Mat4f &A, const Mat4f &D, const Vec3f &p) {
Vec3f dr1(D(0, 0), D(0, 1), D(0, 2)); //rows of D (rotation part)
Vec3f dr2(D(1, 0), D(1, 1), D(1, 2));
Vec3f dr3(D(2, 0), D(2, 1), D(2, 2));
float dtx = D(0, 3); //translation of D
float dty = D(1, 3);
float dtz = D(2, 3);
Mat36f jac;
jac(0, 0) = 1.0f;
jac(0, 1) = 0.0f;
jac(0, 2) = 0.0f;
jac(1, 0) = 0.0f;
jac(1, 1) = 1.0f;
jac(1, 2) = 0.0f;
jac(2, 0) = 0.0f;
jac(2, 1) = 0.0f;
jac(2, 2) = 1.0f;
jac(0, 3) = 0.0f;
jac(0, 4) = p.dot(dr3) + dtz;
jac(0, 5) = -(p.dot(dr2) + dty);
jac(1, 3) = -(p.dot(dr3) + dtz);
jac(1, 4) = 0.0f;
jac(1, 5) = p.dot(dr1) + dtx;
jac(2, 3) = p.dot(dr2) + dty;
jac(2, 4) = -(p.dot(dr1) + dtx);
jac(2, 5) = 0.0f;
jac = A.topLeftCorner(3, 3) * jac;
return jac;
}
__inline__ __device__ void computeJacobianBlockRow_i(Vec6f &jacBlockRow, const Mat4f &transform_i,
const Mat4f &invTransform_j, const Vec4f &camPosSrc,
const Vec4f &normalTgt) {
Vec3f camPosSrc_ = camPosSrc.head(3);
Vec3f normalTgt_ = normalTgt.head(3);
Mat36f jac = evalLie_derivI(invTransform_j, transform_i, camPosSrc_);
for (unsigned int i = 0; i < 6; i++) {
Vec3f v(jac(0, i), jac(1, i), jac(2, i));
jacBlockRow(i) = -v.dot(normalTgt_); //rot
}
}
__inline__ __device__ void computeJacobianBlockRow_j(Vec6f &jacBlockRow, const Mat4f &invTransform_i,
const Mat4f &transform_j, const Vec4f &camPosSrc,
const Vec4f &normalTgt) {
Vec3f camPosSrc_ = camPosSrc.head(3);
Vec3f normalTgt_ = normalTgt.head(3);
Mat36f jac = evalLie_derivJ(invTransform_i, transform_j, camPosSrc_);
for (unsigned int i = 0; i < 6; i++) {
Vec3f v(jac(0, i), jac(1, i), jac(2, i));
jacBlockRow(i) = -v.dot(normalTgt_); //rot
}
}
__inline__ __device__ void
computeJacobianBlockIntensityRow_i(Vec6f &jacBlockRow, const Vec2f &colorFocal, const Mat4f &transform_i,
const Mat4f &invTransform_j, const Vec4f &camPosSrc, const Vec4f &camPosSrcToTgt,
const Vec2f &intensityDerivTgt) {
Vec3f camPosSrc_ = camPosSrc.head(3);
Vec3f camPosSrcToTgt_ = camPosSrcToTgt.head(3);
Mat36f jac = evalLie_derivI(invTransform_j, transform_i, camPosSrc_);
Mat23f dProj = dCameraToScreen(camPosSrcToTgt_, colorFocal(0), colorFocal(1));
Vec2f dColorB = intensityDerivTgt;
jacBlockRow = jac.transpose() * dProj.transpose() * dColorB;
}
__inline__ __device__ void
computeJacobianBlockIntensityRow_j(Vec6f &jacBlockRow, const Vec2f &colorFocal, const Mat4f &invTransform_i,
const Mat4f &transform_j, const Vec4f &camPosSrc, const Vec4f &camPosSrcToTgt,
const Vec2f &intensityDerivTgt) {
Vec3f camPosSrc_ = camPosSrc.head(3);
Vec3f camPosSrcToTgt_ = camPosSrcToTgt.head(3);
Mat36f jac = evalLie_derivJ(invTransform_i, transform_j, camPosSrc_);
Mat23f dProj = dCameraToScreen(camPosSrcToTgt_, colorFocal(0), colorFocal(1));
Vec2f dColorB = intensityDerivTgt;
jacBlockRow = jac.transpose() * dProj.transpose() * dColorB;
}
////////////////////////////////////////
// build jtj/jtr
////////////////////////////////////////
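// Adds one correspondence's contribution to the dense normal equations: the diagonal 6x6 blocks for
// poses vi and vj, the off-diagonal (vi,vj) block, and the Jtr entries. Partial sums are warp-reduced,
// gathered in shared memory, and written to global memory with one atomicAdd per block.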
__inline__ __device__ void
addToLocalSystem(bool isValidCorr, float *d_JtJ, float *d_Jtr, float *d_J, unsigned int dim,
const Vec6f &jacobianBlockRow_i, const Vec6f &jacobianBlockRow_j,
unsigned int vi, unsigned int vj, float residual, float weight, unsigned int tidx) {
//fill in bottom half (vi < vj) -> x < y
for (unsigned int i = 0; i < 6; i++) {
for (unsigned int j = i; j < 6; j++) {
float dii = 0.0f;
float djj = 0.0f;
float dij = 0.0f;
float dji = 0.0f;
__shared__ float s_partJtJ[4];
if (tidx == 0) {
for (unsigned int k = 0; k < 4; k++)
s_partJtJ[k] = 0;
} //TODO try with first 4 threads for all tidx == 0
if (isValidCorr) {
if (vi > 0) {
dii = jacobianBlockRow_i(i) * jacobianBlockRow_i(j) * weight;
}
if (vj > 0) {
djj = jacobianBlockRow_j(i) * jacobianBlockRow_j(j) * weight;
}
if (vi > 0 && vj > 0) {
dij = jacobianBlockRow_i(i) * jacobianBlockRow_j(j) * weight;
if (i != j) {
dji = jacobianBlockRow_i(j) * jacobianBlockRow_j(i) * weight;
}
}
}
dii = warpReduce(dii);
djj = warpReduce(djj);
dij = warpReduce(dij);
dji = warpReduce(dji);
__syncthreads();
if (tidx % WARP_SIZE == 0) {
atomicAdd(&s_partJtJ[0], dii);
atomicAdd(&s_partJtJ[1], djj);
atomicAdd(&s_partJtJ[2], dij);
atomicAdd(&s_partJtJ[3], dji);
}
__syncthreads();
if (tidx == 0) {
atomicAdd(&d_JtJ[(vi * 6 + j) * dim + (vi * 6 + i)], s_partJtJ[0]);
atomicAdd(&d_JtJ[(vj * 6 + j) * dim + (vj * 6 + i)], s_partJtJ[1]);
                    ////Ji^T*Jj and Jj^T*Ji are transposes of each other; they fill the lower and upper halves of the off-diagonal block
atomicAdd(&d_JtJ[(vj * 6 + j) * dim + (vi * 6 + i)], s_partJtJ[2]);
atomicAdd(&d_JtJ[(vj * 6 + i) * dim + (vi * 6 + j)], s_partJtJ[3]);
}
}
float jtri = 0.0f;
float jtrj = 0.0f;
__shared__ float s_partJtr[2];
if (tidx == 0) { for (unsigned int k = 0; k < 2; k++) s_partJtr[k] = 0; }
if (isValidCorr) {
if (vi > 0) jtri = jacobianBlockRow_i(i) * residual * weight;
if (vj > 0) jtrj = jacobianBlockRow_j(i) * residual * weight;
}
jtri = warpReduce(jtri);
jtrj = warpReduce(jtrj);
__syncthreads();
if (tidx % WARP_SIZE == 0) {
atomicAdd(&s_partJtr[0], jtri);
atomicAdd(&s_partJtr[1], jtrj);
}
__syncthreads();
if (tidx == 0) {
atomicAdd(&d_Jtr[vi * 6 + i], s_partJtr[0]);
atomicAdd(&d_Jtr[vj * 6 + i], s_partJtr[1]);
}
#ifdef DEBUG
float Ji = 0.f;
float Jj = 0.f;
if (isValidCorr) {
if (vi > 0)
Ji = jacobianBlockRow_i(i);
if (vj > 0)
Jj = jacobianBlockRow_j(i);
}
Ji = warpReduce(Ji);
Jj = warpReduce(Jj);
if (tidx % WARP_SIZE == 0) {
atomicAdd(&d_J[vi * 6 + i], Ji);
atomicAdd(&d_J[vj * 6 + i], Jj);
}
#endif
}
}
//Find overlapping frame pairs; for speed, only a subsampled set of pixels is tested
__global__ void FindImageImageCorr_Kernel(SolverInput input, SolverState state, SolverParameters parameters) {
// image indices
unsigned int i, j; // project from j to i
i = blockIdx.x;
j = blockIdx.y; // all pairwise
if (i >= j) return;
const unsigned int tidx = threadIdx.x;
const unsigned int subWidth = input.denseDepthWidth / parameters.denseOverlapCheckSubsampleFactor;
const unsigned int x = (tidx % subWidth) * parameters.denseOverlapCheckSubsampleFactor;
const unsigned int y = (tidx / subWidth) * parameters.denseOverlapCheckSubsampleFactor;
const unsigned int idx = y * input.denseDepthWidth + x;
if (idx < (input.denseDepthWidth * input.denseDepthHeight)) {
Mat4f transform = state.d_xTransformInverses[i] * state.d_xTransforms[j];
//if (!computeAngleDiff(transform, 1.0f)) return; //~60 degrees
if (!computeAngleDiff(transform, 0.52f)) return; //TODO ~30 degrees
// find correspondence
__shared__ int foundCorr[1];
foundCorr[0] = 0;
__syncthreads();
if (findDenseCorr(idx, input.denseDepthWidth, input.denseDepthHeight,
parameters.denseDistThresh, state.d_xTransformInverses[i],
state.d_xTransforms[j], input.intrinsics,
input.d_cacheFrames[i].d_depthDownsampled, input.d_cacheFrames[j].d_depthDownsampled,
parameters.denseDepthMin, parameters.denseDepthMax,
parameters.boundingMin, parameters.boundingMax)) { //i tgt, j src
atomicAdd(foundCorr, 1);
} // found correspondence
__syncthreads();
if (tidx == 0) {
if (foundCorr[0] > parameters.minNumOverlapCorr) {
int addr = atomicAdd(state.d_numDenseOverlappingImages, 1);
state.d_denseOverlappingImages[addr] = make_uint2(i, j);
}
}
} // valid image pixel
}
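// Counts, for one overlapping image pair per block, how many source pixels find a valid dense
// correspondence in the target frame; the per-block count is reduced in shared memory and added to
// d_denseCorrCounts[imPairIdx].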
__global__ void FindDenseCorrespondences_Kernel(SolverInput input, SolverState state, SolverParameters parameters) {
const int imPairIdx = blockIdx.x; //should not go out of bounds, no need to check
uint2 imageIndices = state.d_denseOverlappingImages[imPairIdx];
unsigned int i = imageIndices.x;
unsigned int j = imageIndices.y;
const unsigned int tidx = threadIdx.x;
const unsigned int gidx = tidx * gridDim.y + blockIdx.y;
if (gidx < (input.denseDepthWidth * input.denseDepthHeight)) {
// find correspondence
const int numWarps = THREADS_PER_BLOCK_DENSE_DEPTH / WARP_SIZE;
__shared__ int s_count[numWarps];
s_count[0] = 0;
int count = 0;
if (findDenseCorr(gidx, input.denseDepthWidth, input.denseDepthHeight,
parameters.denseDistThresh, parameters.denseNormalThresh, state.d_xTransformInverses[i],
state.d_xTransforms[j], input.intrinsics,
input.d_cacheFrames[i].d_depthDownsampled, input.d_cacheFrames[i].d_normalsDownsampled,
input.d_cacheFrames[j].d_depthDownsampled, input.d_cacheFrames[j].d_normalsDownsampled,
parameters.denseDepthMin, parameters.denseDepthMax,
parameters.boundingMin, parameters.boundingMax)) { //i tgt, j src
count++;
} // found correspondence
count = warpReduce(count);
__syncthreads();
if (tidx % WARP_SIZE == 0) {
s_count[tidx / WARP_SIZE] = count;
}
__syncthreads();
for (unsigned int stride = numWarps / 2; stride > 0; stride /= 2) {
if (tidx < stride) s_count[tidx] = s_count[tidx] + s_count[tidx + stride];
__syncthreads();
}
if (tidx == 0) {
atomicAdd(&state.d_denseCorrCounts[imPairIdx], s_count[0]);
}
} // valid image pixel
}
__global__ void WeightDenseCorrespondences_Kernel(unsigned int N, SolverState state, SolverParameters parameters) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N) {
// apply ln to weights
float x = state.d_denseCorrCounts[idx];
if (x > 0) {
if (x < parameters.minNumDenseCorr) {//TODO change
state.d_denseCorrCounts[idx] = 0; //don't consider too small #corr
} else {
state.d_denseCorrCounts[idx] = 1.0f / fminf(logf(x), 9.0f); // natural log
}
}
}
}
template<bool useDepth, bool useColor>
__global__ void BuildDenseSystem_Kernel(SolverInput input, SolverState state, SolverParameters parameters) {
const int imPairIdx = blockIdx.x;
uint2 imageIndices = state.d_denseOverlappingImages[imPairIdx];
unsigned int i = imageIndices.x;
unsigned int j = imageIndices.y;
float imPairWeight = state.d_denseCorrCounts[imPairIdx];
if (imPairWeight == 0.0f) return;
const unsigned int idx = threadIdx.x;
const unsigned int srcIdx = idx * gridDim.y + blockIdx.y;
if (srcIdx < (input.denseDepthWidth * input.denseDepthHeight)) {
Mat4f transform_i = state.d_xTransforms[i];
Mat4f transform_j = state.d_xTransforms[j];
Mat4f invTransform_i = state.d_xTransformInverses[i];
Mat4f invTransform_j = state.d_xTransformInverses[j];
// point-to-plane term
Vec6f depthJacBlockRow_i, depthJacBlockRow_j;
depthJacBlockRow_i.setZero();
depthJacBlockRow_j.setZero();
float depthRes = 0.0f;
float depthWeight = 0.0f;
// color term
Vec6f colorJacBlockRow_i, colorJacBlockRow_j;
colorJacBlockRow_i.setZero();
colorJacBlockRow_j.setZero();
float colorRes = 0.0f;
float colorWeight = 0.0f;
// find correspondence
Vec4f camPosSrc;
Vec4f camPosSrcToTgt;
Vec4f camPosTgt;
Vec4f normalTgt;
Vec2f tgtScreenPos;
bool foundCorr = findDenseCorr(srcIdx, input.denseDepthWidth, input.denseDepthHeight,
parameters.denseDistThresh, parameters.denseNormalThresh,
invTransform_i, transform_j, input.intrinsics,
input.d_cacheFrames[i].d_cameraposDownsampled,
input.d_cacheFrames[i].d_normalsDownsampled,
input.d_cacheFrames[j].d_cameraposDownsampled,
input.d_cacheFrames[j].d_normalsDownsampled,
parameters.denseDepthMin, parameters.denseDepthMax,
parameters.boundingMin, parameters.boundingMax,
camPosSrc, camPosSrcToTgt,
tgtScreenPos, camPosTgt, normalTgt); //i tgt, j src
if (useDepth) {
if (foundCorr) {
// point-to-plane residual
Vec4f diff = camPosTgt - camPosSrcToTgt;
depthRes = diff.dot(normalTgt);
depthWeight = parameters.weightDenseDepth * imPairWeight *
(powf(fmaxf(0.0f, 1.0f - camPosTgt(2) / (2.0f * parameters.denseDepthMax)),
2.5f)); //fr2_xyz_half
if (i > 0)
computeJacobianBlockRow_i(depthJacBlockRow_i, transform_i, invTransform_j, camPosSrc, normalTgt);
if (j > 0)
computeJacobianBlockRow_j(depthJacBlockRow_j, invTransform_i, transform_j, camPosSrc, normalTgt);
}
#ifdef DEBUG
float res = 0.f;
int num = 0;
if (foundCorr) {
res = depthRes;
num = 1;
}
res = warpReduce(res);
num = warpReduce(num);
if (idx % WARP_SIZE == 0) {
atomicAdd(&state.d_sumResidualDEBUG[imPairIdx], res);
atomicAdd(&state.d_numCorrDEBUG[imPairIdx], num);
}
#endif
addToLocalSystem(foundCorr, state.d_denseJtJ, state.d_denseJtr, state.d_J, input.numberOfImages * 6,
depthJacBlockRow_i, depthJacBlockRow_j, i, j, depthRes, depthWeight, idx);
}
if (useColor) {
bool foundCorrColor = false;
if (foundCorr) {
const Vec2f intensityDerivTgt = bilinearInterpolation(tgtScreenPos(0), tgtScreenPos(1),
input.d_cacheFrames[i].d_intensityDerivsDownsampled,
input.denseDepthWidth,
input.denseDepthHeight);
const float intensityTgt = bilinearInterpolation(tgtScreenPos(0), tgtScreenPos(1),
input.d_cacheFrames[i].d_intensityDownsampled,
input.denseDepthWidth, input.denseDepthHeight);
colorRes = intensityTgt - input.d_cacheFrames[j].d_intensityDownsampled[srcIdx];
foundCorrColor = (intensityTgt != FLT_MAX && intensityDerivTgt(0) != FLT_MAX &&
abs(colorRes) < parameters.denseColorThresh &&
intensityDerivTgt.norm() > parameters.denseColorGradientMin);
if (foundCorrColor) {
const Vec2f focalLength(input.intrinsics(0), input.intrinsics(1));
if (i > 0)
computeJacobianBlockIntensityRow_i(colorJacBlockRow_i, focalLength, transform_i, invTransform_j,
camPosSrc, camPosSrcToTgt, intensityDerivTgt);
if (j > 0)
computeJacobianBlockIntensityRow_j(colorJacBlockRow_j, focalLength, invTransform_i, transform_j,
camPosSrc, camPosSrcToTgt, intensityDerivTgt);
colorWeight = parameters.weightDenseColor * imPairWeight *
fmaxf(0.0f, 1.0f - abs(colorRes) / (1.15f * parameters.denseColorThresh));
}
}
#ifdef DEBUG
float res_c = 0.f;
int num_c = 0;
if (foundCorrColor) {
res_c = colorRes;
num_c = 1;
}
res_c = warpReduce(res_c);
num_c = warpReduce(num_c);
if (idx % WARP_SIZE == 0) {
atomicAdd(&state.d_sumResidualColorDEBUG[imPairIdx], res_c);
atomicAdd(&state.d_numCorrColorDEBUG[imPairIdx], num_c);
}
#endif
addToLocalSystem(foundCorrColor, state.d_denseJtJ, state.d_denseJtr, state.d_J, input.numberOfImages * 6,
colorJacBlockRow_i, colorJacBlockRow_j, i, j, colorRes, colorWeight, idx);
}
} // valid image pixel
}
__global__ void FlipJtJ_Kernel(unsigned int total, unsigned int dim, float *d_JtJ) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < total) {
const unsigned int x = idx % dim;
const unsigned int y = idx / dim;
if (x > y) {
d_JtJ[y * dim + x] = d_JtJ[x * dim + y];
}
}
}
bool BuildDenseSystem(const SolverInput &input, SolverState &state, SolverParameters ¶meters) {
const unsigned int N = input.numberOfImages;
const int sizeJtr = 6 * N;
const int sizeJtJ = sizeJtr * sizeJtr;
const unsigned int maxDenseImPairs = input.numberOfImages * (input.numberOfImages - 1) / 2;
CUDA_SAFE_CALL(cudaMemset(state.d_denseCorrCounts, 0, sizeof(float) * maxDenseImPairs));
CUDA_SAFE_CALL(cudaMemset(state.d_denseJtJ, 0, sizeof(float) * sizeJtJ));
CUDA_SAFE_CALL(cudaMemset(state.d_denseJtr, 0, sizeof(float) * sizeJtr));
CUDA_SAFE_CALL(cudaMemset(state.d_numDenseOverlappingImages, 0, sizeof(int)));
#ifdef DEBUG
CUDA_SAFE_CALL(cudaMemset(state.d_sumResidualDEBUG, 0, sizeof(float) * maxDenseImPairs));
CUDA_SAFE_CALL(cudaMemset(state.d_numCorrDEBUG, 0, sizeof(int) * maxDenseImPairs));
CUDA_SAFE_CALL(cudaMemset(state.d_sumResidualColorDEBUG, 0, sizeof(float) * maxDenseImPairs));
CUDA_SAFE_CALL(cudaMemset(state.d_numCorrColorDEBUG, 0, sizeof(int) * maxDenseImPairs));
CUDA_SAFE_CALL(cudaMemset(state.d_J, 0, sizeof(float) * sizeJtr));
#endif
dim3 gridImImOverlap(N, N, 1);
FindImageImageCorr_Kernel << < gridImImOverlap, THREADS_PER_BLOCK_DENSE_OVERLAP >> > (input, state, parameters);
int numOverlapImagePairs;
CUDA_SAFE_CALL(cudaMemcpy(&numOverlapImagePairs, state.d_numDenseOverlappingImages,
sizeof(int), cudaMemcpyDeviceToHost));
if (numOverlapImagePairs == 0) {
printf("warning: no overlapping images for dense solve\n");
return false;
}
const int reductionGlobal = (input.denseDepthWidth * input.denseDepthHeight + THREADS_PER_BLOCK_DENSE_DEPTH - 1) /
THREADS_PER_BLOCK_DENSE_DEPTH;
dim3 grid(numOverlapImagePairs, reductionGlobal);
FindDenseCorrespondences_Kernel << < grid, THREADS_PER_BLOCK_DENSE_DEPTH >> > (input, state, parameters);
int wgrid = (numOverlapImagePairs + THREADS_PER_BLOCK_DENSE_DEPTH_FLIP - 1) / THREADS_PER_BLOCK_DENSE_DEPTH_FLIP;
WeightDenseCorrespondences_Kernel << < wgrid, THREADS_PER_BLOCK_DENSE_DEPTH_FLIP >> >
(maxDenseImPairs, state, parameters);
bool useDepth = parameters.weightDenseDepth > 0.0f;
bool useColor = parameters.weightDenseColor > 0.0f;
if (useDepth && useColor)
BuildDenseSystem_Kernel<true, true> << < grid, THREADS_PER_BLOCK_DENSE_DEPTH >> >
(input, state, parameters);
else if (useDepth)
BuildDenseSystem_Kernel<true, false> << < grid, THREADS_PER_BLOCK_DENSE_DEPTH >> >
(input, state, parameters);
else {
printf("useDepth and useColor error!\n");
return false;
}
const unsigned int flipgrid =
(sizeJtJ + THREADS_PER_BLOCK_DENSE_DEPTH_FLIP - 1) / THREADS_PER_BLOCK_DENSE_DEPTH_FLIP;
FlipJtJ_Kernel << < flipgrid, THREADS_PER_BLOCK_DENSE_DEPTH_FLIP >> > (sizeJtJ, sizeJtr, state.d_denseJtJ);
#ifdef DEBUG
uint2 *denseOverlappingImages = new uint2[numOverlapImagePairs];
CUDA_SAFE_CALL(cudaMemcpy(denseOverlappingImages, state.d_denseOverlappingImages,
sizeof(uint2) * numOverlapImagePairs, cudaMemcpyDeviceToHost));
float *denseCorrCounts = new float[numOverlapImagePairs];
CUDA_SAFE_CALL(cudaMemcpy(denseCorrCounts, state.d_denseCorrCounts,
sizeof(float) * numOverlapImagePairs, cudaMemcpyDeviceToHost));
float *sumResidualDEBUG = new float[numOverlapImagePairs];
int *numCorrDEBUG = new int[numOverlapImagePairs];
CUDA_SAFE_CALL(cudaMemcpy(sumResidualDEBUG, state.d_sumResidualDEBUG, sizeof(float) * numOverlapImagePairs,
cudaMemcpyDeviceToHost));
    CUDA_SAFE_CALL(cudaMemcpy(numCorrDEBUG, state.d_numCorrDEBUG, sizeof(int) * numOverlapImagePairs,
cudaMemcpyDeviceToHost));
float *sumResidualColorDEBUG = new float[numOverlapImagePairs];
int *numCorrColorDEBUG = new int[numOverlapImagePairs];
CUDA_SAFE_CALL(cudaMemcpy(sumResidualColorDEBUG, state.d_sumResidualColorDEBUG,
sizeof(float) * numOverlapImagePairs,
cudaMemcpyDeviceToHost));
    CUDA_SAFE_CALL(cudaMemcpy(numCorrColorDEBUG, state.d_numCorrColorDEBUG, sizeof(int) * numOverlapImagePairs,
cudaMemcpyDeviceToHost));
// float *J = new float[sizeJtr];
// CUDA_SAFE_CALL(cudaMemcpy(J, state.d_J, sizeof(float) * sizeJtr, cudaMemcpyDeviceToHost));
printf("image pair num: %d\n", numOverlapImagePairs);
for (int i = 0; i < numOverlapImagePairs; ++i) {
printf("image pair (%d, %d): %f %d %f %d %f\n", denseOverlappingImages[i].x, denseOverlappingImages[i].y,
denseCorrCounts[i], numCorrDEBUG[i], sumResidualDEBUG[i],
numCorrColorDEBUG[i], sumResidualColorDEBUG[i]);
}
// printf("J:\n");
// for (int i = 0; i < sizeJtr; ++i) {
// printf("%f ", J[i]);
// }
printf("\n");
delete[] denseOverlappingImages;
delete[] denseCorrCounts;
delete[] sumResidualDEBUG;
    delete[] numCorrDEBUG;
    delete[] sumResidualColorDEBUG;
    delete[] numCorrColorDEBUG;
    // delete[] J;
#endif
return true;
}
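// Launch structure of BuildDenseSystem (as set up above):
//   1. FindImageImageCorr_Kernel runs on an N x N grid to flag overlapping image pairs.
//   2. FindDenseCorrespondences_Kernel runs on a (numOverlapImagePairs x reductionGlobal) grid,
//      where reductionGlobal covers the denseDepthWidth * denseDepthHeight pixels in chunks of
//      THREADS_PER_BLOCK_DENSE_DEPTH threads.
//   3. WeightDenseCorrespondences_Kernel is launched with roughly one thread per image pair to
//      process the per-pair correspondence counts.
//   4. BuildDenseSystem_Kernel<useDepth, useColor> accumulates d_denseJtJ / d_denseJtr, and
//      FlipJtJ_Kernel mirrors the lower triangle into the upper one.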
float EvalGNConvergence(SolverInput &input, SolverState &state) {
const unsigned int N = input.numberOfImages;
Vec3f *deltaRot = new Vec3f[N];
Vec3f *deltaTrans = new Vec3f[N];
CUDA_SAFE_CALL(cudaMemcpy(deltaRot, state.d_deltaRot, sizeof(Vec3f) * N, cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaMemcpy(deltaTrans, state.d_deltaTrans, sizeof(Vec3f) * N, cudaMemcpyDeviceToHost));
float maxVal = 0.f;
for (int i = 0; i < N; ++i) {
float r1 = deltaRot[i].cwiseAbs().maxCoeff();
float r2 = deltaTrans[i].cwiseAbs().maxCoeff();
maxVal = fmaxf(maxVal, fmaxf(r1, r2));
}
delete[] deltaRot;
delete[] deltaTrans;
return maxVal;
}
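// Example: with a single image whose deltaRot = (0.001, -0.002, 0.0004) and
// deltaTrans = (0.003, 0.0, -0.001), EvalGNConvergence returns 0.003, which is below
// the 0.005 early-out threshold checked in solve().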
void solve(SolverInput &input, SolverState &state, SolverParameters ¶meters) {
for (unsigned int nIter = 0; nIter < parameters.nNonLinearIterations; nIter++) {
parameters.weightDenseDepth = input.weightsDenseDepth[nIter];
parameters.weightDenseColor = input.weightsDenseColor[nIter];
convertLiePosesToMatrices(state.d_xRot, state.d_xTrans, input.numberOfImages, state.d_xTransforms,
state.d_xTransformInverses);
bool ok = BuildDenseSystem(input, state, parameters);
if (!ok) {
printf("solve failed!\n");
break;
}
#ifdef DEBUG
float *denseJtJ = new float[36 * input.numberOfImages * input.numberOfImages];
float *denseJtr = new float[6 * input.numberOfImages];
        cudaMemcpy(denseJtJ, state.d_denseJtJ, sizeof(float) * 36 * input.numberOfImages * input.numberOfImages,
                   cudaMemcpyDeviceToHost);
        cudaMemcpy(denseJtr, state.d_denseJtr, sizeof(float) * 6 * input.numberOfImages, cudaMemcpyDeviceToHost);
// printf("denseJtJ:\n");
// for (int k = 0; k < input.numberOfImages * 6; ++k) {
// for (int m = 0; m < input.numberOfImages * 6; ++m) {
// printf("%f ", denseJtJ[k * input.numberOfImages * 6 + m]);
// }
// printf("\n");
// }
printf("denseJtr:\n");
for (int m = 0; m < input.numberOfImages * 6; ++m) {
printf("%f ", denseJtr[m]);
}
printf("\n");
delete[] denseJtJ;
delete[] denseJtr;
#endif
Initialization(input, state, parameters);
#ifdef DEBUG
Vec3f *rRot = new Vec3f[input.numberOfImages];
Vec3f *rTrans = new Vec3f[input.numberOfImages];
Vec3f *zRot = new Vec3f[input.numberOfImages];
Vec3f *zTrans = new Vec3f[input.numberOfImages];
Vec3f *pRot = new Vec3f[input.numberOfImages];
Vec3f *pTrans = new Vec3f[input.numberOfImages];
cudaMemcpy(rRot, state.d_rRot, sizeof(Vec3f) * input.numberOfImages, cudaMemcpyDeviceToHost);
cudaMemcpy(rTrans, state.d_rTrans, sizeof(Vec3f) * input.numberOfImages, cudaMemcpyDeviceToHost);
cudaMemcpy(zRot, state.d_zRot, sizeof(Vec3f) * input.numberOfImages, cudaMemcpyDeviceToHost);
cudaMemcpy(zTrans, state.d_zTrans, sizeof(Vec3f) * input.numberOfImages, cudaMemcpyDeviceToHost);
cudaMemcpy(pRot, state.d_pRot, sizeof(Vec3f) * input.numberOfImages, cudaMemcpyDeviceToHost);
cudaMemcpy(pTrans, state.d_pTrans, sizeof(Vec3f) * input.numberOfImages, cudaMemcpyDeviceToHost);
for (int k = 0; k < input.numberOfImages; ++k) {
std::cout << rRot[k] << std::endl;
std::cout << rTrans[k] << std::endl;
// std::cout << zRot[k] << std::endl;
// std::cout << zTrans[k] << std::endl;
// std::cout << pRot[k] << std::endl;
// std::cout << pTrans[k] << std::endl;
}
delete[] rRot;
delete[] rTrans;
delete[] zRot;
delete[] zTrans;
delete[] pRot;
delete[] pTrans;
#endif
for (unsigned int linIter = 0; linIter < parameters.nLinIterations; linIter++) {
if (PCGIteration(input, state, parameters, linIter == parameters.nLinIterations - 1)) {
break;
}
}
if (nIter < parameters.nNonLinearIterations - 1 && EvalGNConvergence(input, state) < 0.005f) {
printf("EARLY OUT\n");
break;
}
}
convertLiePosesToMatrices(state.d_xRot, state.d_xTrans, input.numberOfImages, state.d_xTransforms,
state.d_xTransformInverses);
cudaDeviceSynchronize();
}
__global__ void copyVec3ToVec4_kernel(Vec4f *dst, Vec3f *src, int num, float w) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < num) {
dst[id] = Vec4f(src[id](0), src[id](1), src[id](2), w);
}
}
void copyVec3ToVec4(Vec4f *dst, Vec3f *src, int num, float w) {
copyVec3ToVec4_kernel << < (num + 8 - 1) / 8, 8 >> > (dst, src, num, w);
} |
166a1c1a0bd3c0eafd0ccc74472945ae6a7a3757.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
#include <opencv2/opencv.hpp>
#include <fstream>
#include <string>
#include <thread>
#include <chrono>
using namespace cv;
using namespace std;
// Kernel: filter each image in the batch and write the filtered copy to imgW;
// the filtered images are then summed and averaged by the kernels below.
__global__
void filterMean(int *imgR, int *imgW, int batchSize, int width, int dim, int num_colors, int tol)
{
int A = batchSize*dim;
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < batchSize*dim; i += stride){
int bn = i/(dim*num_colors);//batch number
int x = i%width;
int y = (i%dim)/width;
int r = imgR[i];
int g = imgR[i+A];
int b = imgR[i+2*A];
int a = (y*width+x)+(dim*bn);
//coord=i
//printf("before %d\n",r);
if(x<(width/2)){
//search right
if(y<(width/2)){
//search down
while(r==0 || (b-r)>tol || (r-b)<-tol){
x++;
y++;
a = (y*width+x)+(dim*bn);
r = imgR[a];
g = imgR[a+A];
b = imgR[a+2*A];
}
}
else{
//search up
while(r==0 || (b-r)>tol || (r-b)<-tol){
x++;
y--;
a = (y*width+x)+(dim*bn);
r = imgR[a];
g = imgR[a+A];
b = imgR[a+2*A];
}
}
}
else{
//search left
if(y<(width/2)){
//search down
while(r==0 || (b-r)>tol || (r-b)<-tol){
x--;
y++;
a = (y*width+x)+(dim*bn);
r = imgR[a];
g = imgR[a+A];
b = imgR[a+2*A];
}
}
else{
while(r==0 || (b-r)>tol || (r-b)<-tol){
x--;
y--;
a = (y*width+x)+(dim*bn);
r = imgR[a];
g = imgR[a+A];
b = imgR[a+2*A];
}
}
}
imgW[i] = r;
if(i%width==256 && (i%dim)/width==256){
printf("r %d imgW %d imgR %d\n",r,imgW[i],imgR[i]);
}
}
}
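// How the search above proceeds (the coordinates are hypothetical): a pixel at
// (x, y) = (100, 700) in a 1024-wide image has x < width/2 and y >= width/2, so the
// kernel steps (x++, y--) from that position until it reaches a pixel whose red value
// is non-zero and not more than tol below its blue value, and writes that red value
// to imgW. Note the walk is not bounds-checked, so a diagonal with no matching pixel
// can read outside the image.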
__global__
void sumBatch(int *imgR, int *imgW, int batchSize, int dim){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
//now filtered images are in imgW
for (int i = index; i < dim; i += stride){
for (int j = 0; j<batchSize; j++){
imgR[i] = imgW[i+j*dim];
}
}
}
__global__
void divideBatch(int *imgR, int batchSize, int dim){
//now imgR holds sum
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < dim; i += stride){
int a = imgR[i];
imgR[i] = a/batchSize;
}
}
__global__
void meanBatch(int *imgR, float *meanImg, int batchNum, int dim){
//now imgR holds sum
int width = 1024;
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < dim; i += stride){
int a = imgR[i];
float b = meanImg[i];
if(i%width==256 && (i%dim)/width==256){
printf("before %d %d %d\n",i,imgR[i],meanImg[i]);
}
meanImg[i] = b+((a-b)/((float)batchNum));
if(i%width==256 && (i%dim)/width==256){
printf("after %d %d %d\n",i,imgR[i],meanImg[i]);
}
}
}
void displayImage(Mat imgMat, int *img){
int i = 0;
for(int y = 0; y<imgMat.rows; y++){
for(int x = 0; x<imgMat.cols; x++){
int c = img[i];
imgMat.at<Vec3b>(Point(x,y)) = Vec3b(c,c,c);
i++;
}
}
namedWindow( "image", WINDOW_AUTOSIZE );
imshow( "Image", imgMat );
waitKey(0);
}
void displayImage(Mat imgMat, float *img){
int i = 0;
for(int y = 0; y<imgMat.rows; y++){
for(int x = 0; x<imgMat.cols; x++){
int c = img[i];
imgMat.at<Vec3b>(Point(x,y)) = Vec3b(c,c,c);
i++;
}
}
namedWindow( "image", WINDOW_AUTOSIZE );
imshow( "Image", imgMat );
waitKey(0);
}
int main(void)
{
const int NUM_COLORS = 3; //number of colors
const int IMG_WIDTH = 1024;
const int IMG_DIM = IMG_WIDTH*IMG_WIDTH;
const int IMG_DIM_COLORS = IMG_DIM*NUM_COLORS;
const int TOL = 30;
int device_count;
size_t max_mem = 0;
int best_device = 0;
hipGetDeviceCount(&device_count);
for(int i = 0; i<device_count; i++){
size_t curAvailMem, totalMem;
hipSetDevice(i);
hipMemGetInfo(&curAvailMem, &totalMem);
printf("%zd %zd %zd\n",i,curAvailMem, totalMem);
if(curAvailMem>max_mem){
max_mem = curAvailMem;
best_device = i;
}
}
printf("Best device is %d with %zd free memory\n",best_device,max_mem);
hipSetDevice(best_device);
int NUM_FILES = 0;
string line;
ifstream myfile ("images.txt");
if (myfile.is_open()){
while ( getline (myfile,line) ){
NUM_FILES++;
}
}
auto begin = std::chrono::high_resolution_clock::now();
float *meanImg;
Mat imgMat;
float avgDur = 0.0f;
// Allocate Unified Memory accessible from CPU or GPU
hipMallocManaged(&meanImg, IMG_DIM*sizeof(float));
//open images
ifstream myfile2 ("images.txt");
if (myfile2.is_open()){
int num_batchs_proc = 0;
int num_imgs_proc = 0;
while ( num_imgs_proc < NUM_FILES ){
int *batch;
int *batchMean;
size_t curAvailMem;
size_t totalMem;
hipMemGetInfo(&curAvailMem, &totalMem);
int batchSize = (curAvailMem-IMG_DIM*sizeof(int)+IMG_DIM*sizeof(float))/(8*IMG_DIM_COLORS*sizeof(int))-1;
printf("%d %zd %zd\n",batchSize, curAvailMem, totalMem);
if(batchSize+num_imgs_proc>NUM_FILES){
batchSize = NUM_FILES-num_imgs_proc;
}
batchSize = 2;
printf("%d %zd %zd\n",batchSize, curAvailMem, totalMem);
hipMallocManaged(&batch, batchSize*IMG_DIM_COLORS*sizeof(int));
hipMallocManaged(&batchMean, batchSize*IMG_DIM*sizeof(int));
hipMemGetInfo(&curAvailMem, &totalMem);
printf("%d %zd %zd\n",batchSize, curAvailMem, totalMem);
printf("initilize batch\n");
//initilize batch
int i = 0;
for(int b =0; b<batchSize; b++){
getline(myfile2, line);
imgMat = imread( line, IMREAD_COLOR );
printf("%d %d\n",b,batchSize);
//printf("%zd %zd\n",i,i+2*IMG_DIM*b);
for(int y = 0; y<imgMat.rows; y++){
for(int x = 0; x<imgMat.cols; x++){
          //segfaulting somewhere between batch[1796210688] and batch[1799356416]
Vec3b color = imgMat.at<Vec3b>(Point(x,y));
batch[i] = (int) color[0];
batch[i+IMG_DIM*b] = (int) color[1];
batch[i+2*IMG_DIM*b] = (int) color[2];
if(batch[i]<0 || batch[i+IMG_DIM*b]<0 || batch[i+2*IMG_DIM*b]<0){
printf("%d %d %d %d %d",i,b,batch[i],batch[i+IMG_DIM*b],batch[i+2*IMG_DIM*b]);
}
batchMean[i] = 0;
i++;
}
}
}
printf("Batch Sample %d\n", batch[(batchSize/2)*(IMG_DIM_COLORS/2)]);
int blockSize = 256;
int numBlocks = (IMG_DIM*batchSize + blockSize - 1) / blockSize;
printf("%d\n", batchSize);
hipLaunchKernelGGL(( filterMean), dim3(numBlocks), dim3(blockSize), 0, 0, batch, batchMean, batchSize, IMG_WIDTH, IMG_DIM, NUM_COLORS, TOL);
hipDeviceSynchronize();
num_batchs_proc++;
blockSize = 256;
numBlocks = (IMG_DIM + blockSize - 1) / blockSize;
printf("%d\n", num_batchs_proc);
hipLaunchKernelGGL(( sumBatch), dim3(numBlocks),dim3(blockSize), 0, 0, batch,batchMean,batchSize,IMG_DIM);
hipDeviceSynchronize();
hipLaunchKernelGGL(( divideBatch), dim3(numBlocks),dim3(blockSize), 0, 0, batch,batchSize,IMG_DIM);
hipDeviceSynchronize();
hipLaunchKernelGGL(( meanBatch), dim3(numBlocks),dim3(blockSize), 0, 0, batch,meanImg,num_batchs_proc,IMG_DIM);
hipDeviceSynchronize();
printf("Mean Sample %d\n", batchMean[IMG_DIM/2]);
printf("%d\n", num_batchs_proc);
num_imgs_proc+= batchSize;
printf("%d\n",num_imgs_proc);
hipFree(batch);
hipFree(batchMean);
break;
}
}
//Reconstruct Mat
int i = 0;
for(int y = 0; y<imgMat.rows; y++){
for(int x = 0; x<imgMat.cols; x++){
int c = meanImg[i];
imgMat.at<Vec3b>(Point(x,y)) = Vec3b(c,c,c);
i++;
}
}
hipFree(meanImg);
namedWindow( line, WINDOW_AUTOSIZE );
imwrite("./mean.jpg",imgMat);
imshow( line, imgMat );
waitKey(0);
return 0;
}
//export PATH=/usr/local/cuda-10.0/bin${PATH:+:${PATH}}
//nvcc -o batchesFixed batchesFixed.cu `pkg-config opencv --cflags --libs` -std=c++11 | 166a1c1a0bd3c0eafd0ccc74472945ae6a7a3757.cu | #include <iostream>
#include <math.h>
#include <opencv2/opencv.hpp>
#include <fstream>
#include <string>
#include <thread>
#include <chrono>
using namespace cv;
using namespace std;
// Kernel: filter each image in the batch and write the filtered copy to imgW;
// the filtered images are then summed and averaged by the kernels below.
__global__
void filterMean(int *imgR, int *imgW, int batchSize, int width, int dim, int num_colors, int tol)
{
int A = batchSize*dim;
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < batchSize*dim; i += stride){
int bn = i/(dim*num_colors);//batch number
int x = i%width;
int y = (i%dim)/width;
int r = imgR[i];
int g = imgR[i+A];
int b = imgR[i+2*A];
int a = (y*width+x)+(dim*bn);
//coord=i
//printf("before %d\n",r);
if(x<(width/2)){
//search right
if(y<(width/2)){
//search down
while(r==0 || (b-r)>tol || (r-b)<-tol){
x++;
y++;
a = (y*width+x)+(dim*bn);
r = imgR[a];
g = imgR[a+A];
b = imgR[a+2*A];
}
}
else{
//search up
while(r==0 || (b-r)>tol || (r-b)<-tol){
x++;
y--;
a = (y*width+x)+(dim*bn);
r = imgR[a];
g = imgR[a+A];
b = imgR[a+2*A];
}
}
}
else{
//search left
if(y<(width/2)){
//search down
while(r==0 || (b-r)>tol || (r-b)<-tol){
x--;
y++;
a = (y*width+x)+(dim*bn);
r = imgR[a];
g = imgR[a+A];
b = imgR[a+2*A];
}
}
else{
while(r==0 || (b-r)>tol || (r-b)<-tol){
x--;
y--;
a = (y*width+x)+(dim*bn);
r = imgR[a];
g = imgR[a+A];
b = imgR[a+2*A];
}
}
}
imgW[i] = r;
if(i%width==256 && (i%dim)/width==256){
printf("r %d imgW %d imgR %d\n",r,imgW[i],imgR[i]);
}
}
}
__global__
void sumBatch(int *imgR, int *imgW, int batchSize, int dim){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
//now filtered images are in imgW
for (int i = index; i < dim; i += stride){
for (int j = 0; j<batchSize; j++){
imgR[i] = imgW[i+j*dim];
}
}
}
__global__
void divideBatch(int *imgR, int batchSize, int dim){
//now imgR holds sum
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < dim; i += stride){
int a = imgR[i];
imgR[i] = a/batchSize;
}
}
__global__
void meanBatch(int *imgR, float *meanImg, int batchNum, int dim){
//now imgR holds sum
int width = 1024;
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < dim; i += stride){
int a = imgR[i];
float b = meanImg[i];
if(i%width==256 && (i%dim)/width==256){
printf("before %d %d %d\n",i,imgR[i],meanImg[i]);
}
meanImg[i] = b+((a-b)/((float)batchNum));
if(i%width==256 && (i%dim)/width==256){
printf("after %d %d %d\n",i,imgR[i],meanImg[i]);
}
}
}
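// Running-mean update used above: meanImg = b + (a - b) / batchNum. With hypothetical
// values b = 10.0 (mean of the first two batches), a = 16 (current batch) and
// batchNum = 3, the result is 10.0 + (16 - 10.0) / 3 = 12.0, i.e. the mean of three
// batch averages of 10, 10 and 16.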
void displayImage(Mat imgMat, int *img){
int i = 0;
for(int y = 0; y<imgMat.rows; y++){
for(int x = 0; x<imgMat.cols; x++){
int c = img[i];
imgMat.at<Vec3b>(Point(x,y)) = Vec3b(c,c,c);
i++;
}
}
namedWindow( "image", WINDOW_AUTOSIZE );
imshow( "Image", imgMat );
waitKey(0);
}
void displayImage(Mat imgMat, float *img){
int i = 0;
for(int y = 0; y<imgMat.rows; y++){
for(int x = 0; x<imgMat.cols; x++){
int c = img[i];
imgMat.at<Vec3b>(Point(x,y)) = Vec3b(c,c,c);
i++;
}
}
namedWindow( "image", WINDOW_AUTOSIZE );
imshow( "Image", imgMat );
waitKey(0);
}
int main(void)
{
const int NUM_COLORS = 3; //number of colors
const int IMG_WIDTH = 1024;
const int IMG_DIM = IMG_WIDTH*IMG_WIDTH;
const int IMG_DIM_COLORS = IMG_DIM*NUM_COLORS;
const int TOL = 30;
int device_count;
size_t max_mem = 0;
int best_device = 0;
cudaGetDeviceCount(&device_count);
for(int i = 0; i<device_count; i++){
size_t curAvailMem, totalMem;
cudaSetDevice(i);
cudaMemGetInfo(&curAvailMem, &totalMem);
printf("%zd %zd %zd\n",i,curAvailMem, totalMem);
if(curAvailMem>max_mem){
max_mem = curAvailMem;
best_device = i;
}
}
printf("Best device is %d with %zd free memory\n",best_device,max_mem);
cudaSetDevice(best_device);
int NUM_FILES = 0;
string line;
ifstream myfile ("images.txt");
if (myfile.is_open()){
while ( getline (myfile,line) ){
NUM_FILES++;
}
}
auto begin = std::chrono::high_resolution_clock::now();
float *meanImg;
Mat imgMat;
float avgDur = 0.0f;
// Allocate Unified Memory – accessible from CPU or GPU
cudaMallocManaged(&meanImg, IMG_DIM*sizeof(float));
//open images
ifstream myfile2 ("images.txt");
if (myfile2.is_open()){
int num_batchs_proc = 0;
int num_imgs_proc = 0;
while ( num_imgs_proc < NUM_FILES ){
int *batch;
int *batchMean;
size_t curAvailMem;
size_t totalMem;
cudaMemGetInfo(&curAvailMem, &totalMem);
int batchSize = (curAvailMem-IMG_DIM*sizeof(int)+IMG_DIM*sizeof(float))/(8*IMG_DIM_COLORS*sizeof(int))-1;
printf("%d %zd %zd\n",batchSize, curAvailMem, totalMem);
if(batchSize+num_imgs_proc>NUM_FILES){
batchSize = NUM_FILES-num_imgs_proc;
}
batchSize = 2;
printf("%d %zd %zd\n",batchSize, curAvailMem, totalMem);
cudaMallocManaged(&batch, batchSize*IMG_DIM_COLORS*sizeof(int));
cudaMallocManaged(&batchMean, batchSize*IMG_DIM*sizeof(int));
cudaMemGetInfo(&curAvailMem, &totalMem);
printf("%d %zd %zd\n",batchSize, curAvailMem, totalMem);
printf("initilize batch\n");
//initilize batch
int i = 0;
for(int b =0; b<batchSize; b++){
getline(myfile2, line);
imgMat = imread( line, IMREAD_COLOR );
printf("%d %d\n",b,batchSize);
//printf("%zd %zd\n",i,i+2*IMG_DIM*b);
for(int y = 0; y<imgMat.rows; y++){
for(int x = 0; x<imgMat.cols; x++){
          //segfaulting somewhere between batch[1796210688] and batch[1799356416]
Vec3b color = imgMat.at<Vec3b>(Point(x,y));
batch[i] = (int) color[0];
batch[i+IMG_DIM*b] = (int) color[1];
batch[i+2*IMG_DIM*b] = (int) color[2];
if(batch[i]<0 || batch[i+IMG_DIM*b]<0 || batch[i+2*IMG_DIM*b]<0){
printf("%d %d %d %d %d",i,b,batch[i],batch[i+IMG_DIM*b],batch[i+2*IMG_DIM*b]);
}
batchMean[i] = 0;
i++;
}
}
}
printf("Batch Sample %d\n", batch[(batchSize/2)*(IMG_DIM_COLORS/2)]);
int blockSize = 256;
int numBlocks = (IMG_DIM*batchSize + blockSize - 1) / blockSize;
printf("%d\n", batchSize);
filterMean<<<numBlocks, blockSize>>>(batch, batchMean, batchSize, IMG_WIDTH, IMG_DIM, NUM_COLORS, TOL);
cudaDeviceSynchronize();
num_batchs_proc++;
blockSize = 256;
numBlocks = (IMG_DIM + blockSize - 1) / blockSize;
printf("%d\n", num_batchs_proc);
sumBatch<<<numBlocks,blockSize>>>(batch,batchMean,batchSize,IMG_DIM);
cudaDeviceSynchronize();
divideBatch<<<numBlocks,blockSize>>>(batch,batchSize,IMG_DIM);
cudaDeviceSynchronize();
meanBatch<<<numBlocks,blockSize>>>(batch,meanImg,num_batchs_proc,IMG_DIM);
cudaDeviceSynchronize();
printf("Mean Sample %d\n", batchMean[IMG_DIM/2]);
printf("%d\n", num_batchs_proc);
num_imgs_proc+= batchSize;
printf("%d\n",num_imgs_proc);
cudaFree(batch);
cudaFree(batchMean);
break;
}
}
//Reconstruct Mat
int i = 0;
for(int y = 0; y<imgMat.rows; y++){
for(int x = 0; x<imgMat.cols; x++){
int c = meanImg[i];
imgMat.at<Vec3b>(Point(x,y)) = Vec3b(c,c,c);
i++;
}
}
cudaFree(meanImg);
namedWindow( line, WINDOW_AUTOSIZE );
imwrite("./mean.jpg",imgMat);
imshow( line, imgMat );
waitKey(0);
return 0;
}
//export PATH=/usr/local/cuda-10.0/bin${PATH:+:${PATH}}
//nvcc -o batchesFixed batchesFixed.cu `pkg-config opencv --cflags --libs` -std=c++11 |
c1c711820ec9293f63beb39946b79a5329dd2fb6.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 by Contributors
* \file quantized_conv.cu
* \brief
* \author Ziheng Jiang, Jun Wu
*/
#include "../nn/convolution-inl.h"
#include "./quantization_utils.h"
#include "../tensor/matrix_op-inl.h"
namespace mxnet {
namespace op {
// value + bias_value * (range1 / limit_range1) * (limit_range2 / range2)
struct QuantizedBiasAddKernel {
MSHADOW_XINLINE static void Map(int i, size_t bias_size, int32_t *out,
const int8_t *bias, const float *min_out,
const float *max_out, const float *min_bias,
const float *max_bias, const size_t spatial_size) {
using mshadow::red::limits::MinValue;
using mshadow::red::limits::MaxValue;
float float_for_one_out_quant =
MaxAbs(*min_out, *max_out) / static_cast<double>(MaxValue<int32_t>());
float float_for_one_bias_quant =
MaxAbs(*min_bias, *max_bias) / static_cast<double>(MaxValue<int8_t>());
const size_t channel_id = (i / spatial_size) % bias_size;
out[i] = (out[i] * float_for_one_out_quant +
bias[channel_id] * float_for_one_bias_quant) /
float_for_one_out_quant;
}
};
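// Worked example for the bias add above (the scale values are hypothetical): if the
// int32 output range gives float_for_one_out_quant = 0.01 and the int8 bias range
// gives float_for_one_bias_quant = 0.5, an accumulator value out[i] = 200 (2.0f) with
// bias = 4 (2.0f) becomes (200 * 0.01 + 4 * 0.5) / 0.01 = 400, i.e. the bias is
// rescaled into the output's quantization domain before being added.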
#if MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 6 && TORCH_HIP_VERSION >= 8000
template<typename SrcType, typename DstType, typename CmpType>
class QuantizedCuDNNConvOp {
public:
QuantizedCuDNNConvOp() {
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc_));
CUDNN_CALL(cudnnCreateTensorDescriptor(&data_desc_));
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc_));
CUDNN_CALL(cudnnCreateFilterDescriptor(&filter_desc_));
}
void Init(const ConvolutionParam& param,
const OpContext& ctx,
const std::vector<TShape>& in_shape,
const std::vector<TShape>& out_shape) {
param_ = param;
CHECK_EQ(param_.kernel.ndim(), 2U)
<< "QuantizedCuDNNConvOp only supports 2D convolution for now";
if (param_.layout.has_value()) {
CHECK_EQ(param_.layout.value(), mshadow::kNCHW)
<< "QuantizedConvOp only supports NCHW for now";
}
if (param_.stride.ndim() == 0U) param_.stride = mshadow::Shape2(1, 1);
if (param_.dilate.ndim() == 0U) param_.dilate = mshadow::Shape2(1, 1);
if (param_.pad.ndim() == 0U) param_.pad = mshadow::Shape2(0, 0);
N = 0, H = 2, W = 3, C = 1;
src_type_ = mshadow::DataType<SrcType>::kCudnnFlag;
dst_type_ = mshadow::DataType<DstType>::kCudnnFlag;
cmp_type_ = mshadow::DataType<CmpType>::kCudnnFlag;
algo_ = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM;
format_ = CUDNN_TENSOR_NHWC;
InitDescriptors(in_shape, out_shape);
GetTempSize(ctx);
}
~QuantizedCuDNNConvOp() {
CUDNN_CALL(cudnnDestroyFilterDescriptor(filter_desc_));
CUDNN_CALL(cudnnDestroyTensorDescriptor(data_desc_));
CUDNN_CALL(cudnnDestroyTensorDescriptor(out_desc_));
CUDNN_CALL(cudnnDestroyConvolutionDescriptor(conv_desc_));
}
void Forward(const OpContext &ctx,
const std::vector<TBlob> &in_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &out_data) {
CHECK_EQ(param_.kernel.ndim(), 2U)
<< "QuantizedCuDNNConvOp only supports 2D convolution for now";
using namespace mshadow;
CHECK_EQ(in_data.size(), param_.no_bias? 6U : 9U);
CHECK_EQ(out_data.size(), 3U);
Stream<gpu> *s = ctx.get_stream<gpu>();
CHECK_EQ(s->dnn_handle_ownership_, Stream<gpu>::OwnHandle);
const TBlob& data = in_data[0];
const TBlob& filter = in_data[1];
const TBlob& out = out_data[0];
const TShape& dshape = data.shape_;
const TShape& fshape = filter.shape_;
const TShape& oshape = out.shape_;
// allocate workspace
const int dev_id = ctx.run_ctx.ctx.dev_id;
const int dev_mask = gpu::kDevMask;
if (!param_.layout.has_value() || param_.layout.value() == mshadow::kNCHW) {
const size_t data_size = dshape.Size();
const size_t weight_size = fshape.Size();
const size_t output_size = oshape.Size();
size_t total_temp_bytes = (workspace_ + data_size + weight_size) * sizeof(SrcType)
+ output_size * (sizeof(DstType) + sizeof(int32_t));
Tensor<gpu, 1, char> temp_space =
ctx.requested[0].get_space_typed<gpu, 1, char>(mshadow::Shape1(total_temp_bytes), s);
char* temp_dptr = temp_space.dptr_;
TBlob data_(reinterpret_cast<SrcType*>(temp_dptr),
TShape({dshape[N], dshape[H], dshape[W], dshape[C]}),
dev_mask, DataType<SrcType>::kFlag, dev_id);
temp_dptr += data_size * sizeof(SrcType);
TBlob filter_(reinterpret_cast<SrcType*>(temp_dptr),
TShape({fshape[N], fshape[H], fshape[W], fshape[C]}),
dev_mask, DataType<SrcType>::kFlag, dev_id);
temp_dptr += weight_size * sizeof(SrcType);
// input: [NCHW] => [NHWC](batch, in_height, in_width, in_channels)
// filter: [NCHW] => [NHWC](out_channels, filter_height, filter_width, in_channels)
TransposeImpl<gpu>(ctx.run_ctx, data, data_, TShape({N, H, W, C}));
TransposeImpl<gpu>(ctx.run_ctx, filter, filter_, TShape({N, H, W, C}));
TBlob out_(reinterpret_cast<DstType*>(temp_dptr),
TShape({oshape[N], oshape[H], oshape[W], oshape[C]}),
dev_mask, DataType<DstType>::kFlag, dev_id);
temp_dptr += output_size * sizeof(DstType);
TBlob out_tcast(reinterpret_cast<int32_t*>(temp_dptr),
TShape({oshape[N], oshape[H], oshape[W], oshape[C]}),
dev_mask, DataType<int32_t>::kFlag, dev_id);
temp_dptr += output_size * sizeof(int32_t);
// input: [NHWC](batch, in_height, in_width, in_channels)
// filter: [HWNC](out_channels, filter_height, filter_width, in_channels)
// output: [NHWC](batch, out_height, out_width, out_channels)
CUDNN_CALL(cudnnConvolutionForward(s->dnn_handle_,
&alpha_,
data_desc_,
data_.dptr_,
filter_desc_,
filter_.dptr_,
conv_desc_,
algo_,
temp_dptr,
workspace_byte_,
&beta_,
out_desc_,
out_.dptr_));
Tensor<gpu, 1, DstType> out_tensor = out_.FlatTo1D<gpu, DstType>(s);
Tensor<gpu, 1, int32_t> out_tcast_tensor = out_tcast.FlatTo1D<gpu, int32_t>(s);
Assign(out_tcast_tensor, kWriteTo, mshadow::expr::tcast<int32_t>(out_tensor));
// output: [NHWC](batch, out_height, out_width, out_channels) => [NCHW]
TransposeImpl<gpu>(ctx.run_ctx, out_tcast, out, TShape({0, 3, 1, 2}));
} else {
LOG(FATAL) << "quantized_conv only supports NCHW for now";
}
// calculate the min/max range for out_data as it's a multiplication
// of in_data[0] and in_data[1]. Need to rescale the min/max range of out_data
// based on the min/max ranges of in_data[0] and in_data[1].
const size_t num_inputs = param_.no_bias ? 2 : 3;
mxnet_op::Kernel<QuantizationRangeForMultiplicationStruct, gpu>::Launch(s, 1,
out_data[1].dptr<float>(), out_data[2].dptr<float>(),
in_data[num_inputs].dptr<float>(), in_data[num_inputs+1].dptr<float>(),
in_data[num_inputs+2].dptr<float>(), in_data[num_inputs+3].dptr<float>());
if (!param_.no_bias) {
if (param_.layout.has_value()) {
CHECK_EQ(param_.layout.value(), mshadow::kNCHW)
<< "quantized_conv only supports NCHW when there is a bias";
}
const TBlob& bias = in_data[2];
mxnet_op::Kernel<QuantizedBiasAddKernel, gpu>::Launch(s, out.Size(),
bias.Size(), out.dptr<int32_t>(), bias.dptr<int8_t>(),
out_data[1].dptr<float>(), out_data[2].dptr<float>(),
in_data[7].dptr<float>(), in_data[8].dptr<float>(),
oshape[2] * oshape[3]);
}
}
void InitDescriptors(const std::vector<TShape>& in_shape,
const std::vector<TShape>& out_shape) {
const TShape& dshape = in_shape[0];
const TShape& kshape = in_shape[1];
const TShape& oshape = out_shape[0];
CUDNN_CALL(cudnnSetConvolution2dDescriptor(conv_desc_,
param_.pad[0],
param_.pad[1],
param_.stride[0],
param_.stride[1],
1,
1,
CUDNN_CROSS_CORRELATION,
cmp_type_));
CUDNN_CALL(cudnnSetTensor4dDescriptor(data_desc_,
format_,
src_type_,
dshape[N],
dshape[C],
dshape[H],
dshape[W]));
CUDNN_CALL(cudnnSetTensor4dDescriptor(out_desc_,
format_,
dst_type_,
oshape[N],
oshape[C],
oshape[H],
oshape[W]));
CUDNN_CALL(cudnnSetFilter4dDescriptor(filter_desc_,
src_type_,
format_,
kshape[N],
kshape[C],
kshape[H],
kshape[W]));
}
void GetTempSize(const OpContext& ctx) {
mshadow::Stream<gpu> *s = ctx.get_stream<gpu>();
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(s->dnn_handle_,
data_desc_,
filter_desc_,
conv_desc_,
out_desc_,
algo_,
&workspace_byte_));
workspace_ = workspace_byte_ / sizeof(SrcType) + 1;
}
private:
ConvolutionParam param_;
size_t workspace_;
size_t workspace_byte_;
cudnnDataType_t src_type_;
cudnnDataType_t dst_type_;
cudnnDataType_t cmp_type_;
cudnnTensorFormat_t format_;
cudnnConvolutionDescriptor_t conv_desc_;
cudnnTensorDescriptor_t data_desc_;
cudnnFilterDescriptor_t filter_desc_;
cudnnTensorDescriptor_t out_desc_;
cudnnConvolutionFwdAlgo_t algo_;
uint32_t N, H, W, C;
float alpha_ = 1.0f;
float beta_ = 0.0f;
}; // class QuantizedCuDNNConvOp
#endif // MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 6 && TORCH_HIP_VERSION >= 8000
void QuantizedConvForwardGPU(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
const ConvolutionParam& param = nnvm::get<ConvolutionParam>(attrs.parsed);
CHECK_EQ(param.kernel.ndim(), 2U)
<< "QuantizedConvForward<gpu> only supports 2D convolution for now";
#if MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 6 && TORCH_HIP_VERSION >= 8000
typedef QuantizedCuDNNConvOp<int8_t, float, int32_t> QuantizedConvOpInt8;
#if DMLC_CXX11_THREAD_LOCAL
static thread_local QuantizedConvOpInt8 op;
#else
static MX_THREAD_LOCAL QuantizedConvOpInt8 op;
#endif // DMLC_CXX11_THREAD_LOCAL
op.Init(param, ctx, {inputs[0].shape_, inputs[1].shape_}, {outputs[0].shape_});
op.Forward(ctx, inputs, req, outputs);
#else
LOG(FATAL) << "QuantizedConvForward<gpu> only supports cudnnConvolutionForward "
"with CUDNN >= 6.0 and CUDA >= 8.0";
#endif // MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 6 && TORCH_HIP_VERSION >= 8000
}
NNVM_REGISTER_OP(_contrib_quantized_conv)
.set_attr<FCompute>("FCompute<gpu>", QuantizedConvForwardGPU);
} // namespace op
} // namespace mxnet
| c1c711820ec9293f63beb39946b79a5329dd2fb6.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 by Contributors
* \file quantized_conv.cu
* \brief
* \author Ziheng Jiang, Jun Wu
*/
#include "../nn/convolution-inl.h"
#include "./quantization_utils.h"
#include "../tensor/matrix_op-inl.h"
namespace mxnet {
namespace op {
// value + bias_value * (range1 / limit_range1) * (limit_range2 / range2)
struct QuantizedBiasAddKernel {
MSHADOW_XINLINE static void Map(int i, size_t bias_size, int32_t *out,
const int8_t *bias, const float *min_out,
const float *max_out, const float *min_bias,
const float *max_bias, const size_t spatial_size) {
using mshadow::red::limits::MinValue;
using mshadow::red::limits::MaxValue;
float float_for_one_out_quant =
MaxAbs(*min_out, *max_out) / static_cast<double>(MaxValue<int32_t>());
float float_for_one_bias_quant =
MaxAbs(*min_bias, *max_bias) / static_cast<double>(MaxValue<int8_t>());
const size_t channel_id = (i / spatial_size) % bias_size;
out[i] = (out[i] * float_for_one_out_quant +
bias[channel_id] * float_for_one_bias_quant) /
float_for_one_out_quant;
}
};
#if MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 6 && CUDA_VERSION >= 8000
template<typename SrcType, typename DstType, typename CmpType>
class QuantizedCuDNNConvOp {
public:
QuantizedCuDNNConvOp() {
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc_));
CUDNN_CALL(cudnnCreateTensorDescriptor(&data_desc_));
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc_));
CUDNN_CALL(cudnnCreateFilterDescriptor(&filter_desc_));
}
void Init(const ConvolutionParam& param,
const OpContext& ctx,
const std::vector<TShape>& in_shape,
const std::vector<TShape>& out_shape) {
param_ = param;
CHECK_EQ(param_.kernel.ndim(), 2U)
<< "QuantizedCuDNNConvOp only supports 2D convolution for now";
if (param_.layout.has_value()) {
CHECK_EQ(param_.layout.value(), mshadow::kNCHW)
<< "QuantizedConvOp only supports NCHW for now";
}
if (param_.stride.ndim() == 0U) param_.stride = mshadow::Shape2(1, 1);
if (param_.dilate.ndim() == 0U) param_.dilate = mshadow::Shape2(1, 1);
if (param_.pad.ndim() == 0U) param_.pad = mshadow::Shape2(0, 0);
N = 0, H = 2, W = 3, C = 1;
src_type_ = mshadow::DataType<SrcType>::kCudnnFlag;
dst_type_ = mshadow::DataType<DstType>::kCudnnFlag;
cmp_type_ = mshadow::DataType<CmpType>::kCudnnFlag;
algo_ = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM;
format_ = CUDNN_TENSOR_NHWC;
InitDescriptors(in_shape, out_shape);
GetTempSize(ctx);
}
~QuantizedCuDNNConvOp() {
CUDNN_CALL(cudnnDestroyFilterDescriptor(filter_desc_));
CUDNN_CALL(cudnnDestroyTensorDescriptor(data_desc_));
CUDNN_CALL(cudnnDestroyTensorDescriptor(out_desc_));
CUDNN_CALL(cudnnDestroyConvolutionDescriptor(conv_desc_));
}
void Forward(const OpContext &ctx,
const std::vector<TBlob> &in_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &out_data) {
CHECK_EQ(param_.kernel.ndim(), 2U)
<< "QuantizedCuDNNConvOp only supports 2D convolution for now";
using namespace mshadow;
CHECK_EQ(in_data.size(), param_.no_bias? 6U : 9U);
CHECK_EQ(out_data.size(), 3U);
Stream<gpu> *s = ctx.get_stream<gpu>();
CHECK_EQ(s->dnn_handle_ownership_, Stream<gpu>::OwnHandle);
const TBlob& data = in_data[0];
const TBlob& filter = in_data[1];
const TBlob& out = out_data[0];
const TShape& dshape = data.shape_;
const TShape& fshape = filter.shape_;
const TShape& oshape = out.shape_;
// allocate workspace
const int dev_id = ctx.run_ctx.ctx.dev_id;
const int dev_mask = gpu::kDevMask;
if (!param_.layout.has_value() || param_.layout.value() == mshadow::kNCHW) {
const size_t data_size = dshape.Size();
const size_t weight_size = fshape.Size();
const size_t output_size = oshape.Size();
size_t total_temp_bytes = (workspace_ + data_size + weight_size) * sizeof(SrcType)
+ output_size * (sizeof(DstType) + sizeof(int32_t));
Tensor<gpu, 1, char> temp_space =
ctx.requested[0].get_space_typed<gpu, 1, char>(mshadow::Shape1(total_temp_bytes), s);
char* temp_dptr = temp_space.dptr_;
TBlob data_(reinterpret_cast<SrcType*>(temp_dptr),
TShape({dshape[N], dshape[H], dshape[W], dshape[C]}),
dev_mask, DataType<SrcType>::kFlag, dev_id);
temp_dptr += data_size * sizeof(SrcType);
TBlob filter_(reinterpret_cast<SrcType*>(temp_dptr),
TShape({fshape[N], fshape[H], fshape[W], fshape[C]}),
dev_mask, DataType<SrcType>::kFlag, dev_id);
temp_dptr += weight_size * sizeof(SrcType);
// input: [NCHW] => [NHWC](batch, in_height, in_width, in_channels)
// filter: [NCHW] => [NHWC](out_channels, filter_height, filter_width, in_channels)
TransposeImpl<gpu>(ctx.run_ctx, data, data_, TShape({N, H, W, C}));
TransposeImpl<gpu>(ctx.run_ctx, filter, filter_, TShape({N, H, W, C}));
TBlob out_(reinterpret_cast<DstType*>(temp_dptr),
TShape({oshape[N], oshape[H], oshape[W], oshape[C]}),
dev_mask, DataType<DstType>::kFlag, dev_id);
temp_dptr += output_size * sizeof(DstType);
TBlob out_tcast(reinterpret_cast<int32_t*>(temp_dptr),
TShape({oshape[N], oshape[H], oshape[W], oshape[C]}),
dev_mask, DataType<int32_t>::kFlag, dev_id);
temp_dptr += output_size * sizeof(int32_t);
// input: [NHWC](batch, in_height, in_width, in_channels)
// filter: [HWNC](out_channels, filter_height, filter_width, in_channels)
// output: [NHWC](batch, out_height, out_width, out_channels)
CUDNN_CALL(cudnnConvolutionForward(s->dnn_handle_,
&alpha_,
data_desc_,
data_.dptr_,
filter_desc_,
filter_.dptr_,
conv_desc_,
algo_,
temp_dptr,
workspace_byte_,
&beta_,
out_desc_,
out_.dptr_));
Tensor<gpu, 1, DstType> out_tensor = out_.FlatTo1D<gpu, DstType>(s);
Tensor<gpu, 1, int32_t> out_tcast_tensor = out_tcast.FlatTo1D<gpu, int32_t>(s);
Assign(out_tcast_tensor, kWriteTo, mshadow::expr::tcast<int32_t>(out_tensor));
// output: [NHWC](batch, out_height, out_width, out_channels) => [NCHW]
TransposeImpl<gpu>(ctx.run_ctx, out_tcast, out, TShape({0, 3, 1, 2}));
} else {
LOG(FATAL) << "quantized_conv only supports NCHW for now";
}
// calculate the min/max range for out_data as it's a multiplication
// of in_data[0] and in_data[1]. Need to rescale the min/max range of out_data
// based on the min/max ranges of in_data[0] and in_data[1].
const size_t num_inputs = param_.no_bias ? 2 : 3;
mxnet_op::Kernel<QuantizationRangeForMultiplicationStruct, gpu>::Launch(s, 1,
out_data[1].dptr<float>(), out_data[2].dptr<float>(),
in_data[num_inputs].dptr<float>(), in_data[num_inputs+1].dptr<float>(),
in_data[num_inputs+2].dptr<float>(), in_data[num_inputs+3].dptr<float>());
if (!param_.no_bias) {
if (param_.layout.has_value()) {
CHECK_EQ(param_.layout.value(), mshadow::kNCHW)
<< "quantized_conv only supports NCHW when there is a bias";
}
const TBlob& bias = in_data[2];
mxnet_op::Kernel<QuantizedBiasAddKernel, gpu>::Launch(s, out.Size(),
bias.Size(), out.dptr<int32_t>(), bias.dptr<int8_t>(),
out_data[1].dptr<float>(), out_data[2].dptr<float>(),
in_data[7].dptr<float>(), in_data[8].dptr<float>(),
oshape[2] * oshape[3]);
}
}
void InitDescriptors(const std::vector<TShape>& in_shape,
const std::vector<TShape>& out_shape) {
const TShape& dshape = in_shape[0];
const TShape& kshape = in_shape[1];
const TShape& oshape = out_shape[0];
CUDNN_CALL(cudnnSetConvolution2dDescriptor(conv_desc_,
param_.pad[0],
param_.pad[1],
param_.stride[0],
param_.stride[1],
1,
1,
CUDNN_CROSS_CORRELATION,
cmp_type_));
CUDNN_CALL(cudnnSetTensor4dDescriptor(data_desc_,
format_,
src_type_,
dshape[N],
dshape[C],
dshape[H],
dshape[W]));
CUDNN_CALL(cudnnSetTensor4dDescriptor(out_desc_,
format_,
dst_type_,
oshape[N],
oshape[C],
oshape[H],
oshape[W]));
CUDNN_CALL(cudnnSetFilter4dDescriptor(filter_desc_,
src_type_,
format_,
kshape[N],
kshape[C],
kshape[H],
kshape[W]));
}
void GetTempSize(const OpContext& ctx) {
mshadow::Stream<gpu> *s = ctx.get_stream<gpu>();
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(s->dnn_handle_,
data_desc_,
filter_desc_,
conv_desc_,
out_desc_,
algo_,
&workspace_byte_));
workspace_ = workspace_byte_ / sizeof(SrcType) + 1;
}
private:
ConvolutionParam param_;
size_t workspace_;
size_t workspace_byte_;
cudnnDataType_t src_type_;
cudnnDataType_t dst_type_;
cudnnDataType_t cmp_type_;
cudnnTensorFormat_t format_;
cudnnConvolutionDescriptor_t conv_desc_;
cudnnTensorDescriptor_t data_desc_;
cudnnFilterDescriptor_t filter_desc_;
cudnnTensorDescriptor_t out_desc_;
cudnnConvolutionFwdAlgo_t algo_;
uint32_t N, H, W, C;
float alpha_ = 1.0f;
float beta_ = 0.0f;
}; // class QuantizedCuDNNConvOp
#endif // MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 6 && CUDA_VERSION >= 8000
void QuantizedConvForwardGPU(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
const ConvolutionParam& param = nnvm::get<ConvolutionParam>(attrs.parsed);
CHECK_EQ(param.kernel.ndim(), 2U)
<< "QuantizedConvForward<gpu> only supports 2D convolution for now";
#if MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 6 && CUDA_VERSION >= 8000
typedef QuantizedCuDNNConvOp<int8_t, float, int32_t> QuantizedConvOpInt8;
#if DMLC_CXX11_THREAD_LOCAL
static thread_local QuantizedConvOpInt8 op;
#else
static MX_THREAD_LOCAL QuantizedConvOpInt8 op;
#endif // DMLC_CXX11_THREAD_LOCAL
op.Init(param, ctx, {inputs[0].shape_, inputs[1].shape_}, {outputs[0].shape_});
op.Forward(ctx, inputs, req, outputs);
#else
LOG(FATAL) << "QuantizedConvForward<gpu> only supports cudnnConvolutionForward "
"with CUDNN >= 6.0 and CUDA >= 8.0";
#endif // MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 6 && CUDA_VERSION >= 8000
}
NNVM_REGISTER_OP(_contrib_quantized_conv)
.set_attr<FCompute>("FCompute<gpu>", QuantizedConvForwardGPU);
} // namespace op
} // namespace mxnet
|
f3f1c5aaa08b29e8eea10a4dad54ce74ef22ead8.hip | // !!! This is a file automatically generated by hipify!!!
#include "cudf.h"
#include "rmm/rmm.h"
#include "utilities/cudf_utils.h"
#include "utilities/error_utils.h"
#include <hipcub/hipcub.hpp>
template <class T>
struct Scan {
static
gdf_error call(const T *inp, T *out, size_t size, bool inclusive) {
using hipcub::DeviceScan;
auto scan_function = (inclusive? inclusive_sum : exclusive_sum);
// Prepare temp storage
void *temp_storage = NULL;
size_t temp_storage_bytes = 0;
scan_function(temp_storage, temp_storage_bytes, inp, out, size);
RMM_TRY( RMM_ALLOC(&temp_storage, temp_storage_bytes, 0) ); // TODO: non-default stream
// Do scan
scan_function(temp_storage, temp_storage_bytes, inp, out, size);
// Cleanup
RMM_TRY( RMM_FREE(temp_storage, 0) ); // TODO: non-default stream
return GDF_SUCCESS;
}
static
gdf_error exclusive_sum(void *&temp_storage, size_t &temp_storage_bytes,
const T *inp, T *out, size_t size) {
hipcub::DeviceScan::ExclusiveSum(temp_storage, temp_storage_bytes,
inp, out, size);
CUDA_CHECK_LAST();
return GDF_SUCCESS;
}
static
gdf_error inclusive_sum(void *&temp_storage, size_t &temp_storage_bytes,
const T *inp, T *out, size_t size) {
hipcub::DeviceScan::InclusiveSum(temp_storage, temp_storage_bytes,
inp, out, size);
CUDA_CHECK_LAST();
return GDF_SUCCESS;
}
};
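// Example of the two scan flavours on the input [3, 1, 7, 0, 4]:
//   inclusive_sum -> [3, 4, 11, 11, 15]  (element i includes inp[i])
//   exclusive_sum -> [0, 3, 4, 11, 11]   (element i sums everything before inp[i])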
#define SCAN_IMPL(F, T) \
gdf_error gdf_prefixsum_##F(gdf_column *inp, gdf_column *out, int inclusive) {\
GDF_REQUIRE( inp->size == out->size, GDF_COLUMN_SIZE_MISMATCH ); \
GDF_REQUIRE( inp->dtype == out->dtype, GDF_UNSUPPORTED_DTYPE ); \
GDF_REQUIRE( !inp->valid || !inp->null_count, GDF_VALIDITY_UNSUPPORTED ); \
GDF_REQUIRE( !out->valid || !out->null_count, GDF_VALIDITY_UNSUPPORTED ); \
return Scan<T>::call((const T*)inp->data, (T*)out->data, inp->size, \
inclusive); \
}
SCAN_IMPL(i8, int8_t)
SCAN_IMPL(i32, int32_t)
SCAN_IMPL(i64, int64_t)
gdf_error gdf_prefixsum_generic(gdf_column *inp, gdf_column *out,
int inclusive)
{
switch (inp->dtype) {
case GDF_INT8: return gdf_prefixsum_i8(inp, out, inclusive);
case GDF_INT32: return gdf_prefixsum_i32(inp, out, inclusive);
case GDF_INT64: return gdf_prefixsum_i64(inp, out, inclusive);
default: return GDF_SUCCESS;
}
}
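// Usage sketch (the column setup is hypothetical): given two GDF_INT32 columns `in` and
// `out` of equal size with no null mask, gdf_prefixsum_generic(&in, &out, 1) dispatches
// to gdf_prefixsum_i32 and writes the inclusive prefix sum of `in` into `out`.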
| f3f1c5aaa08b29e8eea10a4dad54ce74ef22ead8.cu | #include "cudf.h"
#include "rmm/rmm.h"
#include "utilities/cudf_utils.h"
#include "utilities/error_utils.h"
#include <cub/device/device_scan.cuh>
template <class T>
struct Scan {
static
gdf_error call(const T *inp, T *out, size_t size, bool inclusive) {
using cub::DeviceScan;
auto scan_function = (inclusive? inclusive_sum : exclusive_sum);
// Prepare temp storage
void *temp_storage = NULL;
size_t temp_storage_bytes = 0;
scan_function(temp_storage, temp_storage_bytes, inp, out, size);
RMM_TRY( RMM_ALLOC(&temp_storage, temp_storage_bytes, 0) ); // TODO: non-default stream
// Do scan
scan_function(temp_storage, temp_storage_bytes, inp, out, size);
// Cleanup
RMM_TRY( RMM_FREE(temp_storage, 0) ); // TODO: non-default stream
return GDF_SUCCESS;
}
static
gdf_error exclusive_sum(void *&temp_storage, size_t &temp_storage_bytes,
const T *inp, T *out, size_t size) {
cub::DeviceScan::ExclusiveSum(temp_storage, temp_storage_bytes,
inp, out, size);
CUDA_CHECK_LAST();
return GDF_SUCCESS;
}
static
gdf_error inclusive_sum(void *&temp_storage, size_t &temp_storage_bytes,
const T *inp, T *out, size_t size) {
cub::DeviceScan::InclusiveSum(temp_storage, temp_storage_bytes,
inp, out, size);
CUDA_CHECK_LAST();
return GDF_SUCCESS;
}
};
#define SCAN_IMPL(F, T) \
gdf_error gdf_prefixsum_##F(gdf_column *inp, gdf_column *out, int inclusive) {\
GDF_REQUIRE( inp->size == out->size, GDF_COLUMN_SIZE_MISMATCH ); \
GDF_REQUIRE( inp->dtype == out->dtype, GDF_UNSUPPORTED_DTYPE ); \
GDF_REQUIRE( !inp->valid || !inp->null_count, GDF_VALIDITY_UNSUPPORTED ); \
GDF_REQUIRE( !out->valid || !out->null_count, GDF_VALIDITY_UNSUPPORTED ); \
return Scan<T>::call((const T*)inp->data, (T*)out->data, inp->size, \
inclusive); \
}
SCAN_IMPL(i8, int8_t)
SCAN_IMPL(i32, int32_t)
SCAN_IMPL(i64, int64_t)
gdf_error gdf_prefixsum_generic(gdf_column *inp, gdf_column *out,
int inclusive)
{
switch (inp->dtype) {
case GDF_INT8: return gdf_prefixsum_i8(inp, out, inclusive);
case GDF_INT32: return gdf_prefixsum_i32(inp, out, inclusive);
case GDF_INT64: return gdf_prefixsum_i64(inp, out, inclusive);
default: return GDF_SUCCESS;
}
}
|
5e51c3f2502e8cc5174732f53bbbe930011227bb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "voronoi_trace.cuh"
#include <iostream>
__global__
void draw_voronoi_trace(vec3* device_buffer, hiprandState_t* d_rand_state, RadianceMap* radiance_map, Camera* camera, Scene* scene){
// int i = radiance_map->radiance_array[1000].data;
// printf("%d\n", i);
// Populate the shared GPU/CPU screen buffer
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
device_buffer[ x*SCREEN_HEIGHT + y ] = voronoi_trace(d_rand_state, camera, radiance_map, x, y, scene);
}
__device__
vec3 voronoi_trace(hiprandState_t* d_rand_state, Camera* camera, RadianceMap* radiance_map, int pixel_x, int pixel_y, Scene* scene){
// Generate the random point within a pixel for the ray to pass through
float x = (float)pixel_x + hiprand_uniform(&d_rand_state[pixel_x*(int)SCREEN_HEIGHT + pixel_y]);
float y = (float)pixel_y + hiprand_uniform(&d_rand_state[pixel_x*(int)SCREEN_HEIGHT + pixel_y]);
// Set direction to pass through pixel (pixel space -> Camera space)
vec4 dir((x - (float)SCREEN_WIDTH / 2.f) , (y - (float)SCREEN_HEIGHT / 2.f) , (float)FOCAL_LENGTH , 1);
// Create a ray that we will change the direction for below
Ray ray(camera->position, dir);
ray.rotate_ray(0, 0);
// Trace the path of the ray to find the closest intersection
ray.closest_intersection(scene);
if (ray.intersection.intersection_type == SURFACE){
// Get the voronoi colour of the intersection point
return radiance_map->get_voronoi_colour(ray.intersection);
}
else{
return vec3(1.f);
}
} | 5e51c3f2502e8cc5174732f53bbbe930011227bb.cu | #include "voronoi_trace.cuh"
#include <iostream>
__global__
void draw_voronoi_trace(vec3* device_buffer, curandState* d_rand_state, RadianceMap* radiance_map, Camera* camera, Scene* scene){
// int i = radiance_map->radiance_array[1000].data;
// printf("%d\n", i);
// Populate the shared GPU/CPU screen buffer
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
device_buffer[ x*SCREEN_HEIGHT + y ] = voronoi_trace(d_rand_state, camera, radiance_map, x, y, scene);
}
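// A full-screen launch might look like the sketch below (the 8x8 block size and the
// local variable names are assumptions, not part of this file):
//   dim3 threads(8, 8);
//   dim3 blocks(SCREEN_WIDTH / 8, SCREEN_HEIGHT / 8);
//   draw_voronoi_trace<<<blocks, threads>>>(device_buffer, d_rand_state, radiance_map, camera, scene);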
__device__
vec3 voronoi_trace(curandState* d_rand_state, Camera* camera, RadianceMap* radiance_map, int pixel_x, int pixel_y, Scene* scene){
// Generate the random point within a pixel for the ray to pass through
float x = (float)pixel_x + curand_uniform(&d_rand_state[pixel_x*(int)SCREEN_HEIGHT + pixel_y]);
float y = (float)pixel_y + curand_uniform(&d_rand_state[pixel_x*(int)SCREEN_HEIGHT + pixel_y]);
// Set direction to pass through pixel (pixel space -> Camera space)
vec4 dir((x - (float)SCREEN_WIDTH / 2.f) , (y - (float)SCREEN_HEIGHT / 2.f) , (float)FOCAL_LENGTH , 1);
// Create a ray that we will change the direction for below
Ray ray(camera->position, dir);
ray.rotate_ray(0, 0);
// Trace the path of the ray to find the closest intersection
ray.closest_intersection(scene);
if (ray.intersection.intersection_type == SURFACE){
// Get the voronoi colour of the intersection point
return radiance_map->get_voronoi_colour(ray.intersection);
}
else{
return vec3(1.f);
}
} |
09311048894001ebc17ae60098b8cdadbba6fd5f.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutSrc, int32_t, LayoutSrc, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, false>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const int8_t* d_src,
const int8_t* d_filter,
const int32_t* d_bias,
const int8_t* d_z,
int8_t* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| 09311048894001ebc17ae60098b8cdadbba6fd5f.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutSrc, int32_t, LayoutSrc, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, false>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const int8_t* d_src,
const int8_t* d_filter,
const int32_t* d_bias,
const int8_t* d_z,
int8_t* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
4e2f9e264a2979d1a67bfb4ae485afc335b99f3e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Jack Hartmann Kmeans
//12/2/18
//Elon University
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <math.h>
#include <string.h>
#include <time.h>
//#include "gfx.h":
extern "C" {
#include "gfx.h"
}
typedef struct {
int x;
int y;
int cluster;
} point_t;
typedef struct {
point_t centroid;
int size;
} set_t;
#define NO_CLUSTER -1
#define K 3
/* a way to choose colors */
unsigned int colors[] = { 0xFFFF00, 0x1CE6FF, 0xFF34FF, 0xFF4A46,
0x008941, 0x006FA6, 0xA30059, 0xFFDBE5, 0x7A4900,
0x0000A6, 0x63FFAC, 0xB79762, 0x004D43, 0x8FB0FF,
0x997D87, 0x5A0007, 0x809693, 0xFEFFE6, 0x1B4400,
0x4FC601, 0x3B5DFF, 0x4A3B53, 0xFF2F80, 0x61615A,
0xBA0900, 0x6B7900, 0x00C2A0, 0xFFAA92, 0xFF90C9,
0xB903AA, 0xD16100, 0xDDEFFF, 0x000035, 0x7B4F4B,
0xA1C299, 0x300018, 0x0AA6D8, 0x013349, 0x00846F,
0x372101, 0xFFB500, 0xC2FFED, 0xA079BF, 0xCC0744,
0xC0B9B2, 0xC2FF99, 0x001E09, 0x00489C, 0x6F0062,
0x0CBD66, 0xEEC3FF, 0x456D75, 0xB77B68, 0x7A87A1,
0x788D66, 0x885578, 0xFAD09F, 0xFF8A9A, 0xD157A0,
0xBEC459, 0x456648, 0x0086ED, 0x886F4C, 0x34362D,
0xB4A8BD, 0x00A6AA, 0x452C2C, 0x636375, 0xA3C8C9,
0xFF913F, 0x938A81, 0x575329, 0x00FECF, 0xB05B6F,
0x8CD0FF, 0x3B9700, 0x04F757, 0xC8A1A1, 0x1E6E00,
0x7900D7, 0xA77500, 0x6367A9, 0xA05837, 0x6B002C,
0x772600, 0xD790FF, 0x9B9700, 0x549E79, 0xFFF69F,
0x201625, 0x72418F, 0xBC23FF, 0x99ADC0, 0x3A2465,
0x922329, 0x5B4534, 0xFDE8DC, 0x404E55, 0x0089A3,
0xCB7E98, 0xA4E804, 0x324E72, 0x6A3A4C };
/* draw the observations */
void show_observations(int num_observations, point_t *observation, set_t *cluster) {
gfx_clear();
// show the observations
int j, i;
for (j = 0; j < K; j++) {
// change to the set color
for (i = 0; i < num_observations; i++) {
if (observation[i].cluster == -1) {
gfx_color(255, 255, 255);
} else {
gfx_color((colors[observation[i].cluster] >> 16) & 0xFF,
(colors[observation[i].cluster] >> 8) & 0xFF,
colors[observation[i].cluster] & 0xFF);
}
gfx_line(observation[i].x, observation[i].y, observation[i].x, observation[i].y);
}
}
for (i = 0; i < K; i++) {
gfx_color((colors[i] >> 16) & 0xFF,
(colors[i] >> 8) & 0xFF,
colors[i] & 0xFF);
gfx_line(cluster[i].centroid.x, cluster[i].centroid.y - 10, cluster[i].centroid.x, cluster[i].centroid.y + 10);
gfx_line(cluster[i].centroid.x - 10, cluster[i].centroid.y, cluster[i].centroid.x + 10, cluster[i].centroid.y);
}
gfx_flush();
}
//Run the centroid distance calculations and assign each observation to its nearest centroid
__global__ void centroid_calc(int size, point_t *observations, set_t *cluster, int num_observations) {
  int index = blockDim.x * blockIdx.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  // grid-stride loop: each observation is visited by exactly one thread
  for (int i = index; i < num_observations; i += stride) {
    int min_cluster = -1;
    double min_mean = 1.0e30;
    for (int k = 0; k < K; k++) {
      double mean = powf(observations[i].x - cluster[k].centroid.x, 2)
          + powf(observations[i].y - cluster[k].centroid.y, 2);
      if (min_mean > mean) {
        min_mean = mean;
        min_cluster = k;
      }
    }
    observations[i].cluster = min_cluster;
    // concurrent threads can pick the same cluster, so the size update must be atomic
    atomicAdd(&cluster[min_cluster].size, 1);
  }
}
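// Assignment-step example (hypothetical coordinates): an observation at (10, 10)
// compared against centroids (0, 0), (50, 50) and (12, 9) has squared distances
// 200, 3200 and 5, so it is assigned to cluster 2 and that cluster's size is
// incremented atomically.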
int main() {
printf("Version 1");
/* read first line to determine how much data */
int size, num_observations;
scanf("%d %d", &num_observations, &size);
/* data */
point_t observations[num_observations];
set_t cluster[K];
/* read the data in */
for (int i = 0; i < num_observations; i++) {
scanf("%d %d", &(observations[i].x), &(observations[i].y));
observations[i].cluster = NO_CLUSTER;
}
/* randomly set centroids */
unsigned int seed = (unsigned int) time(NULL);
for (int i = 0; i < K; i++) {
int which = rand_r(&seed) % num_observations;
// Forgy's Method
cluster[i].centroid.x = observations[which].x;
cluster[i].centroid.y = observations[which].y;
cluster[i].size = 0;
cluster[i].centroid.cluster = i;
}
//initiate GFX
gfx_open(size, size, "k-means clustering");
int how_many_move = 1;
int iterations = 0;
while (how_many_move > 0) {
show_observations(num_observations, observations, cluster);
// set all cluster sizes to 0
for (int k = 0; k < K; k++) {
cluster[k].size = 0;
}
iterations++;
printf("Iteration %d\n", iterations);
how_many_move = 0;
// ASSIGNMENT STEP!!!!!
point_t *d_observations;
set_t *d_cluster;
int d_min_cluster;
size_t size = num_observations * sizeof(point_t);
size_t size_cluster = K * sizeof(set_t);
//Allocate the memory for the cluster and observartion arrays on the device
hipMalloc(&d_observations, size);
hipMalloc(&d_cluster, size_cluster);
// copy the data from the local arrays to the device
hipMemcpy(d_observations, observations, size, hipMemcpyHostToDevice);
hipMemcpy(d_cluster, cluster, size_cluster, hipMemcpyHostToDevice);
//Determinned the block and grid format for the device
int threadsPerBlock = 256;
int blocksPerGrid = (num_observations + threadsPerBlock - 1) / threadsPerBlock;
//Call the centroid calculation method on the device
hipLaunchKernelGGL(( centroid_calc), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, size, d_observations, d_cluster, num_observations);
//copy the observations and clusters back to the cpu to update Calculations
//the amount of data copied over could be reduced if the structs were reformatted
hipMemcpy(observations, d_observations, size, hipMemcpyDeviceToHost);
hipMemcpy(cluster, d_cluster, size_cluster, hipMemcpyDeviceToHost);
//hipMemcpy(min_cluster, d_min_cluster, sizeof(int), hipMemcpyDeviceToHost);
//free the device buffers; they are reallocated on the next iteration
hipFree(d_observations);
hipFree(d_cluster);
for (int i = 0; i < num_observations; i++) {
if (observations[i].cluster != prev_cluster[i]) {
how_many_move++;
}
}
// UPDATE STEP!!!!!
int sum_x[K], sum_y[K];
bzero(sum_x, K * sizeof(int));
bzero(sum_y, K * sizeof(int));
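// accumulate the coordinates of every observation assigned to each cluster;
// the new centroid computed below is the integer mean of its members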
for (int i = 0; i < num_observations; i++) {
sum_x[observations[i].cluster] += observations[i].x;
sum_y[observations[i].cluster] += observations[i].y;
}
for (int k = 0; k < K; k++) {
printf("%d: sum_x = %d, sum_y = %d, cluster.size = %d\n", k, sum_x[k], sum_y[k], cluster[k].size);
cluster[k].centroid.x = sum_x[k] / cluster[k].size;
cluster[k].centroid.y = sum_y[k] / cluster[k].size;
printf("cluster %d: (%d, %d)\n", k, cluster[k].centroid.x, cluster[k].centroid.y);
}
sleep(1);
}
printf("Done with %d itertions\n", iterations);
while (1) {
char c = gfx_wait();
if (c == 'q') break;
}
}
/* gcc -o kmeans kmeans.c gfx.c -I/usr/X11/include -L/usr/X11/lib -lX11 -lm */
/* ./kmeans.cu*/
| 4e2f9e264a2979d1a67bfb4ae485afc335b99f3e.cu | //Jack Hartmann Kmeans
//12/2/18
//Elon University
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <math.h>
#include <string.h>
#include <time.h>
//#include "gfx.h":
extern "C" {
#include "gfx.h"
}
typedef struct {
int x;
int y;
int cluster;
} point_t;
typedef struct {
point_t centroid;
int size;
} set_t;
#define NO_CLUSTER -1
#define K 3
/* a way to choose colors */
unsigned int colors[] = { 0xFFFF00, 0x1CE6FF, 0xFF34FF, 0xFF4A46,
0x008941, 0x006FA6, 0xA30059, 0xFFDBE5, 0x7A4900,
0x0000A6, 0x63FFAC, 0xB79762, 0x004D43, 0x8FB0FF,
0x997D87, 0x5A0007, 0x809693, 0xFEFFE6, 0x1B4400,
0x4FC601, 0x3B5DFF, 0x4A3B53, 0xFF2F80, 0x61615A,
0xBA0900, 0x6B7900, 0x00C2A0, 0xFFAA92, 0xFF90C9,
0xB903AA, 0xD16100, 0xDDEFFF, 0x000035, 0x7B4F4B,
0xA1C299, 0x300018, 0x0AA6D8, 0x013349, 0x00846F,
0x372101, 0xFFB500, 0xC2FFED, 0xA079BF, 0xCC0744,
0xC0B9B2, 0xC2FF99, 0x001E09, 0x00489C, 0x6F0062,
0x0CBD66, 0xEEC3FF, 0x456D75, 0xB77B68, 0x7A87A1,
0x788D66, 0x885578, 0xFAD09F, 0xFF8A9A, 0xD157A0,
0xBEC459, 0x456648, 0x0086ED, 0x886F4C, 0x34362D,
0xB4A8BD, 0x00A6AA, 0x452C2C, 0x636375, 0xA3C8C9,
0xFF913F, 0x938A81, 0x575329, 0x00FECF, 0xB05B6F,
0x8CD0FF, 0x3B9700, 0x04F757, 0xC8A1A1, 0x1E6E00,
0x7900D7, 0xA77500, 0x6367A9, 0xA05837, 0x6B002C,
0x772600, 0xD790FF, 0x9B9700, 0x549E79, 0xFFF69F,
0x201625, 0x72418F, 0xBC23FF, 0x99ADC0, 0x3A2465,
0x922329, 0x5B4534, 0xFDE8DC, 0x404E55, 0x0089A3,
0xCB7E98, 0xA4E804, 0x324E72, 0x6A3A4C };
/* draw the observations */
void show_observations(int num_observations, point_t *observation, set_t *cluster) {
gfx_clear();
// show the observations
int j, i;
for (j = 0; j < K; j++) {
// change to the set color
for (i = 0; i < num_observations; i++) {
if (observation[i].cluster == -1) {
gfx_color(255, 255, 255);
} else {
gfx_color((colors[observation[i].cluster] >> 16) & 0xFF,
(colors[observation[i].cluster] >> 8) & 0xFF,
colors[observation[i].cluster] & 0xFF);
}
gfx_line(observation[i].x, observation[i].y, observation[i].x, observation[i].y);
}
}
for (i = 0; i < K; i++) {
gfx_color((colors[i] >> 16) & 0xFF,
(colors[i] >> 8) & 0xFF,
colors[i] & 0xFF);
gfx_line(cluster[i].centroid.x, cluster[i].centroid.y - 10, cluster[i].centroid.x, cluster[i].centroid.y + 10);
gfx_line(cluster[i].centroid.x - 10, cluster[i].centroid.y, cluster[i].centroid.x + 10, cluster[i].centroid.y);
}
gfx_flush();
}
//Run all centroid Calculations and update the observations with their centroid
__global__ void centroid_calc(int size, point_t *observations, set_t *cluster, int num_observations) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
// one thread per observation; the launch below rounds the grid up to cover them all
if (i >= num_observations) {
return;
}
int min_cluster = -1;
double min_mean = size * size;
for (int k = 0; k < K; k++) {
double mean = powf(observations[i].x - cluster[k].centroid.x, 2)
+ powf(observations[i].y - cluster[k].centroid.y, 2);
if (min_mean > mean) {
min_mean = mean;
min_cluster = k;
}
}
observations[i].cluster = min_cluster;
// several threads can pick the same cluster at once, so its size must be updated atomically
if (min_cluster != -1) {
atomicAdd(&cluster[min_cluster].size, 1);
}
}
int main() {
printf("Version 1");
/* read first line to determine how much data */
int size, num_observations;
scanf("%d %d", &num_observations, &size);
/* data */
point_t observations[num_observations];
set_t cluster[K];
/* read the data in */
for (int i = 0; i < num_observations; i++) {
scanf("%d %d", &(observations[i].x), &(observations[i].y));
observations[i].cluster = NO_CLUSTER;
}
/* randomly set centroids */
unsigned int seed = (unsigned int) time(NULL);
for (int i = 0; i < K; i++) {
int which = rand_r(&seed) % num_observations;
// Forgy's Method
cluster[i].centroid.x = observations[which].x;
cluster[i].centroid.y = observations[which].y;
cluster[i].size = 0;
cluster[i].centroid.cluster = i;
}
//initiate GFX
gfx_open(size, size, "k-means clustering");
int how_many_move = 1;
int iterations = 0;
while (how_many_move > 0) {
show_observations(num_observations, observations, cluster);
// set all cluster sizes to 0
for (int k = 0; k < K; k++) {
cluster[k].size = 0;
}
iterations++;
printf("Iteration %d\n", iterations);
how_many_move = 0;
// remember the current assignments so we can count how many observations change cluster
int prev_cluster[num_observations];
for (int i = 0; i < num_observations; i++) {
prev_cluster[i] = observations[i].cluster;
}
// ASSIGNMENT STEP!!!!!
point_t *d_observations;
set_t *d_cluster;
int d_min_cluster;
size_t size = num_observations * sizeof(point_t);
size_t size_cluster = K * sizeof(set_t);
//Allocate the memory for the cluster and observation arrays on the device
cudaMalloc(&d_observations, size);
cudaMalloc(&d_cluster, size_cluster);
// copy the data from the local arrays to the device
cudaMemcpy(d_observations, observations, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_cluster, cluster, size_cluster, cudaMemcpyHostToDevice);
//Determine the block and grid format for the device
int threadsPerBlock = 256;
int blocksPerGrid = (num_observations + threadsPerBlock - 1) / threadsPerBlock;
//Call the centroid calculation method on the device
centroid_calc<<<blocksPerGrid, threadsPerBlock>>>(size, d_observations, d_cluster, num_observations);
//copy the observations and clusters back to the cpu to update Calculations
//the amount of data copied over could be reduced if the structs were reformatted
cudaMemcpy(observations, d_observations, size, cudaMemcpyDeviceToHost);
cudaMemcpy(cluster, d_cluster, size_cluster, cudaMemcpyDeviceToHost);
//cudaMemcpy(min_cluster, d_min_cluster, sizeof(int), cudaMemcpyDeviceToHost);
//free the device buffers; they are reallocated on the next iteration
cudaFree(d_observations);
cudaFree(d_cluster);
for (int i = 0; i < num_observations; i++) {
if (observations[i].cluster != prev_cluster[i]) {
how_many_move++;
}
}
// UPDATE STEP!!!!!
int sum_x[K], sum_y[K];
bzero(sum_x, K * sizeof(int));
bzero(sum_y, K * sizeof(int));
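// accumulate the coordinates of every observation assigned to each cluster;
// the new centroid computed below is the integer mean of its members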
for (int i = 0; i < num_observations; i++) {
sum_x[observations[i].cluster] += observations[i].x;
sum_y[observations[i].cluster] += observations[i].y;
}
for (int k = 0; k < K; k++) {
printf("%d: sum_x = %d, sum_y = %d, cluster.size = %d\n", k, sum_x[k], sum_y[k], cluster[k].size);
cluster[k].centroid.x = sum_x[k] / cluster[k].size;
cluster[k].centroid.y = sum_y[k] / cluster[k].size;
printf("cluster %d: (%d, %d)\n", k, cluster[k].centroid.x, cluster[k].centroid.y);
}
sleep(1);
}
printf("Done with %d itertions\n", iterations);
while (1) {
char c = gfx_wait();
if (c == 'q') break;
}
}
/* gcc -o kmeans kmeans.c gfx.c -I/usr/X11/include -L/usr/X11/lib -lX11 -lm */
/* ./kmeans.cu*/
|
3c814fdb923274b5c2193fecca114e37b9e20bb5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "multiply_gpu.cuh"
int TILE_SIZE = 32;
const int NUM_REPS = 100;
__global__ void naive_mult(float *a, float *b, float *c, int nx) {
int tidx = blockIdx.x * blockDim.x + threadIdx.x;
int tidy = blockIdx.y * blockDim.y + threadIdx.y;
int stride = blockDim.x * gridDim.x;
float sum;
for (int tidxx = tidx; tidxx < nx; tidxx += stride) {
for (int tidyy = tidy; tidyy < nx; tidyy += stride){
sum = 0.0f;
for (int j = 0; j < nx; j++) {
sum += a[tidyy * nx + j] * b[j * nx + tidxx];
}
c[tidyy * nx + tidxx] = sum;
}
}
}
//__global__ void tiled_mult(float *a, float *b, float *c, int nx) {
// int tidx = blockIdx.x * blockDim.x + threadIdx.x;
// int tidy = blockIdx.y * blockDim.y + threadIdx.y;
// int stride = blockDim.x * gridDim.x;
//
// float sum;
//
// for (int tidxx = tidx; tidxx < nx; tidxx += stride) {
// for (int tidyy = tidy; tidyy < nx; tidyy += stride) {
// sum = 0.0f;
// for (int j = 0; j < nx; j++) {
// sum += a[tidyy * nx + j] * b[j * nx + tidxx];
// }
// c[tidyy * nx + tidxx] = sum;
// }
// }
//}
extern "C" void naive_mult_driver(int deviceId, int nx, int ny) {
dim3 numThreads(TILE_SIZE, TILE_SIZE);
dim3 numBlocks(nx / TILE_SIZE, ny / TILE_SIZE);
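// launch roughly one thread per element of the result; the grid-stride loops in
// the kernel cover any remainder when nx or ny is not a multiple of TILE_SIZE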
float *a, *b, *c, *c_ref, *d_a, *d_b, *d_c;
a = (float*)malloc(nx * ny * sizeof(float));
b = (float*)malloc(nx * ny * sizeof(float));
c = (float*)malloc(nx * ny * sizeof(float));
c_ref = (float*)malloc(nx * ny * sizeof(float));
for (int i = 0; i < nx; i++) {
for (int j = 0; j < ny; j++) {
a[i * nx + j] = ((float)i+1.0f)/((float)j+1.0f);
b[i * nx + j] = ((float)j + 1.0f) / ((float)i + 1.0f);
}
}
float sum;
int count = 0;
for (int row = 0; row < nx; row++) {
for (int col = 0; col < nx; col++) {
sum = 0.0f;
for (int k = 0; k < nx; k++) {
sum += a[row * nx + k] * b[k * nx + col];
}
c_ref[row * nx + col] = sum;
}
}
CudaSafeCall(hipMalloc((float**)&d_a, nx * ny * sizeof(float)));
CudaSafeCall(hipMalloc((float**)&d_b, nx * ny * sizeof(float)));
CudaSafeCall(hipMalloc((float**)&d_c, nx * ny * sizeof(float)));
CudaSafeCall(hipMemcpy(d_a, a, nx * ny * sizeof(float), hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy(d_b, b, nx * ny * sizeof(float), hipMemcpyHostToDevice));
// set up the timer
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// ==================================
// ROUTINE: Naive matrix multiply
printf("%20s", "Naive Mat Mult");
float millis = 0;
// warm up
naive_mult << <numBlocks, numThreads >> > (d_a, d_b,d_c, nx);
CudaCheckError();
hipDeviceSynchronize();
// start the timer
hipEventRecord(start, 0);
for (int rep = 0; rep < NUM_REPS; rep++) {
naive_mult << <numBlocks, numThreads >> > (d_a, d_b, d_c, nx);
//hipDeviceSynchronize();
}
// stop the timer
hipEventRecord(stop, 0);
// sync the events
hipEventSynchronize(stop);
hipEventElapsedTime(&millis, start, stop);
// copy memory back to the host
hipMemcpy(c, d_c, nx * ny * sizeof(float), hipMemcpyDeviceToHost);
postprocess(c_ref, c, 2*nx+1, millis, nx*ny, NUM_REPS);
printf("%30s: %d x %d\n", "Matrix Size", nx, ny);
printf("%30s (%d,%d), %10s (%d,%d) \n", "Threads/Block:", numThreads.x, numThreads.y,
"Number of Blocks:", numBlocks.x, numBlocks.y);
} | 3c814fdb923274b5c2193fecca114e37b9e20bb5.cu | #include "multiply_gpu.cuh"
int TILE_SIZE = 32;
const int NUM_REPS = 100;
__global__ void naive_mult(float *a, float *b, float *c, int nx) {
int tidx = blockIdx.x * blockDim.x + threadIdx.x;
int tidy = blockIdx.y * blockDim.y + threadIdx.y;
int stride = blockDim.x * gridDim.x;
float sum;
for (int tidxx = tidx; tidxx < nx; tidxx += stride) {
for (int tidyy = tidy; tidyy < nx; tidyy += stride){
sum = 0.0f;
for (int j = 0; j < nx; j++) {
sum += a[tidyy * nx + j] * b[j * nx + tidxx];
}
c[tidyy * nx + tidxx] = sum;
}
}
}
//__global__ void tiled_mult(float *a, float *b, float *c, int nx) {
// int tidx = blockIdx.x * blockDim.x + threadIdx.x;
// int tidy = blockIdx.y * blockDim.y + threadIdx.y;
// int stride = blockDim.x * gridDim.x;
//
// float sum;
//
// for (int tidxx = tidx; tidxx < nx; tidxx += stride) {
// for (int tidyy = tidy; tidyy < nx; tidyy += stride) {
// sum = 0.0f;
// for (int j = 0; j < nx; j++) {
// sum += a[tidyy * nx + j] * b[j * nx + tidxx];
// }
// c[tidyy * nx + tidxx] = sum;
// }
// }
//}
extern "C" void naive_mult_driver(int deviceId, int nx, int ny) {
dim3 numThreads(TILE_SIZE, TILE_SIZE);
dim3 numBlocks(nx / TILE_SIZE, ny / TILE_SIZE);
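// launch roughly one thread per element of the result; the grid-stride loops in
// the kernel cover any remainder when nx or ny is not a multiple of TILE_SIZE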
float *a, *b, *c, *c_ref, *d_a, *d_b, *d_c;
a = (float*)malloc(nx * ny * sizeof(float));
b = (float*)malloc(nx * ny * sizeof(float));
c = (float*)malloc(nx * ny * sizeof(float));
c_ref = (float*)malloc(nx * ny * sizeof(float));
for (int i = 0; i < nx; i++) {
for (int j = 0; j < ny; j++) {
a[i * nx + j] = ((float)i+1.0f)/((float)j+1.0f);
b[i * nx + j] = ((float)j + 1.0f) / ((float)i + 1.0f);
}
}
float sum;
int count = 0;
for (int row = 0; row < nx; row++) {
for (int col = 0; col < nx; col++) {
sum = 0.0f;
for (int k = 0; k < nx; k++) {
sum += a[row * nx + k] * b[k * nx + col];
}
c_ref[row * nx + col] = sum;
}
}
CudaSafeCall(cudaMalloc((float**)&d_a, nx * ny * sizeof(float)));
CudaSafeCall(cudaMalloc((float**)&d_b, nx * ny * sizeof(float)));
CudaSafeCall(cudaMalloc((float**)&d_c, nx * ny * sizeof(float)));
CudaSafeCall(cudaMemcpy(d_a, a, nx * ny * sizeof(float), cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy(d_b, b, nx * ny * sizeof(float), cudaMemcpyHostToDevice));
// set up the timer
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// ==================================
// ROUTINE: Naive matrix multiply
printf("%20s", "Naive Mat Mult");
float millis = 0;
// warm up
naive_mult << <numBlocks, numThreads >> > (d_a, d_b,d_c, nx);
CudaCheckError();
cudaDeviceSynchronize();
// start the timer
cudaEventRecord(start, 0);
for (int rep = 0; rep < NUM_REPS; rep++) {
naive_mult << <numBlocks, numThreads >> > (d_a, d_b, d_c, nx);
//cudaDeviceSynchronize();
}
// stop the timer
cudaEventRecord(stop, 0);
// sync the events
cudaEventSynchronize(stop);
cudaEventElapsedTime(&millis, start, stop);
// copy memory back to the host
cudaMemcpy(c, d_c, nx * ny * sizeof(float), cudaMemcpyDeviceToHost);
postprocess(c_ref, c, 2*nx+1, millis, nx*ny, NUM_REPS);
printf("%30s: %d x %d\n", "Matrix Size", nx, ny);
printf("%30s (%d,%d), %10s (%d,%d) \n", "Threads/Block:", numThreads.x, numThreads.y,
"Number of Blocks:", numBlocks.x, numBlocks.y);
} |
b08c96b4f0f548cab270d246353e8bf5f33a152c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "randkernel.cuh"
common::RandEngine::RandEngine(int32_t n) : kernel_num(n) {
cuda_handler(hipMalloc((void**)&devPHILOXStates, sizeof(hiprandStatePhilox4_32_10_t) * kernel_num));
long clock_for_rand = clock();
hipLaunchKernelGGL(( setup_kernel) , dim3(kernel_num), dim3(1), 0, 0, devPHILOXStates, kernel_num, clock_for_rand);
}
common::RandEngine::~RandEngine() {
cuda_handler(hipFree(devPHILOXStates));
}
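// Fill result[0..n) with 64-bit random values: when n is smaller than the number
// of generator states, one value is produced per state; otherwise each state
// produces m = ceil(n / kernel_num) values, with out-of-range indices skipped in the kernel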
void common::RandEngine::rand(int32_t n, uint64_t* result) {
if (n < kernel_num) {
hipLaunchKernelGGL(( generate_kernel) , dim3(n), dim3(1) , 0, 0, devPHILOXStates, n, 1, result);
}
else {
int32_t m = (n - 1) / kernel_num + 1;
hipLaunchKernelGGL(( generate_kernel) , dim3(kernel_num), dim3(1) , 0, 0, devPHILOXStates, n, m, result);
}
}
__global__ void setup_kernel(hiprandStatePhilox4_32_10_t* state, int n, long clock_for_rand)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < 0 || id >= n)
{
return;
}
hiprand_init(clock_for_rand, id, 0, &state[id]);
}
__global__ void generate_kernel(hiprandStatePhilox4_32_10_t* state, int n, int m, uint64_t* result)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int count = 0;
unsigned int x;
hiprandStatePhilox4_32_10_t localState = state[id];
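// each hiprand() call yields 32 random bits; two draws are packed into one 64-bit value below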
for (int i = 0; i < m; ++i) {
int idx = id * m + i;
if (idx < n) {
uint64_t x0 = hiprand(&localState); // &0x1;
uint64_t x1 = hiprand(&localState); // &0x1;
result[idx] = ((x1 << 32) | x0); // &0xffffffff;
}
}
state[id] = localState;
}
| b08c96b4f0f548cab270d246353e8bf5f33a152c.cu | #include "randkernel.cuh"
common::RandEngine::RandEngine(int32_t n) : kernel_num(n) {
cuda_handler(cudaMalloc((void**)&devPHILOXStates, sizeof(curandStatePhilox4_32_10_t) * kernel_num));
long clock_for_rand = clock();
setup_kernel <<<kernel_num, 1>>> (devPHILOXStates, kernel_num, clock_for_rand);
}
common::RandEngine::~RandEngine() {
cuda_handler(cudaFree(devPHILOXStates));
}
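// Fill result[0..n) with 64-bit random values: when n is smaller than the number
// of generator states, one value is produced per state; otherwise each state
// produces m = ceil(n / kernel_num) values, with out-of-range indices skipped in the kernel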
void common::RandEngine::rand(int32_t n, uint64_t* result) {
if (n < kernel_num) {
generate_kernel <<<n, 1 >>> (devPHILOXStates, n, 1, result);
}
else {
int32_t m = (n - 1) / kernel_num + 1;
generate_kernel <<<kernel_num, 1 >>> (devPHILOXStates, n, m, result);
}
}
__global__ void setup_kernel(curandStatePhilox4_32_10_t* state, int n, long clock_for_rand)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < 0 || id >= n)
{
return;
}
curand_init(clock_for_rand, id, 0, &state[id]);
}
__global__ void generate_kernel(curandStatePhilox4_32_10_t* state, int n, int m, uint64_t* result)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int count = 0;
unsigned int x;
curandStatePhilox4_32_10_t localState = state[id];
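// each curand() call yields 32 random bits; two draws are packed into one 64-bit value below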
for (int i = 0; i < m; ++i) {
int idx = id * m + i;
if (idx < n) {
uint64_t x0 = curand(&localState); // &0x1;
uint64_t x1 = curand(&localState); // &0x1;
result[idx] = ((x1 << 32) | x0); // &0xffffffff;
}
}
state[id] = localState;
}
|
1d06031aa5aa36f98f3ef0095c95c5fa0a2a9b6f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "Kernel2.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *A = NULL;
hipMalloc(&A, XSIZE*YSIZE*sizeof(float));
int N = XSIZE*YSIZE;
int k = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
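// pad the launch dimensions up to multiples of the block size so the grid
// spans at least XSIZE x YSIZE threads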
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((Kernel2), dim3(gridBlock), dim3(threadBlock), 0, 0, A, N, k);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((Kernel2), dim3(gridBlock), dim3(threadBlock), 0, 0, A, N, k);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((Kernel2), dim3(gridBlock), dim3(threadBlock), 0, 0, A, N, k);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 1d06031aa5aa36f98f3ef0095c95c5fa0a2a9b6f.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "Kernel2.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *A = NULL;
cudaMalloc(&A, XSIZE*YSIZE*sizeof(float));
int N = XSIZE*YSIZE;
int k = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
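// pad the launch dimensions up to multiples of the block size so the grid
// spans at least XSIZE x YSIZE threads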
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
Kernel2<<<gridBlock,threadBlock>>>(A,N,k);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
Kernel2<<<gridBlock,threadBlock>>>(A,N,k);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
Kernel2<<<gridBlock,threadBlock>>>(A,N,k);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
5e73d9cec75342eae5356decb99761b3a22bc161.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void fillTwoFloatsArraysKernel(
int numberRows,
int numberEntries,
float* firstArray,
float firstConstant,
float* secondArray,
float secondConstant) {
int start = indexInstance * numberEntries + indexColumn * numberRows + threadIdx.x;
for(int index = start; index < min(start + numberIterations, numberEntries); index++) {
firstArray[index] = firstConstant;
secondArray[index] = secondConstant;
}
} | 5e73d9cec75342eae5356decb99761b3a22bc161.cu | __global__ void fillTwoFloatsArraysKernel(
int numberRows,
int numberEntries,
float* firstArray,
float firstConstant,
float* secondArray,
float secondConstant) {
int start = indexInstance * numberEntries + indexColumn * numberRows + threadIdx.x;
for(int index = start; index < min(start + numberIterations, numberEntries); index++) {
firstArray[index] = firstConstant;
secondArray[index] = secondConstant;
}
} |
a2f561153976f6d35fb80a1d9db2a9746ee2f0c2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.h"
#include <stdio.h>
__global__ void rgba_to_greyscale(const uchar4* const rgbaImage, unsigned char* const greyImage, int numRows, int numCols)
{
int y = threadIdx.y+ blockIdx.y* blockDim.y;
int x = threadIdx.x+ blockIdx.x* blockDim.x;
if (y < numCols && x < numRows) {
int index = numRows*y + x;
uchar4 color = rgbaImage[index];
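// 0.299/0.587/0.114 are the standard ITU-R BT.601 luma weights for R, G and B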
unsigned char grey = (unsigned char)(0.299f*color.x+ 0.587f*color.y + 0.114f*color.z);
greyImage[index] = grey;
}
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
int blockWidth = 32;
const dim3 blockSize(blockWidth, blockWidth, 1);
int blocksX = numRows/blockWidth+1;
int blocksY = numCols/blockWidth+1;
const dim3 gridSize( blocksX, blocksY, 1);
hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
}
| a2f561153976f6d35fb80a1d9db2a9746ee2f0c2.cu | #include "utils.h"
#include <stdio.h>
__global__ void rgba_to_greyscale(const uchar4* const rgbaImage, unsigned char* const greyImage, int numRows, int numCols)
{
int y = threadIdx.y+ blockIdx.y* blockDim.y;
int x = threadIdx.x+ blockIdx.x* blockDim.x;
if (y < numCols && x < numRows) {
int index = numRows*y + x;
uchar4 color = rgbaImage[index];
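// 0.299/0.587/0.114 are the standard ITU-R BT.601 luma weights for R, G and B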
unsigned char grey = (unsigned char)(0.299f*color.x+ 0.587f*color.y + 0.114f*color.z);
greyImage[index] = grey;
}
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
int blockWidth = 32;
const dim3 blockSize(blockWidth, blockWidth, 1);
int blocksX = numRows/blockWidth+1;
int blocksY = numCols/blockWidth+1;
const dim3 gridSize( blocksX, blocksY, 1);
rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
}
|
28ff7c63260f8b6844c764cfbc6c09dd70b8266d.hip | // !!! This is a file automatically generated by hipify!!!
// Matrix multiplication by parts
// Elements stored in row-major order
using namespace std;
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <hip/hip_runtime.h>
#include "helper_functions.h"
#define BLOCK_SIZE 16
#define ITER_COUNT 1000
typedef struct {
int width;
int height;
float *elements;
} Matrix;
// Forward declaration of matrix mult
__global__ void MatMulKernel (const Matrix, const Matrix, Matrix);
// Host code
void MatMulGPU(const Matrix A, const Matrix B, Matrix C) {
// Load matrices A and B to device memory
Matrix d_A;
d_A.width = A.width; d_A.height = A.height;
size_t size = A.width * A.height * sizeof(float);
hipMalloc((void**) &d_A.elements, size);
hipMemcpy(d_A.elements, A.elements, size, hipMemcpyHostToDevice);
Matrix d_B;
d_B.width = B.width; d_B.height = B.height;
size = B.width * B.height * sizeof(float);
hipMalloc((void**) &d_B.elements, size);
hipMemcpy(d_B.elements, B.elements, size, hipMemcpyHostToDevice);
// allocate C in device
Matrix d_C;
d_C.width = C.width; d_C.height = C.height;
size = d_C.width * d_C.height * sizeof(float);
hipMalloc((void**) &d_C.elements, size);
// call kernel
// 2D launch: one thread per element of C, tiled into BLOCK_SIZE x BLOCK_SIZE blocks
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid((C.width + BLOCK_SIZE - 1) / BLOCK_SIZE, (C.height + BLOCK_SIZE - 1) / BLOCK_SIZE);
// TIMER START
StopWatchInterface *timer=NULL;
sdkCreateTimer(&timer);
sdkResetTimer(&timer);
sdkStartTimer(&timer);
for(int i=0; i<ITER_COUNT; ++i) {
hipLaunchKernelGGL(( MatMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C);
}
// TIMER STOP
sdkStopTimer(&timer);
float time = sdkGetTimerValue(&timer);
sdkDeleteTimer(&timer);
printf("GPU Elapsed time: %f\n", time/ITER_COUNT);
// copy C to host
hipMemcpy(C.elements, d_C.elements, size, hipMemcpyDeviceToHost);
// free device memory
hipFree(d_A.elements);
hipFree(d_B.elements);
hipFree(d_C.elements);
}
//matrix multiplication kernel
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C) {
// Each thread computes one element of C
// by accumulating results into Cvalue
float Cvalue = 0;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if(row >= A.height || col >= B.width) return;
for (int e = 0; e < A.width; ++e) {
Cvalue += A.elements[row * A.width + e] * B.elements[e * B.width + col];
}
C.elements[row * C.width + col] = Cvalue;
}
void MatMulCPU(Matrix A, Matrix B, Matrix C) {
// TIMER START
StopWatchInterface *timer=NULL;
sdkCreateTimer(&timer);
sdkResetTimer(&timer);
sdkStartTimer(&timer);
for(int i=0; i<ITER_COUNT; ++i) {
for(int row=0; row<A.height; row++) {
for(int col=0; col<B.width; col++) {
float Cvalue = 0;
for (int e = 0; e < A.width; ++e) {
Cvalue += A.elements[row * A.width + e] * B.elements[e * B.width + col];
}
C.elements[row * C.width + col] = Cvalue;
}
}
}
// TIMER STOP
sdkStopTimer(&timer);
float time = sdkGetTimerValue(&timer);
sdkDeleteTimer(&timer);
printf("CPU Elapsed time: %f\n", time/ITER_COUNT);
}
int main(int argc, char * const argv[]) {
int Width = 16;
Matrix A;
Matrix B;
Matrix C;
A.width = Width;
B.width = Width;
C.width = Width;
A.height = Width;
B.height = Width;
C.height = Width;
A.elements = new float[Width*Width];
B.elements = new float[Width*Width];
C.elements = new float[Width*Width];
//fill matrices
std::ifstream A_input;
std::ifstream B_input;
A_input.open("A.txt");
B_input.open("B.txt");
float a, b;
A_input >> a;
B_input >> b;
int i = 0;
while (!A_input.eof()) {
A.elements[i] = a;
B.elements[i] = b;
A_input >> a;
B_input >> b;
i += 1;
}
A_input.close();
B_input.close();
// TIMER START
StopWatchInterface *timer=NULL;
sdkCreateTimer(&timer);
sdkResetTimer(&timer);
sdkStartTimer(&timer);
MatMulGPU(A, B, C);
// TIMER STOP
sdkStopTimer(&timer);
float gpuTime = sdkGetTimerValue(&timer);
sdkDeleteTimer(&timer);
// TIMER START
timer=NULL;
sdkCreateTimer(&timer);
sdkResetTimer(&timer);
sdkStartTimer(&timer);
MatMulCPU(A, B, C);
// TIMER STOP
sdkStopTimer(&timer);
float cpuTime = sdkGetTimerValue(&timer);
sdkDeleteTimer(&timer);
std::ofstream C_output;
C_output.open("C.txt");
for (int i=0; i<Width; i++) {
for (int j=0; j<Width; j++) {
C_output<<C.elements[i*Width+j]<<"\t";
}
C_output<<endl;
}
C_output.close();
printf("GPU Elapsed total time: %f\n", gpuTime);
printf("CPU Elapsed total time: %f\n", cpuTime);
delete[] A.elements;
delete[] B.elements;
delete[] C.elements;
hipDeviceReset();
return 0;
}
| 28ff7c63260f8b6844c764cfbc6c09dd70b8266d.cu | // Matrix multiplication by parts
// Elements stored in row-major order
using namespace std;
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <cuda.h>
#include "helper_functions.h"
#define BLOCK_SIZE 16
#define ITER_COUNT 1000
typedef struct {
int width;
int height;
float *elements;
} Matrix;
// Forward declaration of matrix mult
__global__ void MatMulKernel (const Matrix, const Matrix, Matrix);
// Host code
void MatMulGPU(const Matrix A, const Matrix B, Matrix C) {
// Load matrices A and B to device memory
Matrix d_A;
d_A.width = A.width; d_A.height = A.height;
size_t size = A.width * A.height * sizeof(float);
cudaMalloc((void**) &d_A.elements, size);
cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);
Matrix d_B;
d_B.width = B.width; d_B.height = B.height;
size = B.width * B.height * sizeof(float);
cudaMalloc((void**) &d_B.elements, size);
cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);
// allocate C in device
Matrix d_C;
d_C.width = C.width; d_C.height = C.height;
size = d_C.width * d_C.height * sizeof(float);
cudaMalloc((void**) &d_C.elements, size);
// call kernel
// 2D launch: one thread per element of C, tiled into BLOCK_SIZE x BLOCK_SIZE blocks
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid((C.width + BLOCK_SIZE - 1) / BLOCK_SIZE, (C.height + BLOCK_SIZE - 1) / BLOCK_SIZE);
// TIMER START
StopWatchInterface *timer=NULL;
sdkCreateTimer(&timer);
sdkResetTimer(&timer);
sdkStartTimer(&timer);
for(int i=0; i<ITER_COUNT; ++i) {
MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
}
// TIMER STOP
sdkStopTimer(&timer);
float time = sdkGetTimerValue(&timer);
sdkDeleteTimer(&timer);
printf("GPU Elapsed time: %f\n", time/ITER_COUNT);
// copy C to host
cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost);
// free device memory
cudaFree(d_A.elements);
cudaFree(d_B.elements);
cudaFree(d_C.elements);
}
//matrix multiplication kernel
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C) {
// Each thread computes one element of C
// by accumulating results into Cvalue
float Cvalue = 0;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if(row >= A.height || col >= B.width) return;
for (int e = 0; e < A.width; ++e) {
Cvalue += A.elements[row * A.width + e] * B.elements[e * B.width + col];
}
C.elements[row * C.width + col] = Cvalue;
}
void MatMulCPU(Matrix A, Matrix B, Matrix C) {
// TIMER START
StopWatchInterface *timer=NULL;
sdkCreateTimer(&timer);
sdkResetTimer(&timer);
sdkStartTimer(&timer);
for(int i=0; i<ITER_COUNT; ++i) {
for(int row=0; row<A.height; row++) {
for(int col=0; col<B.width; col++) {
float Cvalue = 0;
for (int e = 0; e < A.width; ++e) {
Cvalue += A.elements[row * A.width + e] * B.elements[e * B.width + col];
}
C.elements[row * C.width + col] = Cvalue;
}
}
}
// TIMER STOP
sdkStopTimer(&timer);
float time = sdkGetTimerValue(&timer);
sdkDeleteTimer(&timer);
printf("CPU Elapsed time: %f\n", time/ITER_COUNT);
}
int main(int argc, char * const argv[]) {
int Width = 16;
Matrix A;
Matrix B;
Matrix C;
A.width = Width;
B.width = Width;
C.width = Width;
A.height = Width;
B.height = Width;
C.height = Width;
A.elements = new float[Width*Width];
B.elements = new float[Width*Width];
C.elements = new float[Width*Width];
//fill matrices
std::ifstream A_input;
std::ifstream B_input;
A_input.open("A.txt");
B_input.open("B.txt");
float a, b;
A_input >> a;
B_input >> b;
int i = 0;
while (!A_input.eof()) {
A.elements[i] = a;
B.elements[i] = b;
A_input >> a;
B_input >> b;
i += 1;
}
A_input.close();
B_input.close();
// TIMER START
StopWatchInterface *timer=NULL;
sdkCreateTimer(&timer);
sdkResetTimer(&timer);
sdkStartTimer(&timer);
MatMulGPU(A, B, C);
// TIMER STOP
sdkStopTimer(&timer);
float gpuTime = sdkGetTimerValue(&timer);
sdkDeleteTimer(&timer);
// TIMER START
timer=NULL;
sdkCreateTimer(&timer);
sdkResetTimer(&timer);
sdkStartTimer(&timer);
MatMulCPU(A, B, C);
// TIMER STOP
sdkStopTimer(&timer);
float cpuTime = sdkGetTimerValue(&timer);
sdkDeleteTimer(&timer);
std::ofstream C_output;
C_output.open("C.txt");
for (int i=0; i<Width; i++) {
for (int j=0; j<Width; j++) {
C_output<<C.elements[i*Width+j]<<"\t";
}
C_output<<endl;
}
C_output.close();
printf("GPU Elapsed total time: %f\n", gpuTime);
printf("CPU Elapsed total time: %f\n", cpuTime);
delete[] A.elements;
delete[] B.elements;
delete[] C.elements;
cudaDeviceReset();
return 0;
}
|
b38f94b806cdb99e8c677a8730f93ef4809056af.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////temp_yanglp: this file contains the main sort kernel functions, three in total
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* Radixsort project with key/value and arbitrary datset size support
* which demonstrates the use of CUDA in a multi phase sorting
* computation.
* Device code.
*/
#ifndef _RADIXSORT_KERNEL_H_
#define _RADIXSORT_KERNEL_H_
#include <stdio.h>
#include "radixsort.cuh"
#define SYNCIT __syncthreads()
static const int NUM_SMS = 16;
static const int NUM_THREADS_PER_SM = 192;
static const int NUM_THREADS_PER_BLOCK = 64;
//static const int NUM_THREADS = NUM_THREADS_PER_SM * NUM_SMS;
static const int NUM_BLOCKS = (NUM_THREADS_PER_SM / NUM_THREADS_PER_BLOCK) * NUM_SMS;
static const int RADIX = 8; // Number of bits per radix sort pass
static const int RADICES = 1 << RADIX; // Number of radices
static const int RADIXMASK = RADICES - 1; // Mask for each radix sort pass
#if SIXTEEN
static const int RADIXBITS = 16; // Number of bits to sort over
#else
static const int RADIXBITS = 32; // Number of bits to sort over
#endif
static const int RADIXTHREADS = 16; // Number of threads sharing each radix counter
static const int RADIXGROUPS = NUM_THREADS_PER_BLOCK / RADIXTHREADS; // Number of radix groups per CTA
static const int TOTALRADIXGROUPS = NUM_BLOCKS * RADIXGROUPS; // Number of radix groups for each radix
static const int SORTRADIXGROUPS = TOTALRADIXGROUPS * RADICES; // Total radix count
static const int GRFELEMENTS = (NUM_THREADS_PER_BLOCK / RADIXTHREADS) * RADICES;
static const int GRFSIZE = GRFELEMENTS * sizeof(uint);
// Prefix sum variables
static const int PREFIX_NUM_THREADS_PER_SM = NUM_THREADS_PER_SM;
static const int PREFIX_NUM_THREADS_PER_BLOCK = PREFIX_NUM_THREADS_PER_SM;
static const int PREFIX_NUM_BLOCKS = (PREFIX_NUM_THREADS_PER_SM / PREFIX_NUM_THREADS_PER_BLOCK) * NUM_SMS;
static const int PREFIX_BLOCKSIZE = SORTRADIXGROUPS / PREFIX_NUM_BLOCKS;
static const int PREFIX_GRFELEMENTS = PREFIX_BLOCKSIZE + 2 * PREFIX_NUM_THREADS_PER_BLOCK;
static const int PREFIX_GRFSIZE = PREFIX_GRFELEMENTS * sizeof(uint);
// Shuffle variables
static const int SHUFFLE_GRFOFFSET = RADIXGROUPS * RADICES;
static const int SHUFFLE_GRFELEMENTS = SHUFFLE_GRFOFFSET + PREFIX_NUM_BLOCKS;
static const int SHUFFLE_GRFSIZE = SHUFFLE_GRFELEMENTS * sizeof(uint);
#define SDATA( index) CUT_BANK_CHECKER(sdata, index)
// Prefix sum data
uint gRadixSum[TOTALRADIXGROUPS * RADICES];
__device__ uint dRadixSum[TOTALRADIXGROUPS * RADICES];
uint gRadixBlockSum[PREFIX_NUM_BLOCKS];
__device__ uint dRadixBlockSum[PREFIX_NUM_BLOCKS];
extern __shared__ uint sRadixSum[];
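// dynamically sized shared-memory workspace used by all three kernels; the host
// presumably passes GRFSIZE, PREFIX_GRFSIZE or SHUFFLE_GRFSIZE (defined above)
// as the dynamic shared-memory size when launching the corresponding kernel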
////////////////////////////////////////////////////////////////////////////////
//! Perform a radix sum on the list to be sorted. Each SM holds a set of
//! radix counters for each group of RADIXGROUPS thread in the GRF.
//!
//! @param pData input data
//! @param elements total number of elements
//! @param elements_rounded_to_3072 total number of elements rounded up to the
//! nearest multiple of 3072
//! @param shift the shift (0 to 24) that we are using to obtain the correct
//! byte
////////////////////////////////////////////////////////////////////////////////
__global__ void RadixSum(KeyValuePair *pData, uint elements, uint elements_rounded_to_3072, uint shift)
{
uint pos = threadIdx.x;
// Zero radix counts
while (pos < GRFELEMENTS)
{
sRadixSum[pos] = 0;
pos += NUM_THREADS_PER_BLOCK;
}
// Sum up data
// Source addresses computed so that each thread is reading from a block of
// consecutive addresses so there are no conflicts between threads
// They then loop over their combined region and the next batch works elsewhere.
// So threads 0 to 16 work on memory 0 to 320.
// First reading 0,1,2,3...15 then 16,17,18,19...31 and so on
// optimising parallel access to shared memory by a thread accessing 16*threadID
// The next radix group runs from 320 to 640 and the same applies in that region
uint tmod = threadIdx.x % RADIXTHREADS;
uint tpos = threadIdx.x / RADIXTHREADS;
// Take the rounded element list size so that all threads have a certain size dataset to work with
// and no zero size datasets confusing the issue
// By using a multiple of 3072 we ensure that all threads have elements
// to work with until the last phase, at which point we individually test
uint element_fraction = elements_rounded_to_3072 / TOTALRADIXGROUPS;
// Generate range
// Note that it is possible for both pos and end to be past the end of the element set
// which will be caught later.
pos = (blockIdx.x * RADIXGROUPS + tpos) * element_fraction;
uint end = pos + element_fraction;
pos += tmod;
//printf("pos: %d\n", pos);
__syncthreads();
while (pos < end)
{
uint key = 0;
// Read first data element if we are in the set of elements
//if( pos < elements )
//key = pData[pos].key;
KeyValuePair kvp;
// Read first data element, both items at once as the memory will want to coalesce like that anyway
if (pos < elements)
kvp = pData[pos];
else
kvp.key = 0;
key = kvp.key;
// Calculate position of radix counter to increment
// There are RADICES radices in each pass (256)
// and hence this many counters for bin grouping
// Multiply by RADIXGROUPS (4) to spread through memory
// and into 4 radix groups
uint p = ((key >> shift) & RADIXMASK) * RADIXGROUPS;
// Increment radix counters
// Each radix group has its own set of counters
// so we add the thread position [0-3], ie the group index.
// We slow down here and take at least 16 cycles to write to the summation boxes
// but other groups will only conflict with themselves and so can also be writing
// 16 cycles here at least avoids retries.
uint ppos = p + tpos;
// If we are past the last element we don't want to do anything
// We do have to check each time, however, to ensure that all
// threads sync on each sync here.
if (tmod == 0 && pos < elements)
sRadixSum[ppos]++;
SYNCIT;
if (tmod == 1 && pos < elements)
sRadixSum[ppos]++;
SYNCIT;
if (tmod == 2 && pos < elements)
sRadixSum[ppos]++;
SYNCIT;
if (tmod == 3 && pos < elements)
sRadixSum[ppos]++;
SYNCIT;
if (tmod == 4 && pos < elements)
sRadixSum[ppos]++;
SYNCIT;
if (tmod == 5 && pos < elements)
sRadixSum[ppos]++;
SYNCIT;
if (tmod == 6 && pos < elements)
sRadixSum[ppos]++;
SYNCIT;
if (tmod == 7 && pos < elements)
sRadixSum[ppos]++;
SYNCIT;
if (tmod == 8 && pos < elements)
sRadixSum[ppos]++;
SYNCIT;
if (tmod == 9 && pos < elements)
sRadixSum[ppos]++;
SYNCIT;
if (tmod == 10 && pos < elements)
sRadixSum[ppos]++;
SYNCIT;
if (tmod == 11 && pos < elements)
sRadixSum[ppos]++;
SYNCIT;
if (tmod == 12 && pos < elements)
sRadixSum[ppos]++;
SYNCIT;
if (tmod == 13 && pos < elements)
sRadixSum[ppos]++;
SYNCIT;
if (tmod == 14 && pos < elements)
sRadixSum[ppos]++;
SYNCIT;
if (tmod == 15 && pos < elements)
sRadixSum[ppos]++;
SYNCIT;
pos += RADIXTHREADS;
}
__syncthreads();
__syncthreads();
// Output radix sums into separate memory regions for each radix group
// So this memory then is layed out:
// 0...... 192..... 384 ................ 192*256
// ie all 256 bins for each radix group
// in there:
// 0.............192
// 0 4 8 12... - block idx * 4
// And in the block boxes we see the 4 radix groups for that block
// So 0-192 should contain bin 0 for each radix group, and so on
uint offset = blockIdx.x * RADIXGROUPS;
uint row = threadIdx.x / RADIXGROUPS;
uint column = threadIdx.x % RADIXGROUPS;
while (row < RADICES)
{
dRadixSum[offset + row * TOTALRADIXGROUPS + column] = sRadixSum[row * RADIXGROUPS + column];
row += NUM_THREADS_PER_BLOCK / RADIXGROUPS;
}
}
////////////////////////////////////////////////////////////////////////////////
//! Performs first part of parallel prefix sum - individual sums of each radix
//! count. By the end of this we have prefix sums on a block level in dRadixSum
//! and totals for blocks in dRadixBlockSum.
////////////////////////////////////////////////////////////////////////////////
__global__ void RadixPrefixSum()
{
// Read radix groups in offset by one in the GRF so a zero can be inserted at the beginning
// and the final sum of all radix counts summed here is tacked onto the end for reading by
// the next stage
// Each block in this case is the full number of threads per SM (and hence the total number
// of radix groups), 192. We should then have the total set of offsets for an entire radix
// group by the end of this stage
// Device mem addressing
uint brow = blockIdx.x * (RADICES / PREFIX_NUM_BLOCKS);
uint drow = threadIdx.x / TOTALRADIXGROUPS; // In default parameterisation this is always 0
uint dcolumn = threadIdx.x % TOTALRADIXGROUPS; // And similarly this is always the same as threadIdx.x
uint dpos = (brow + drow) * TOTALRADIXGROUPS + dcolumn;
uint end = ((blockIdx.x + 1) * (RADICES / PREFIX_NUM_BLOCKS)) * TOTALRADIXGROUPS;
// Shared mem addressing
uint srow = threadIdx.x / (PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK);
uint scolumn = threadIdx.x % (PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK);
uint spos = srow * (PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK + 1) + scolumn;
// Read (RADICES / PREFIX_NUM_BLOCKS) radix counts into the GRF alongside each other
while (dpos < end)
{
sRadixSum[spos] = dRadixSum[dpos];
spos += (PREFIX_NUM_THREADS_PER_BLOCK / (PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK)) *
(PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK + 1);
dpos += (TOTALRADIXGROUPS / PREFIX_NUM_THREADS_PER_BLOCK) * TOTALRADIXGROUPS;
}
__syncthreads();
// Perform preliminary sum on each thread's stretch of data
// Each thread having a block of 16, with spacers between 0...16 18...33 and so on
int pos = threadIdx.x * (PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK + 1);
end = pos + (PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK);
uint sum = 0;
while (pos < end)
{
sum += sRadixSum[pos];
sRadixSum[pos] = sum;
pos++;
}
__syncthreads();
// Calculate internal offsets by performing a more traditional parallel
// prefix sum of the topmost member of each thread's work data. Right now,
// these are stored between the work data for each thread, allowing us to
// eliminate GRF conflicts as well as hold the offsets needed to complete the sum
// In other words we have:
// 0....15 16 17....32 33 34....
// Where this first stage updates the intermediate values (so 16=15, 33=32 etc)
int m = (PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK + 1);
pos = threadIdx.x * (PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK + 1) +
(PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK);
sRadixSum[pos] = sRadixSum[pos - 1];
__syncthreads();
// This stage then performs a parallel prefix sum (ie use powers of 2 to propagate in log n stages)
// to update 17, 34 etc with the totals to that point (so 34 becomes [34] + [17]) and so on.
while (m < PREFIX_NUM_THREADS_PER_BLOCK * (PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK + 1))
{
int p = pos - m;
uint t = ((p > 0) ? sRadixSum[p] : 0);
__syncthreads();
sRadixSum[pos] += t;
__syncthreads();
m *= 2;
}
__syncthreads();
// Add internal offsets to each thread's work data.
// So now we take 17 and add it to all values 18 to 33 so all offsets for that block
// are updated.
pos = threadIdx.x * (PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK + 1);
end = pos + (PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK);
int p = pos - 1;
sum = ((p > 0) ? sRadixSum[p] : 0);
while (pos < end)
{
sRadixSum[pos] += sum;
pos++;
}
__syncthreads();
// Write summed data back out to global memory in the same way as we read it in
// We now have prefix sum values internal to groups
brow = blockIdx.x * (RADICES / PREFIX_NUM_BLOCKS);
drow = threadIdx.x / TOTALRADIXGROUPS;
dcolumn = threadIdx.x % TOTALRADIXGROUPS;
srow = threadIdx.x / (PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK);
scolumn = threadIdx.x % (PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK);
dpos = (brow + drow) * TOTALRADIXGROUPS + dcolumn + 1;
spos = srow * (PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK + 1) + scolumn;
end = ((blockIdx.x + 1) * RADICES / PREFIX_NUM_BLOCKS) * TOTALRADIXGROUPS;
while (dpos < end)
{
dRadixSum[dpos] = sRadixSum[spos];
dpos += (TOTALRADIXGROUPS / PREFIX_NUM_THREADS_PER_BLOCK) * TOTALRADIXGROUPS;
spos += (PREFIX_NUM_THREADS_PER_BLOCK / (PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK)) *
(PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK + 1);
}
// Write last element to summation
// Storing block sums in a separate array
if (threadIdx.x == 0) {
dRadixBlockSum[blockIdx.x] = sRadixSum[PREFIX_NUM_THREADS_PER_BLOCK * (PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK + 1) - 1];
dRadixSum[blockIdx.x * PREFIX_BLOCKSIZE] = 0;
}
}
////////////////////////////////////////////////////////////////////////////////
//! Initially perform prefix sum of block totals to obtain final set of offsets.
//! Then make use of radix sums to perform a shuffling of the data into the
//! correct bins.
//!
//! @param pSrc input data
//! @param pDst output data
//! @param elements total number of elements
//! @param shift the shift (0 to 24) that we are using to obtain the correct
//! byte
////////////////////////////////////////////////////////////////////////////////
__global__ void RadixAddOffsetsAndShuffle(KeyValuePair* pSrc, KeyValuePair* pDst, uint elements, uint elements_rounded_to_3072, int shift)
{
// Read offsets from previous blocks
if (threadIdx.x == 0)
sRadixSum[SHUFFLE_GRFOFFSET] = 0;
if (threadIdx.x < PREFIX_NUM_BLOCKS - 1)
sRadixSum[SHUFFLE_GRFOFFSET + threadIdx.x + 1] = dRadixBlockSum[threadIdx.x];
__syncthreads();
// Parallel prefix sum over block sums
int pos = threadIdx.x;
int n = 1;
while (n < PREFIX_NUM_BLOCKS)
{
int ppos = pos - n;
uint t0 = ((pos < PREFIX_NUM_BLOCKS) && (ppos >= 0)) ? sRadixSum[SHUFFLE_GRFOFFSET + ppos] : 0;
__syncthreads();
if (pos < PREFIX_NUM_BLOCKS)
sRadixSum[SHUFFLE_GRFOFFSET + pos] += t0;
__syncthreads();
n *= 2;
}
// Read radix count data and add appropriate block offset
// for each radix at the memory location for this thread
// (where the other threads in the block will be reading
// as well, hence the large stride).
// There is one counter box per radix group per radix
// per block (4*256*3)
// We use 64 threads to read the 4 radix groups set of radices
// for the block.
int row = threadIdx.x / RADIXGROUPS;
int column = threadIdx.x % RADIXGROUPS;
int spos = row * RADIXGROUPS + column;
int dpos = row * TOTALRADIXGROUPS + column + blockIdx.x * RADIXGROUPS;
while (spos < SHUFFLE_GRFOFFSET)
{
sRadixSum[spos] = dRadixSum[dpos] + sRadixSum[SHUFFLE_GRFOFFSET + dpos / (TOTALRADIXGROUPS * RADICES / PREFIX_NUM_BLOCKS)];
spos += NUM_THREADS_PER_BLOCK;
dpos += (NUM_THREADS_PER_BLOCK / RADIXGROUPS) * TOTALRADIXGROUPS;
}
__syncthreads();
//int pos;
// Shuffle data
// Each of the subbins for a block should be filled via the counters, properly interleaved
// Then, as we now iterate over each data value, we increment the subbins (each thread in the
// radix group in turn to avoid miss writes due to conflicts) and set locations correctly.
uint element_fraction = elements_rounded_to_3072 / TOTALRADIXGROUPS;
int tmod = threadIdx.x % RADIXTHREADS;
int tpos = threadIdx.x / RADIXTHREADS;
pos = (blockIdx.x * RADIXGROUPS + tpos) * element_fraction;
uint end = pos + element_fraction; //(blockIdx.x * RADIXGROUPS + tpos + 1) * element_fraction;
pos += tmod;
__syncthreads();
while (pos < end)
{
KeyValuePair kvp;
#if 1 // old load
// Read first data element, both items at once as the memory will want to coalesce like that anyway
if (pos < elements)
{
kvp = pSrc[pos];
}
else
kvp.key = 0;
#else // casting to float2 to get it to combine loads
int2 kvpf2;
// Read first data element, both items at once as the memory will want to coalesce like that anyway
if (pos < elements)
{
// kvp = pSrc[pos];
kvpf2 = ((int2*)pSrc)[pos];
// printf("kvp: %f %f kvpf2: %f %f\n", kvp.key, kvp.value, kvpf2.x, kvpf2.y);
}
else
//kvp.key = 0;
kvpf2.x = 0;
kvp.key = kvpf2.x;
kvp.value = kvpf2.y;
#endif
uint index;
// Calculate position of radix counter to increment
uint p = ((kvp.key >> shift) & RADIXMASK) * RADIXGROUPS;
// Move data, keeping counts updated.
// Increment radix counters, relying on hexadecathread
// warp to prevent this code from stepping all over itself.
uint ppos = p + tpos;
if (tmod == 0 && pos < elements)
{
index = sRadixSum[ppos]++;
pDst[index] = kvp;
}
SYNCIT;
if (tmod == 1 && pos < elements)
{
index = sRadixSum[ppos]++;
pDst[index] = kvp;
}
SYNCIT;
if (tmod == 2 && pos < elements)
{
index = sRadixSum[ppos]++;
pDst[index] = kvp;
}
SYNCIT;
if (tmod == 3 && pos < elements)
{
index = sRadixSum[ppos]++;
pDst[index] = kvp;
}
SYNCIT;
if (tmod == 4 && pos < elements)
{
index = sRadixSum[ppos]++;
pDst[index] = kvp;
}
SYNCIT;
if (tmod == 5 && pos < elements)
{
index = sRadixSum[ppos]++;
pDst[index] = kvp;
}
SYNCIT;
if (tmod == 6 && pos < elements)
{
index = sRadixSum[ppos]++;
pDst[index] = kvp;
}
SYNCIT;
if (tmod == 7 && pos < elements)
{
index = sRadixSum[ppos]++;
pDst[index] = kvp;
}
SYNCIT;
if (tmod == 8 && pos < elements)
{
index = sRadixSum[ppos]++;
pDst[index] = kvp;
}
SYNCIT;
if (tmod == 9 && pos < elements)
{
index = sRadixSum[ppos]++;
pDst[index] = kvp;
}
SYNCIT;
if (tmod == 10 && pos < elements)
{
index = sRadixSum[ppos]++;
pDst[index] = kvp;
}
SYNCIT;
if (tmod == 11 && pos < elements)
{
index = sRadixSum[ppos]++;
pDst[index] = kvp;
}
SYNCIT;
if (tmod == 12 && pos < elements)
{
index = sRadixSum[ppos]++;
pDst[index] = kvp;
}
SYNCIT;
if (tmod == 13 && pos < elements)
{
index = sRadixSum[ppos]++;
pDst[index] = kvp;
}
SYNCIT;
if (tmod == 14 && pos < elements)
{
index = sRadixSum[ppos]++;
pDst[index] = kvp;
}
SYNCIT;
if (tmod == 15 && pos < elements)
{
index = sRadixSum[ppos]++;
pDst[index] = kvp;
}
SYNCIT;
pos += RADIXTHREADS;
}
__syncthreads();
}
#endif // #ifndef _RADIXSORT_KERNEL_H_
| b38f94b806cdb99e8c677a8730f93ef4809056af.cu | ////temp_yanglp: this file contains the main sort kernel functions, three in total
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* Radixsort project with key/value and arbitrary datset size support
* which demonstrates the use of CUDA in a multi phase sorting
* computation.
* Device code.
*/
#ifndef _RADIXSORT_KERNEL_H_
#define _RADIXSORT_KERNEL_H_
#include <stdio.h>
#include "radixsort.cuh"
#define SYNCIT __syncthreads()
static const int NUM_SMS = 16;
static const int NUM_THREADS_PER_SM = 192;
static const int NUM_THREADS_PER_BLOCK = 64;
//static const int NUM_THREADS = NUM_THREADS_PER_SM * NUM_SMS;
static const int NUM_BLOCKS = (NUM_THREADS_PER_SM / NUM_THREADS_PER_BLOCK) * NUM_SMS;
static const int RADIX = 8; // Number of bits per radix sort pass
static const int RADICES = 1 << RADIX; // Number of radices
static const int RADIXMASK = RADICES - 1; // Mask for each radix sort pass
#if SIXTEEN
static const int RADIXBITS = 16; // Number of bits to sort over
#else
static const int RADIXBITS = 32; // Number of bits to sort over
#endif
static const int RADIXTHREADS = 16; // Number of threads sharing each radix counter
static const int RADIXGROUPS = NUM_THREADS_PER_BLOCK / RADIXTHREADS; // Number of radix groups per CTA
static const int TOTALRADIXGROUPS = NUM_BLOCKS * RADIXGROUPS; // Number of radix groups for each radix
static const int SORTRADIXGROUPS = TOTALRADIXGROUPS * RADICES; // Total radix count
static const int GRFELEMENTS = (NUM_THREADS_PER_BLOCK / RADIXTHREADS) * RADICES;
static const int GRFSIZE = GRFELEMENTS * sizeof(uint);
// Prefix sum variables
static const int PREFIX_NUM_THREADS_PER_SM = NUM_THREADS_PER_SM;
static const int PREFIX_NUM_THREADS_PER_BLOCK = PREFIX_NUM_THREADS_PER_SM;
static const int PREFIX_NUM_BLOCKS = (PREFIX_NUM_THREADS_PER_SM / PREFIX_NUM_THREADS_PER_BLOCK) * NUM_SMS;
static const int PREFIX_BLOCKSIZE = SORTRADIXGROUPS / PREFIX_NUM_BLOCKS;
static const int PREFIX_GRFELEMENTS = PREFIX_BLOCKSIZE + 2 * PREFIX_NUM_THREADS_PER_BLOCK;
static const int PREFIX_GRFSIZE = PREFIX_GRFELEMENTS * sizeof(uint);
// Shuffle variables
static const int SHUFFLE_GRFOFFSET = RADIXGROUPS * RADICES;
static const int SHUFFLE_GRFELEMENTS = SHUFFLE_GRFOFFSET + PREFIX_NUM_BLOCKS;
static const int SHUFFLE_GRFSIZE = SHUFFLE_GRFELEMENTS * sizeof(uint);
#define SDATA( index) CUT_BANK_CHECKER(sdata, index)
// Prefix sum data
uint gRadixSum[TOTALRADIXGROUPS * RADICES];
__device__ uint dRadixSum[TOTALRADIXGROUPS * RADICES];
uint gRadixBlockSum[PREFIX_NUM_BLOCKS];
__device__ uint dRadixBlockSum[PREFIX_NUM_BLOCKS];
extern __shared__ uint sRadixSum[];
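// dynamically sized shared-memory workspace used by all three kernels; the host
// presumably passes GRFSIZE, PREFIX_GRFSIZE or SHUFFLE_GRFSIZE (defined above)
// as the dynamic shared-memory size when launching the corresponding kernel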
////////////////////////////////////////////////////////////////////////////////
//! Perform a radix sum on the list to be sorted. Each SM holds a set of
//! radix counters for each group of RADIXGROUPS thread in the GRF.
//!
//! @param pData input data
//! @param elements total number of elements
//! @param elements_rounded_to_3072 total number of elements rounded up to the
//! nearest multiple of 3072
//! @param shift the shift (0 to 24) that we are using to obtain the correct
//! byte
////////////////////////////////////////////////////////////////////////////////
__global__ void RadixSum(KeyValuePair *pData, uint elements, uint elements_rounded_to_3072, uint shift)
{
uint pos = threadIdx.x;
// Zero radix counts
while (pos < GRFELEMENTS)
{
sRadixSum[pos] = 0;
pos += NUM_THREADS_PER_BLOCK;
}
// Sum up data
// Source addresses computed so that each thread is reading from a block of
// consecutive addresses so there are no conflicts between threads
// They then loop over their combined region and the next batch works elsewhere.
// So threads 0 to 16 work on memory 0 to 320.
// First reading 0,1,2,3...15 then 16,17,18,19...31 and so on
// optimising parallel access to shared memory by a thread accessing 16*threadID
// The next radix group runs from 320 to 640 and the same applies in that region
uint tmod = threadIdx.x % RADIXTHREADS;
uint tpos = threadIdx.x / RADIXTHREADS;
// Take the rounded element list size so that all threads have a certain size dataset to work with
// and no zero size datasets confusing the issue
// By using a multiple of 3072 we ensure that all threads have elements
// to work with until the last phase, at which point we individually test
uint element_fraction = elements_rounded_to_3072 / TOTALRADIXGROUPS;
// Generate range
// Note that it is possible for both pos and end to be past the end of the element set
// which will be caught later.
pos = (blockIdx.x * RADIXGROUPS + tpos) * element_fraction;
uint end = pos + element_fraction;
pos += tmod;
//printf("pos: %d\n", pos);
__syncthreads();
while (pos < end)
{
uint key = 0;
// Read first data element if we are in the set of elements
//if( pos < elements )
//key = pData[pos].key;
KeyValuePair kvp;
// Read first data element, both items at once as the memory will want to coalesce like that anyway
if (pos < elements)
kvp = pData[pos];
else
kvp.key = 0;
key = kvp.key;
// Calculate position of radix counter to increment
// There are RADICES radices in each pass (256)
// and hence this many counters for bin grouping
// Multiply by RADIXGROUPS (4) to spread through memory
// and into 4 radix groups
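// Example: if the extracted byte is 42 (0x2A), then p = 42 * 4 = 168 and the
// four radix groups of this block use the consecutive counters 168..171
// (the group index tpos selects one of them below).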
uint p = ((key >> shift) & RADIXMASK) * RADIXGROUPS;
// Increment radix counters
// Each radix group has its own set of counters
// so we add the thread position [0-3], ie the group index.
// We slow down here and take at least 16 cycles to write to the summation boxes
// but other groups will only conflict with themselves and so can also be writing.
// Taking at least 16 cycles here avoids retries.
uint ppos = p + tpos;
// If we are past the last element we don't want to do anything
// We do have to check each time, however, to ensure that all
// threads sync on each sync here.
if (tmod == 0 && pos < elements)
sRadixSum[ppos]++;
SYNCIT;
if (tmod == 1 && pos < elements)
sRadixSum[ppos]++;
SYNCIT;
if (tmod == 2 && pos < elements)
sRadixSum[ppos]++;
SYNCIT;
if (tmod == 3 && pos < elements)
sRadixSum[ppos]++;
SYNCIT;
if (tmod == 4 && pos < elements)
sRadixSum[ppos]++;
SYNCIT;
if (tmod == 5 && pos < elements)
sRadixSum[ppos]++;
SYNCIT;
if (tmod == 6 && pos < elements)
sRadixSum[ppos]++;
SYNCIT;
if (tmod == 7 && pos < elements)
sRadixSum[ppos]++;
SYNCIT;
if (tmod == 8 && pos < elements)
sRadixSum[ppos]++;
SYNCIT;
if (tmod == 9 && pos < elements)
sRadixSum[ppos]++;
SYNCIT;
if (tmod == 10 && pos < elements)
sRadixSum[ppos]++;
SYNCIT;
if (tmod == 11 && pos < elements)
sRadixSum[ppos]++;
SYNCIT;
if (tmod == 12 && pos < elements)
sRadixSum[ppos]++;
SYNCIT;
if (tmod == 13 && pos < elements)
sRadixSum[ppos]++;
SYNCIT;
if (tmod == 14 && pos < elements)
sRadixSum[ppos]++;
SYNCIT;
if (tmod == 15 && pos < elements)
sRadixSum[ppos]++;
SYNCIT;
pos += RADIXTHREADS;
}
__syncthreads();
__syncthreads();
// Output radix sums into separate memory regions for each radix group
// So this memory then is laid out:
// 0...... 192..... 384 ................ 192*256
// ie all 256 bins for each radix group
// in there:
// 0.............192
// 0 4 8 12... - block idx * 4
// And in the block boxes we see the 4 radix groups for that block
// So 0-192 should contain bin 0 for each radix group, and so on
uint offset = blockIdx.x * RADIXGROUPS;
uint row = threadIdx.x / RADIXGROUPS;
uint column = threadIdx.x % RADIXGROUPS;
while (row < RADICES)
{
dRadixSum[offset + row * TOTALRADIXGROUPS + column] = sRadixSum[row * RADIXGROUPS + column];
row += NUM_THREADS_PER_BLOCK / RADIXGROUPS;
}
}
////////////////////////////////////////////////////////////////////////////////
//! Performs first part of parallel prefix sum - individual sums of each radix
//! count. By the end of this we have prefix sums on a block level in dRadixSum
//! and totals for blocks in dRadixBlockSum.
////////////////////////////////////////////////////////////////////////////////
__global__ void RadixPrefixSum()
{
// Read radix groups in offset by one in the GRF so a zero can be inserted at the beginning
// and the final sum of all radix counts summed here is tacked onto the end for reading by
// the next stage
// Each block in this case is the full number of threads per SM (and hence the total number
// of radix groups), 192. We should then have the total set of offsets for an entire radix
// group by the end of this stage
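// Concretely, with the defaults: 16 blocks of 192 threads; each block owns
// RADICES / PREFIX_NUM_BLOCKS = 16 radices, i.e. 16 * 192 = 3072 counts
// (= PREFIX_BLOCKSIZE), and each thread below prefix-sums a run of
// 3072 / 192 = 16 of those counts.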
// Device mem addressing
uint brow = blockIdx.x * (RADICES / PREFIX_NUM_BLOCKS);
uint drow = threadIdx.x / TOTALRADIXGROUPS; // In default parameterisation this is always 0
uint dcolumn = threadIdx.x % TOTALRADIXGROUPS; // And similarly this is always the same as threadIdx.x
uint dpos = (brow + drow) * TOTALRADIXGROUPS + dcolumn;
uint end = ((blockIdx.x + 1) * (RADICES / PREFIX_NUM_BLOCKS)) * TOTALRADIXGROUPS;
// Shared mem addressing
uint srow = threadIdx.x / (PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK);
uint scolumn = threadIdx.x % (PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK);
uint spos = srow * (PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK + 1) + scolumn;
// Read (RADICES / PREFIX_NUM_BLOCKS) radix counts into the GRF alongside each other
while (dpos < end)
{
sRadixSum[spos] = dRadixSum[dpos];
spos += (PREFIX_NUM_THREADS_PER_BLOCK / (PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK)) *
(PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK + 1);
dpos += (TOTALRADIXGROUPS / PREFIX_NUM_THREADS_PER_BLOCK) * TOTALRADIXGROUPS;
}
__syncthreads();
// Perform preliminary sum on each thread's stretch of data
// Each thread has a block of 16 counts with a spacer slot after it: 0...15 (spacer 16), 17...32 (spacer 33), and so on
int pos = threadIdx.x * (PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK + 1);
end = pos + (PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK);
uint sum = 0;
while (pos < end)
{
sum += sRadixSum[pos];
sRadixSum[pos] = sum;
pos++;
}
__syncthreads();
// Calculate internal offsets by performing a more traditional parallel
// prefix sum of the topmost member of each thread's work data. Right now,
// these are stored between the work data for each thread, allowing us to
// eliminate GRF conflicts as well as hold the offsets needed to complete the sum
// In other words we have:
// 0....15 16 17....32 33 34....
// Where this first stage updates the intermediate values (so 16=15, 33=32 etc)
int m = (PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK + 1);
pos = threadIdx.x * (PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK + 1) +
(PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK);
sRadixSum[pos] = sRadixSum[pos - 1];
__syncthreads();
// This stage then performs a parallel prefix sum (ie use powers of 2 to propagate in log n stages)
// to update 17, 34 etc with the totals to that point (so 34 becomes [34] + [17]) and so on.
while (m < PREFIX_NUM_THREADS_PER_BLOCK * (PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK + 1))
{
int p = pos - m;
uint t = ((p > 0) ? sRadixSum[p] : 0);
__syncthreads();
sRadixSum[pos] += t;
__syncthreads();
m *= 2;
}
__syncthreads();
// Add internal offsets to each thread's work data.
// So now we take 17 and add it to all values 18 to 33 so all offsets for that block
// are updated.
pos = threadIdx.x * (PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK + 1);
end = pos + (PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK);
int p = pos - 1;
sum = ((p > 0) ? sRadixSum[p] : 0);
while (pos < end)
{
sRadixSum[pos] += sum;
pos++;
}
__syncthreads();
// Write summed data back out to global memory in the same way as we read it in
// We now have prefix sum values internal to groups
brow = blockIdx.x * (RADICES / PREFIX_NUM_BLOCKS);
drow = threadIdx.x / TOTALRADIXGROUPS;
dcolumn = threadIdx.x % TOTALRADIXGROUPS;
srow = threadIdx.x / (PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK);
scolumn = threadIdx.x % (PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK);
dpos = (brow + drow) * TOTALRADIXGROUPS + dcolumn + 1;
spos = srow * (PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK + 1) + scolumn;
end = ((blockIdx.x + 1) * RADICES / PREFIX_NUM_BLOCKS) * TOTALRADIXGROUPS;
while (dpos < end)
{
dRadixSum[dpos] = sRadixSum[spos];
dpos += (TOTALRADIXGROUPS / PREFIX_NUM_THREADS_PER_BLOCK) * TOTALRADIXGROUPS;
spos += (PREFIX_NUM_THREADS_PER_BLOCK / (PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK)) *
(PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK + 1);
}
// Write last element to summation
// Storing block sums in a separate array
if (threadIdx.x == 0) {
dRadixBlockSum[blockIdx.x] = sRadixSum[PREFIX_NUM_THREADS_PER_BLOCK * (PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK + 1) - 1];
dRadixSum[blockIdx.x * PREFIX_BLOCKSIZE] = 0;
}
}
////////////////////////////////////////////////////////////////////////////////
//! Initially perform prefix sum of block totals to obtain final set of offsets.
//! Then make use of radix sums to perform a shuffling of the data into the
//! correct bins.
//!
//! @param pSrc input data
//! @param pDst output data
//! @param elements total number of elements
//! @param shift the shift (0 to 24) that we are using to obtain the correct
//! byte
////////////////////////////////////////////////////////////////////////////////
__global__ void RadixAddOffsetsAndShuffle(KeyValuePair* pSrc, KeyValuePair* pDst, uint elements, uint elements_rounded_to_3072, int shift)
{
// Read offsets from previous blocks
if (threadIdx.x == 0)
sRadixSum[SHUFFLE_GRFOFFSET] = 0;
if (threadIdx.x < PREFIX_NUM_BLOCKS - 1)
sRadixSum[SHUFFLE_GRFOFFSET + threadIdx.x + 1] = dRadixBlockSum[threadIdx.x];
__syncthreads();
// Parallel prefix sum over block sums
int pos = threadIdx.x;
int n = 1;
while (n < PREFIX_NUM_BLOCKS)
{
int ppos = pos - n;
uint t0 = ((pos < PREFIX_NUM_BLOCKS) && (ppos >= 0)) ? sRadixSum[SHUFFLE_GRFOFFSET + ppos] : 0;
__syncthreads();
if (pos < PREFIX_NUM_BLOCKS)
sRadixSum[SHUFFLE_GRFOFFSET + pos] += t0;
__syncthreads();
n *= 2;
}
// Read radix count data and add appropriate block offset
// for each radix at the memory location for this thread
// (where the other threads in the block will be reading
// as well, hence the large stride).
// There is one counter box per radix group per radix
// per block (4*256*3)
// We use 64 threads to read the 4 radix groups set of radices
// for the block.
int row = threadIdx.x / RADIXGROUPS;
int column = threadIdx.x % RADIXGROUPS;
int spos = row * RADIXGROUPS + column;
int dpos = row * TOTALRADIXGROUPS + column + blockIdx.x * RADIXGROUPS;
while (spos < SHUFFLE_GRFOFFSET)
{
sRadixSum[spos] = dRadixSum[dpos] + sRadixSum[SHUFFLE_GRFOFFSET + dpos / (TOTALRADIXGROUPS * RADICES / PREFIX_NUM_BLOCKS)];
spos += NUM_THREADS_PER_BLOCK;
dpos += (NUM_THREADS_PER_BLOCK / RADIXGROUPS) * TOTALRADIXGROUPS;
}
__syncthreads();
//int pos;
// Shuffle data
// Each of the subbins for a block should be filled via the counters, properly interleaved
// Then, as we now iterate over each data value, we increment the subbins (each thread in the
// radix group in turn to avoid missed writes due to conflicts) and set locations correctly.
uint element_fraction = elements_rounded_to_3072 / TOTALRADIXGROUPS;
int tmod = threadIdx.x % RADIXTHREADS;
int tpos = threadIdx.x / RADIXTHREADS;
pos = (blockIdx.x * RADIXGROUPS + tpos) * element_fraction;
uint end = pos + element_fraction; //(blockIdx.x * RADIXGROUPS + tpos + 1) * element_fraction;
pos += tmod;
__syncthreads();
while (pos < end)
{
KeyValuePair kvp;
#if 1 // old load
// Read first data element, both items at once as the memory will want to coalesce like that anyway
if (pos < elements)
{
kvp = pSrc[pos];
}
else
kvp.key = 0;
#else // casting to float2 to get it to combine loads
int2 kvpf2;
// Read first data element, both items at once as the memory will want to coalesce like that anyway
if (pos < elements)
{
// kvp = pSrc[pos];
kvpf2 = ((int2*)pSrc)[pos];
// printf("kvp: %f %f kvpf2: %f %f\n", kvp.key, kvp.value, kvpf2.x, kvpf2.y);
}
else
//kvp.key = 0;
kvpf2.x = 0;
kvp.key = kvpf2.x;
kvp.value = kvpf2.y;
#endif
uint index;
// Calculate position of radix counter to increment
uint p = ((kvp.key >> shift) & RADIXMASK) * RADIXGROUPS;
// Move data, keeping counts updated.
// Increment radix counters, relying on hexadecathread
// warp to prevent this code from stepping all over itself.
uint ppos = p + tpos;
if (tmod == 0 && pos < elements)
{
index = sRadixSum[ppos]++;
pDst[index] = kvp;
}
SYNCIT;
if (tmod == 1 && pos < elements)
{
index = sRadixSum[ppos]++;
pDst[index] = kvp;
}
SYNCIT;
if (tmod == 2 && pos < elements)
{
index = sRadixSum[ppos]++;
pDst[index] = kvp;
}
SYNCIT;
if (tmod == 3 && pos < elements)
{
index = sRadixSum[ppos]++;
pDst[index] = kvp;
}
SYNCIT;
if (tmod == 4 && pos < elements)
{
index = sRadixSum[ppos]++;
pDst[index] = kvp;
}
SYNCIT;
if (tmod == 5 && pos < elements)
{
index = sRadixSum[ppos]++;
pDst[index] = kvp;
}
SYNCIT;
if (tmod == 6 && pos < elements)
{
index = sRadixSum[ppos]++;
pDst[index] = kvp;
}
SYNCIT;
if (tmod == 7 && pos < elements)
{
index = sRadixSum[ppos]++;
pDst[index] = kvp;
}
SYNCIT;
if (tmod == 8 && pos < elements)
{
index = sRadixSum[ppos]++;
pDst[index] = kvp;
}
SYNCIT;
if (tmod == 9 && pos < elements)
{
index = sRadixSum[ppos]++;
pDst[index] = kvp;
}
SYNCIT;
if (tmod == 10 && pos < elements)
{
index = sRadixSum[ppos]++;
pDst[index] = kvp;
}
SYNCIT;
if (tmod == 11 && pos < elements)
{
index = sRadixSum[ppos]++;
pDst[index] = kvp;
}
SYNCIT;
if (tmod == 12 && pos < elements)
{
index = sRadixSum[ppos]++;
pDst[index] = kvp;
}
SYNCIT;
if (tmod == 13 && pos < elements)
{
index = sRadixSum[ppos]++;
pDst[index] = kvp;
}
SYNCIT;
if (tmod == 14 && pos < elements)
{
index = sRadixSum[ppos]++;
pDst[index] = kvp;
}
SYNCIT;
if (tmod == 15 && pos < elements)
{
index = sRadixSum[ppos]++;
pDst[index] = kvp;
}
SYNCIT;
pos += RADIXTHREADS;
}
__syncthreads();
}
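////////////////////////////////////////////////////////////////////////////////
// Example host-side driver (a sketch only, not part of the original file).
// The three kernels above are run once per RADIX-bit pass until RADIXBITS bits
// have been consumed, ping-ponging between two device buffers; the buffer
// names and the rounding expression are assumptions for illustration.
//
//   uint rounded = ((elements + 3071) / 3072) * 3072;  // multiple of 3072
//   KeyValuePair *src = dBuf0, *dst = dBuf1;
//   for (uint shift = 0; shift < RADIXBITS; shift += RADIX) {
//       RadixSum<<<NUM_BLOCKS, NUM_THREADS_PER_BLOCK, GRFSIZE>>>(src, elements, rounded, shift);
//       RadixPrefixSum<<<PREFIX_NUM_BLOCKS, PREFIX_NUM_THREADS_PER_BLOCK, PREFIX_GRFSIZE>>>();
//       RadixAddOffsetsAndShuffle<<<NUM_BLOCKS, NUM_THREADS_PER_BLOCK, SHUFFLE_GRFSIZE>>>(src, dst, elements, rounded, shift);
//       KeyValuePair *tmp = src; src = dst; dst = tmp;  // swap for the next pass
//   }
// After an even number of passes the sorted data is back in the first buffer.
////////////////////////////////////////////////////////////////////////////////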
#endif // #ifndef _RADIXSORT_KERNEL_H_
|
3b566a82881c72c136f5467b7ff0363b2ac9d4b9.hip | // !!! This is a file automatically generated by hipify!!!
//xfail:BOOGIE_ERROR
//possible attempt to modify constant memory
//You can modify the values of the constants; uncomment lines 14 and 16 to analyze this case.
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <assert.h>
#define N 2//1024
__constant__ int A[N] = {0, 1, 2, 3};
__global__ void foo(int *B) {
// assert(A[0]==0);
A[threadIdx.x] = B[threadIdx.x];
// assert(A[0]==0); // the constant memory was modified!!!
__syncthreads();
B[threadIdx.x] = A[threadIdx.x];
}
| 3b566a82881c72c136f5467b7ff0363b2ac9d4b9.cu | //xfail:BOOGIE_ERROR
//possible attempt to modify constant memory
//You can modify the values of the constants; uncomment lines 14 and 16 to analyze this case.
#include <cuda.h>
#include <stdio.h>
#include <assert.h>
#define N 2//1024
__constant__ int A[N] = {0, 1, 2, 3};
__global__ void foo(int *B) {
// assert(A[0]==0);
A[threadIdx.x] = B[threadIdx.x];
// assert(A[0]==0); // the constant memory was modified!!!
__syncthreads();
B[threadIdx.x] = A[threadIdx.x];
}
|
1584e2d427a664674701829d2d285d53b9480344.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/*
* JCudaVec - Vector operations for JCuda
* http://www.jcuda.org
*
* Copyright (c) 2013-2015 Marco Hutter - http://www.jcuda.org
*/
extern "C"
//=== Vector arithmetic ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector comparison ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar comparison ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
// Calculate the arc sine of the input argument.
extern "C"
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
// Calculate the arc tangent of the input argument.
extern "C"
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
// Calculate the cube root of the input argument.
extern "C"
// Calculate ceiling of the input argument.
extern "C"
// Calculate the cosine of the input argument.
extern "C"
// Calculate the hyperbolic cosine of the input argument.
extern "C"
// Calculate the cosine of the input argument × π.
extern "C"
// Calculate the complementary error function of the input argument.
extern "C"
// Calculate the inverse complementary error function of the input argument.
extern "C"
// Calculate the scaled complementary error function of the input argument.
extern "C"
// Calculate the error function of the input argument.
extern "C"
// Calculate the inverse error function of the input argument.
extern "C"
// Calculate the base 10 exponential of the input argument.
extern "C"
// Calculate the base 2 exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
// Calculate the absolute value of its argument.
extern "C"
// Calculate the largest integer less than or equal to x.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
// Calculate the base 10 logarithm of the input argument.
extern "C"
// Calculate the value of log_e(1 + x).
extern "C"
// Calculate the base 2 logarithm of the input argument.
extern "C"
// Calculate the floating point representation of the exponent of the input argument.
extern "C"
// Calculate the natural logarithm of the input argument.
extern "C"
// Calculate the standard normal cumulative distribution function.
extern "C"
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
// Calculate reciprocal cube root function.
extern "C"
// Round input to nearest integer value in floating-point.
extern "C"
// Round to nearest integer value in floating-point.
extern "C"
// Calculate the reciprocal of the square root of the input argument.
extern "C"
// Calculate the sine of the input argument.
extern "C"
// Calculate the hyperbolic sine of the input argument.
extern "C"
// Calculate the sine of the input argument × π.
extern "C"
// Calculate the square root of the input argument.
extern "C"
// Calculate the tangent of the input argument.
extern "C"
// Calculate the hyperbolic tangent of the input argument.
extern "C"
// Calculate the gamma function of the input argument.
extern "C"
// Truncate input argument to the integral part.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
// Compute the positive difference between x and y.
extern "C"
// Divide two floating point values.
extern "C"
// Determine the maximum numeric value of the arguments.
extern "C"
// Determine the minimum numeric value of the arguments.
extern "C"
// Calculate the floating-point remainder of x / y.
extern "C"
// Calculate the square root of the sum of squares of two arguments.
extern "C"
// Return next representable single-precision floating-point value after argument.
extern "C"
// Calculate the value of first argument to the power of second argument.
extern "C"
// Compute single-precision floating-point remainder.
extern "C"
__global__ void vec_scalarSubf (size_t n, float *result, float x, float *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x - y[id];
}
} | 1584e2d427a664674701829d2d285d53b9480344.cu | #include "includes.h"
/*
* JCudaVec - Vector operations for JCuda
* http://www.jcuda.org
*
* Copyright (c) 2013-2015 Marco Hutter - http://www.jcuda.org
*/
extern "C"
//=== Vector arithmetic ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector comparison ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar comparison ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
// Calculate the arc sine of the input argument.
extern "C"
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
// Calculate the arc tangent of the input argument.
extern "C"
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
// Calculate the cube root of the input argument.
extern "C"
// Calculate ceiling of the input argument.
extern "C"
// Calculate the cosine of the input argument.
extern "C"
// Calculate the hyperbolic cosine of the input argument.
extern "C"
// Calculate the cosine of the input argument × π.
extern "C"
// Calculate the complementary error function of the input argument.
extern "C"
// Calculate the inverse complementary error function of the input argument.
extern "C"
// Calculate the scaled complementary error function of the input argument.
extern "C"
// Calculate the error function of the input argument.
extern "C"
// Calculate the inverse error function of the input argument.
extern "C"
// Calculate the base 10 exponential of the input argument.
extern "C"
// Calculate the base 2 exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
// Calculate the absolute value of its argument.
extern "C"
// Calculate the largest integer less than or equal to x.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
// Calculate the base 10 logarithm of the input argument.
extern "C"
// Calculate the value of log_e(1 + x).
extern "C"
// Calculate the base 2 logarithm of the input argument.
extern "C"
// Calculate the floating point representation of the exponent of the input argument.
extern "C"
// Calculate the natural logarithm of the input argument.
extern "C"
// Calculate the standard normal cumulative distribution function.
extern "C"
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
// Calculate reciprocal cube root function.
extern "C"
// Round input to nearest integer value in floating-point.
extern "C"
// Round to nearest integer value in floating-point.
extern "C"
// Calculate the reciprocal of the square root of the input argument.
extern "C"
// Calculate the sine of the input argument.
extern "C"
// Calculate the hyperbolic sine of the input argument.
extern "C"
// Calculate the sine of the input argument × π.
extern "C"
// Calculate the square root of the input argument.
extern "C"
// Calculate the tangent of the input argument.
extern "C"
// Calculate the hyperbolic tangent of the input argument.
extern "C"
// Calculate the gamma function of the input argument.
extern "C"
// Truncate input argument to the integral part.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
// Compute the positive difference between x and y.
extern "C"
// Divide two floating point values.
extern "C"
// Determine the maximum numeric value of the arguments.
extern "C"
// Determine the minimum numeric value of the arguments.
extern "C"
// Calculate the floating-point remainder of x / y.
extern "C"
// Calculate the square root of the sum of squares of two arguments.
extern "C"
// Return next representable single-precision floating-point value after argument.
extern "C"
// Calculate the value of first argument to the power of second argument.
extern "C"
// Compute single-precision floating-point remainder.
extern "C"
__global__ void vec_scalarSubf (size_t n, float *result, float x, float *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x - y[id];
}
} |
1e0f41afa247abdf05155af4b859fccdea764690.hip | // !!! This is a file automatically generated by hipify!!!
#include "hipfft.h"
#include <iostream>
#define C2R 1
#define R2C 2
#define C2C 3
#define Z2D 5
#define D2Z 6
#define Z2Z 7
#define _FROMTO FROMTO
#if _FROMTO == Z2Z
#define TO_TYPE hipfftDoubleComplex
#define FROM_TYPE hipfftDoubleComplex
#define FROMTO_STR "double precision complex-to-complex"
#elif _FROMTO == D2Z
#define TO_TYPE hipfftDoubleComplex
#define FROM_TYPE hipfftDoubleReal
#define FROMTO_STR "double precision real-to-complex"
#elif _FROMTO == Z2D
#define TO_TYPE hipfftDoubleReal
#define FROM_TYPE hipfftDoubleComplex
#define FROMTO_STR "double precision complex-to-real"
#elif _FROMTO == C2C
#define TO_TYPE hipfftComplex
#define FROM_TYPE hipfftComplex
#define FROMTO_STR "single precision complex-to-complex"
#elif _FROMTO == R2C
#define TO_TYPE hipfftComplex
#define FROM_TYPE hipfftReal
#define FROMTO_STR "single precision real-to-complex"
#elif _FROMTO == C2R
#define TO_TYPE hipfftReal
#define FROM_TYPE hipfftComplex
#define FROMTO_STR "single precision complex-to-real"
#else
#error "FROMTO must be one of Z2Z, Z2D, D2Z, C2C, R2C and C2R"
#endif
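// Example build (an assumption, not part of the original source): the transform
// type is selected at compile time with -DFROMTO, e.g. for double-precision
// complex-to-complex something like
//   hipcc -DFROMTO=Z2Z -O3 <this file> -lhipfft
// and -DINPLACE switches to in-place transforms. At run time the program reads
// "NX NY" pairs from stdin and stops on "0 0".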
template <class A, class B>
hipfftResult_t CUFFTPLAN2D(hipfftHandle *plan, int size_x, int size_y, A* in, B* out);
hipfftResult_t CUFFTPLAN2D( hipfftHandle *plan, int size_x, int size_y,
hipfftDoubleComplex* in, hipfftDoubleComplex* out) {
return hipfftPlan2d(plan, size_x, size_y, HIPFFT_Z2Z);
}
hipfftResult_t CUFFTPLAN2D( hipfftHandle *plan, int size_x, int size_y,
hipfftDoubleReal* in, hipfftDoubleComplex* out) {
return hipfftPlan2d(plan, size_x, size_y, HIPFFT_D2Z);
}
hipfftResult_t CUFFTPLAN2D( hipfftHandle *plan, int size_x, int size_y,
hipfftDoubleComplex* in, hipfftDoubleReal* out) {
return hipfftPlan2d(plan, size_x, size_y, HIPFFT_Z2D);
}
hipfftResult_t CUFFTPLAN2D( hipfftHandle *plan, int size_x, int size_y,
hipfftComplex* in, hipfftComplex* out) {
return hipfftPlan2d(plan, size_x, size_y, HIPFFT_C2C);
}
hipfftResult_t CUFFTPLAN2D( hipfftHandle *plan, int size_x, int size_y,
hipfftReal* in, hipfftComplex* out) {
return hipfftPlan2d(plan, size_x, size_y, HIPFFT_R2C);
}
hipfftResult_t CUFFTPLAN2D( hipfftHandle *plan, int size_x, int size_y,
hipfftComplex* in, hipfftReal* out) {
return hipfftPlan2d(plan, size_x, size_y, HIPFFT_C2R);
}
template <class A, class B>
hipfftResult_t CUFFTEXEC(hipfftHandle plan, A* in, B* out);
hipfftResult_t CUFFTEXEC (
hipfftHandle plan, hipfftDoubleComplex* in, hipfftDoubleComplex* out) {
return hipfftExecZ2Z(plan, in, out, HIPFFT_FORWARD);
}
hipfftResult_t CUFFTEXEC(
hipfftHandle plan, hipfftDoubleReal* in, hipfftDoubleComplex* out) {
return hipfftExecD2Z(plan, in, out);
}
hipfftResult_t CUFFTEXEC(
hipfftHandle plan, hipfftDoubleComplex* in, hipfftDoubleReal* out) {
return hipfftExecZ2D(plan, in, out);
}
hipfftResult_t CUFFTEXEC(
hipfftHandle plan, hipfftComplex* in, hipfftComplex* out) {
return hipfftExecC2C(plan, in, out, HIPFFT_FORWARD);
}
hipfftResult_t CUFFTEXEC(
hipfftHandle plan, hipfftReal* in, hipfftComplex* out) {
return hipfftExecR2C(plan, in, out);
}
hipfftResult_t CUFFTEXEC(
hipfftHandle plan, hipfftComplex* in, hipfftReal* out) {
return hipfftExecC2R(plan, in, out);
}
int main(void) {
int NX=10112, NY=10112;
int size = NX*NY;
float elapsed;
hipfftHandle plan;
FROM_TYPE *data1;
hipMalloc(&data1, sizeof(FROM_TYPE)*NX*NY);
#ifndef INPLACE
TO_TYPE *data2;
hipMalloc(&data2, sizeof(TO_TYPE)*NX*NY);
#endif
hipEvent_t start, stop;
hipEventCreate(&start); hipEventCreate(&stop);
hipError_t err;
err = hipGetLastError();
if (err) std::cout << "Error in initial copy" << std::endl;
std::cin >> NX >> NY;
std::cout << "**** " << FROMTO_STR << " ****" << std::endl;
std::cout << "dx, dy, elapsed, Gcell/s, Gflps" << std::endl;
#ifdef INPLACE
#define TARGET data1
#else
#define TARGET data2
#endif
while( NX != 0) {
hipfftResult_t r = CUFFTPLAN2D(&plan, NX, NY, data1, TARGET);
hipEventRecord(start);
for (int z=0; z< 5; z++)
if (!r) r = CUFFTEXEC(plan, data1, TARGET);
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed, start, stop);
hipError_t err;
err = hipGetLastError();
if (err) std::cout << NX << ", " << NY << " - Error " << err <<" : " <<
hipGetErrorString(err) << std::endl;
else if (r) std::cout << NX << ", " << NY << " - CUFFT Error " << r <<
std::endl;
else std::cout << NX << ", " << NY << ", " << elapsed/5 << ", "
<< 5*NX*NY/elapsed/1000/1000 << ", " << 5*5/elapsed/1000/1000*NX*NY*(log2(NX+0.000)+log2(NY+0.000)) << std::endl;
hipfftDestroy(plan);
std::cin >> NX >> NY;
if (NX*NY > size) {
std::cout << "Reallocating to " << NX << " x " << NY << std::endl;
hipFree(data1); data1=0;
hipMalloc(&data1, sizeof(hipfftDoubleComplex)*NX*NY);
if(!data1) std::cout << "Failed to allocate data1!" << std::endl;
#ifndef INPLACE
hipFree(data2); data2=0;
hipMalloc(&data2, sizeof(hipfftDoubleComplex)*NX*NY);
if(!data2) std::cout << "Failed to allocate data2!\n" << std::endl;
#endif
size = NX*NY;
}
}
std::cout << "0, 0" << std::endl;
//printf("(%d,%d) - Error %d: %s\n", NX, NY, err, hipGetErrorString(err));
hipFree(data1);
#ifndef INPLACE
hipFree(data2);
#endif
hipEventDestroy(start); hipEventDestroy(stop);
return 0;
}
| 1e0f41afa247abdf05155af4b859fccdea764690.cu | #include "cufft.h"
#include <iostream>
#define C2R 1
#define R2C 2
#define C2C 3
#define Z2D 5
#define D2Z 6
#define Z2Z 7
#define _FROMTO FROMTO
#if _FROMTO == Z2Z
#define TO_TYPE cufftDoubleComplex
#define FROM_TYPE cufftDoubleComplex
#define FROMTO_STR "double precision complex-to-complex"
#elif _FROMTO == D2Z
#define TO_TYPE cufftDoubleComplex
#define FROM_TYPE cufftDoubleReal
#define FROMTO_STR "double precision real-to-complex"
#elif _FROMTO == Z2D
#define TO_TYPE cufftDoubleReal
#define FROM_TYPE cufftDoubleComplex
#define FROMTO_STR "double precision complex-to-real"
#elif _FROMTO == C2C
#define TO_TYPE cufftComplex
#define FROM_TYPE cufftComplex
#define FROMTO_STR "single precision complex-to-complex"
#elif _FROMTO == R2C
#define TO_TYPE cufftComplex
#define FROM_TYPE cufftReal
#define FROMTO_STR "single precision real-to-complex"
#elif _FROMTO == C2R
#define TO_TYPE cufftReal
#define FROM_TYPE cufftComplex
#define FROMTO_STR "single precision complex-to-real"
#else
#error "FROMTO must be one of Z2Z, Z2D, D2Z, C2C, R2C and C2R"
#endif
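// Example build (an assumption, not part of the original source), e.g.
//   nvcc -DFROMTO=Z2Z -O3 <this file> -lcufft
// with -DINPLACE for in-place transforms; the program then reads "NX NY"
// pairs from stdin and stops on "0 0".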
template <class A, class B>
cufftResult_t CUFFTPLAN2D(cufftHandle *plan, int size_x, int size_y, A* in, B* out);
cufftResult_t CUFFTPLAN2D( cufftHandle *plan, int size_x, int size_y,
cufftDoubleComplex* in, cufftDoubleComplex* out) {
return cufftPlan2d(plan, size_x, size_y, CUFFT_Z2Z);
}
cufftResult_t CUFFTPLAN2D( cufftHandle *plan, int size_x, int size_y,
cufftDoubleReal* in, cufftDoubleComplex* out) {
return cufftPlan2d(plan, size_x, size_y, CUFFT_D2Z);
}
cufftResult_t CUFFTPLAN2D( cufftHandle *plan, int size_x, int size_y,
cufftDoubleComplex* in, cufftDoubleReal* out) {
return cufftPlan2d(plan, size_x, size_y, CUFFT_Z2D);
}
cufftResult_t CUFFTPLAN2D( cufftHandle *plan, int size_x, int size_y,
cufftComplex* in, cufftComplex* out) {
return cufftPlan2d(plan, size_x, size_y, CUFFT_C2C);
}
cufftResult_t CUFFTPLAN2D( cufftHandle *plan, int size_x, int size_y,
cufftReal* in, cufftComplex* out) {
return cufftPlan2d(plan, size_x, size_y, CUFFT_R2C);
}
cufftResult_t CUFFTPLAN2D( cufftHandle *plan, int size_x, int size_y,
cufftComplex* in, cufftReal* out) {
return cufftPlan2d(plan, size_x, size_y, CUFFT_C2R);
}
template <class A, class B>
cufftResult_t CUFFTEXEC(cufftHandle plan, A* in, B* out);
cufftResult_t CUFFTEXEC (
cufftHandle plan, cufftDoubleComplex* in, cufftDoubleComplex* out) {
return cufftExecZ2Z(plan, in, out, CUFFT_FORWARD);
}
cufftResult_t CUFFTEXEC(
cufftHandle plan, cufftDoubleReal* in, cufftDoubleComplex* out) {
return cufftExecD2Z(plan, in, out);
}
cufftResult_t CUFFTEXEC(
cufftHandle plan, cufftDoubleComplex* in, cufftDoubleReal* out) {
return cufftExecZ2D(plan, in, out);
}
cufftResult_t CUFFTEXEC(
cufftHandle plan, cufftComplex* in, cufftComplex* out) {
return cufftExecC2C(plan, in, out, CUFFT_FORWARD);
}
cufftResult_t CUFFTEXEC(
cufftHandle plan, cufftReal* in, cufftComplex* out) {
return cufftExecR2C(plan, in, out);
}
cufftResult_t CUFFTEXEC(
cufftHandle plan, cufftComplex* in, cufftReal* out) {
return cufftExecC2R(plan, in, out);
}
int main(void) {
int NX=10112, NY=10112;
int size = NX*NY;
float elapsed;
cufftHandle plan;
FROM_TYPE *data1;
cudaMalloc(&data1, sizeof(FROM_TYPE)*NX*NY);
#ifndef INPLACE
TO_TYPE *data2;
cudaMalloc(&data2, sizeof(TO_TYPE)*NX*NY);
#endif
cudaEvent_t start, stop;
cudaEventCreate(&start); cudaEventCreate(&stop);
cudaError_t err;
err = cudaGetLastError();
if (err) std::cout << "Error in initial copy" << std::endl;
std::cin >> NX >> NY;
std::cout << "**** " << FROMTO_STR << " ****" << std::endl;
std::cout << "dx, dy, elapsed, Gcell/s, Gflps" << std::endl;
#ifdef INPLACE
#define TARGET data1
#else
#define TARGET data2
#endif
while( NX != 0) {
cufftResult_t r = CUFFTPLAN2D(&plan, NX, NY, data1, TARGET);
cudaEventRecord(start);
for (int z=0; z< 5; z++)
if (!r) r = CUFFTEXEC(plan, data1, TARGET);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed, start, stop);
cudaError_t err;
err = cudaGetLastError();
if (err) std::cout << NX << ", " << NY << " - Error " << err <<" : " <<
cudaGetErrorString(err) << std::endl;
else if (r) std::cout << NX << ", " << NY << " - CUFFT Error " << r <<
std::endl;
else std::cout << NX << ", " << NY << ", " << elapsed/5 << ", "
<< 5*NX*NY/elapsed/1000/1000 << ", " << 5*5/elapsed/1000/1000*NX*NY*(log2(NX+0.000)+log2(NY+0.000)) << std::endl;
cufftDestroy(plan);
std::cin >> NX >> NY;
if (NX*NY > size) {
std::cout << "Reallocating to " << NX << " x " << NY << std::endl;
cudaFree(data1); data1=0;
cudaMalloc(&data1, sizeof(cufftDoubleComplex)*NX*NY);
if(!data1) std::cout << "Failed to allocate data1!" << std::endl;
#ifndef INPLACE
cudaFree(data2); data2=0;
cudaMalloc(&data2, sizeof(cufftDoubleComplex)*NX*NY);
if(!data2) std::cout << "Failed to allocate data2!\n" << std::endl;
#endif
size = NX*NY;
}
}
std::cout << "0, 0" << std::endl;
//printf("(%d,%d) - Error %d: %s\n", NX, NY, err, cudaGetErrorString(err));
cudaFree(data1);
#ifndef INPLACE
cudaFree(data2);
#endif
cudaEventDestroy(start); cudaEventDestroy(stop);
return 0;
}
|
072ada4adfd9214020b5aab566344f511c28f1bb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// =============================================================================
// PROJECT CHRONO - http://projectchrono.org
//
// Copyright (c) 2014 projectchrono.org
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file at the top level of the distribution and at
// http://projectchrono.org/license-chrono.txt.
//
// =============================================================================
// Author: Arman Pazouki, Milad Rakhsha, Wei Hu
// =============================================================================
//
// Base class for processing proximity in fsi system.
// =============================================================================
#include <thrust/sort.h>
#include "chrono_fsi/physics/ChCollisionSystemFsi.cuh"
#include "chrono_fsi/physics/ChSphGeneral.cuh"
#include "chrono_fsi/utils/ChUtilsDevice.cuh"
namespace chrono {
namespace fsi {
// calcHashD :
// 1. Get particle index determined by the block and thread we are in.
// 2. From x, y, z position, determine which bin it is in.
// 3. Calculate hash from bin index.
// 4. Store hash and particle index associated with it.
__global__ void calcHashD(
uint* gridMarkerHashD, // gridMarkerHash Store particle hash here
uint* gridMarkerIndexD, // gridMarkerIndex Store particle index here
Real4* posRad, // posRad Vector containing the positions of all particles (SPH and BCE)
const size_t numAllMarkers, // Total number of particles (fluid + boundary)
volatile bool* isErrorD) {
/* Calculate the index of where the particle is stored in posRad. */
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numAllMarkers)
return;
Real3 p = mR3(posRad[index]);
if (!(isfinite(p.x) && isfinite(p.y) && isfinite(p.z))) {
printf(
"Error! particle position is NAN: thrown from "
"ChCollisionSystemFsi.cu, calcHashD !\n");
*isErrorD = true;
return;
}
/* Check particle is inside the domain. */
Real3 boxCorner = paramsD.worldOrigin - mR3(40 * paramsD.HSML);
if (p.x < boxCorner.x || p.y < boxCorner.y || p.z < boxCorner.z) {
printf(
"Out of Min Boundary, point %f %f %f, boundary min: %f %f %f. "
"Thrown from ChCollisionSystemFsi.cu, calcHashD !\n",
p.x, p.y, p.z, boxCorner.x, boxCorner.y, boxCorner.z);
*isErrorD = true;
return;
}
boxCorner = paramsD.worldOrigin + paramsD.boxDims + mR3(40 * paramsD.HSML);
if (p.x > boxCorner.x || p.y > boxCorner.y || p.z > boxCorner.z) {
printf(
"Out of max Boundary, point %f %f %f, boundary max: %f %f %f. "
"Thrown from ChCollisionSystemFsi.cu, calcHashD !\n",
p.x, p.y, p.z, boxCorner.x, boxCorner.y, boxCorner.z);
*isErrorD = true;
return;
}
/* Get x,y,z bin index in grid */
int3 gridPos = calcGridPos(p);
/* Calculate a hash from the bin index */
uint hash = calcGridHash(gridPos);
/* Store grid hash */
gridMarkerHashD[index] = hash;
/* Store particle index associated to the hash we stored in gridMarkerHashD */
gridMarkerIndexD[index] = index;
}
/**
* @brief reorderDataAndFindCellStartD
* @details See ChCollisionSystemFsi.cuh for more info
*/
__global__ void reorderDataAndFindCellStartD(uint* cellStartD, // output: cell start index
uint* cellEndD, // output: cell end index
Real4* sortedPosRadD, // output: sorted positions
Real3* sortedVelMasD, // output: sorted velocities
Real4* sortedRhoPreMuD, // output: sorted density pressure
Real3* sortedTauXxYyZzD, // output: sorted total stress xxyyzz
Real3* sortedTauXyXzYzD, // output: sorted total stress xyxzyz
Real3* tauXxYyZzD, // input: original total stress xxyyzz
Real3* tauXyXzYzD, // input: original total stress xyxzyz
uint* gridMarkerHashD, // input: sorted grid hashes
uint* gridMarkerIndexD, // input: sorted particle indices
Real4* posRadD, // input: original position array
Real3* velMasD, // input: original velocity array
Real4* rhoPresMuD, // input: original density pressure
const size_t numAllMarkers) {
extern __shared__ uint sharedHash[]; // blockSize + 1 elements
/* Get the particle index the current thread is supposed to be looking at. */
uint index = blockIdx.x * blockDim.x + threadIdx.x;
uint hash;
/* handle case when no. of particles not multiple of block size */
if (index < numAllMarkers) {
hash = gridMarkerHashD[index];
/* Load hash data into shared memory so that we can look at neighboring
* particle's hash
* value without loading two hash values per thread
*/
sharedHash[threadIdx.x + 1] = hash;
if (index > 0 && threadIdx.x == 0) {
/* first thread in block must load neighbor particle hash */
sharedHash[0] = gridMarkerHashD[index - 1];
}
}
__syncthreads();
if (index < numAllMarkers) {
/* If this particle has a different cell index to the previous particle then
* it must be
* the first particle in the cell, so store the index of this particle in
* the cell. As it
* isn't the first particle, it must also be the cell end of the previous
* particle's cell
*/
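// Example: for sorted hashes [2,2,5,5,5,9] this yields cellStartD[2]=0,
// cellEndD[2]=2, cellStartD[5]=2, cellEndD[5]=5, cellStartD[9]=5, cellEndD[9]=6.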
if (index == 0 || hash != sharedHash[threadIdx.x]) {
cellStartD[hash] = index;
if (index > 0)
cellEndD[sharedHash[threadIdx.x]] = index;
}
if (index == numAllMarkers - 1) {
cellEndD[hash] = index + 1;
}
/* Now use the sorted index to reorder the pos and vel data */
uint originalIndex = gridMarkerIndexD[index]; // map sorted to original
Real3 posRad = mR3(posRadD[originalIndex]); // macro does either global read or
// texture fetch
Real3 velMas = velMasD[originalIndex]; // see particles_kernel.cuh
Real4 rhoPreMu = rhoPresMuD[originalIndex];
if (!(isfinite(posRad.x) && isfinite(posRad.y) && isfinite(posRad.z))) {
printf(
"Error! particle position is NAN: thrown from "
"ChCollisionSystemFsi.cu, reorderDataAndFindCellStartD !\n");
}
if (!(isfinite(velMas.x) && isfinite(velMas.y) && isfinite(velMas.z))) {
printf(
"Error! particle velocity is NAN: thrown from "
"ChCollisionSystemFsi.cu, reorderDataAndFindCellStartD !\n");
}
if (!(isfinite(rhoPreMu.x) && isfinite(rhoPreMu.y) && isfinite(rhoPreMu.z) && isfinite(rhoPreMu.w))) {
printf(
"Error! particle rhoPreMu is NAN: thrown from "
"ChCollisionSystemFsi.cu, reorderDataAndFindCellStartD !\n");
}
sortedPosRadD[index] = mR4(posRad, posRadD[originalIndex].w);
sortedVelMasD[index] = velMas;
sortedRhoPreMuD[index] = rhoPreMu;
// For granular material
if( paramsD.elastic_SPH ) {
Real3 tauXxYyZz = tauXxYyZzD[originalIndex];
Real3 tauXyXzYz = tauXyXzYzD[originalIndex];
if (!(isfinite(tauXxYyZz.x) && isfinite(tauXxYyZz.y) && isfinite(tauXxYyZz.z))) {
printf(
"Error! particle tauXxYyZz is NAN: thrown from "
"ChCollisionSystemFsi.cu, reorderDataAndFindCellStartD !\n");
}
if (!(isfinite(tauXyXzYz.x) && isfinite(tauXyXzYz.y) && isfinite(tauXyXzYz.z))) {
printf(
"Error! particle tauXyXzYz is NAN: thrown from "
"ChCollisionSystemFsi.cu, reorderDataAndFindCellStartD !\n");
}
sortedTauXxYyZzD[index] = tauXxYyZz;
sortedTauXyXzYzD[index] = tauXyXzYz;
}
}
}
__global__ void findCellStartEndD(uint* cellStartD, // output: cell start index
uint* cellEndD, // output: cell end index
uint* gridMarkerHashD, // input: sorted grid hashes
uint* gridMarkerIndexD, // input: sorted particle indices
const size_t numAllMarkers) {
extern __shared__ uint sharedHash[]; // blockSize + 1 elements
/* Get the particle index the current thread is supposed to be looking at. */
uint index = blockIdx.x * blockDim.x + threadIdx.x;
uint hash;
/* handle case when no. of particles not multiple of block size */
if (index < numAllMarkers) {
hash = gridMarkerHashD[index];
/* Load hash data into shared memory so that we can look at neighboring
* particle's hash
* value without loading two hash values per thread
*/
sharedHash[threadIdx.x + 1] = hash;
if (index > 0 && threadIdx.x == 0) {
/* first thread in block must load neighbor particle hash */
sharedHash[0] = gridMarkerHashD[index - 1];
}
}
__syncthreads();
if (index < numAllMarkers) {
/* If this particle has a different cell index to the previous particle then
* it must be
* the first particle in the cell, so store the index of this particle in
* the cell. As it
* isn't the first particle, it must also be the cell end of the previous
* particle's cell
*/
if (index == 0 || hash != sharedHash[threadIdx.x]) {
cellStartD[hash] = index;
if (index > 0)
cellEndD[sharedHash[threadIdx.x]] = index;
}
if (index == numAllMarkers - 1) {
cellEndD[hash] = index + 1;
}
}
}
__global__ void reorderDataD(uint* gridMarkerIndexD, // input: sorted particle indices
uint* extendedActivityIdD, // input: particles in an extended active sub-domain
uint* mapOriginalToSorted, // input: original index to sorted index
Real4* sortedPosRadD, // output: sorted positions
Real3* sortedVelMasD, // output: sorted velocities
Real4* sortedRhoPreMuD, // output: sorted density pressure
Real3* sortedTauXxYyZzD, // output: sorted total stress xxyyzz
Real3* sortedTauXyXzYzD, // output: sorted total stress xyxzyz
Real4* posRadD, // input: original position array
Real3* velMasD, // input: original velocity array
Real4* rhoPresMuD, // input: original density pressure
Real3* tauXxYyZzD, // input: original total stress xxyyzz
Real3* tauXyXzYzD, // input: original total stress xyxzyz
const size_t numAllMarkers) {
uint id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= numAllMarkers)
return;
// Now use the sorted index to reorder the pos and vel data
uint originalIndex = id;
// no need to do anything if it is not an active particle
uint activity = extendedActivityIdD[originalIndex];
if(activity == 0)
return;
// map original to sorted
uint index = mapOriginalToSorted[originalIndex];
Real3 posRad = mR3(posRadD[originalIndex]);
Real3 velMas = velMasD[originalIndex];
Real4 rhoPreMu = rhoPresMuD[originalIndex];
if (!(isfinite(posRad.x) && isfinite(posRad.y) && isfinite(posRad.z))) {
printf(
"Error! particle position is NAN: thrown from "
"ChCollisionSystemFsi.cu, reorderDataAndFindCellStartD !\n");
}
if (!(isfinite(velMas.x) && isfinite(velMas.y) && isfinite(velMas.z))) {
printf(
"Error! particle velocity is NAN: thrown from "
"ChCollisionSystemFsi.cu, reorderDataAndFindCellStartD !\n");
}
if (!(isfinite(rhoPreMu.x) && isfinite(rhoPreMu.y) && isfinite(rhoPreMu.z) && isfinite(rhoPreMu.w))) {
printf(
"Error! particle rhoPreMu is NAN: thrown from "
"ChCollisionSystemFsi.cu, reorderDataAndFindCellStartD !\n");
}
sortedPosRadD[index] = mR4(posRad, posRadD[originalIndex].w);
sortedVelMasD[index] = velMas;
sortedRhoPreMuD[index] = rhoPreMu;
// For granular material
if( paramsD.elastic_SPH ) {
Real3 tauXxYyZz = tauXxYyZzD[originalIndex];
Real3 tauXyXzYz = tauXyXzYzD[originalIndex];
if (!(isfinite(tauXxYyZz.x) && isfinite(tauXxYyZz.y) && isfinite(tauXxYyZz.z))) {
printf(
"Error! particle tauXxYyZz is NAN: thrown from "
"ChCollisionSystemFsi.cu, reorderDataAndFindCellStartD !\n");
}
if (!(isfinite(tauXyXzYz.x) && isfinite(tauXyXzYz.y) && isfinite(tauXyXzYz.z))) {
printf(
"Error! particle tauXyXzYz is NAN: thrown from "
"ChCollisionSystemFsi.cu, reorderDataAndFindCellStartD !\n");
}
sortedTauXxYyZzD[index] = tauXxYyZz;
sortedTauXyXzYzD[index] = tauXyXzYz;
}
}
__global__ void OriginalToSortedD(uint* mapOriginalToSorted,
uint* gridMarkerIndex,
const size_t numAllMarkers) {
uint id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= numAllMarkers)
return;
uint index = gridMarkerIndex[id];
mapOriginalToSorted[index] = id;
}
//--------------------------------------------------------------------------------------------------------------------------------
ChCollisionSystemFsi::ChCollisionSystemFsi(std::shared_ptr<SphMarkerDataD> otherSortedSphMarkersD,
std::shared_ptr<ProximityDataD> otherMarkersProximityD,
std::shared_ptr<FsiGeneralData> otherFsiGeneralData,
std::shared_ptr<SimParams> otherParamsH,
std::shared_ptr<ChCounters> otherNumObjects)
: sortedSphMarkersD(otherSortedSphMarkersD),
markersProximityD(otherMarkersProximityD),
fsiGeneralData(otherFsiGeneralData),
paramsH(otherParamsH),
numObjectsH(otherNumObjects) {
sphMarkersD = NULL;
}
ChCollisionSystemFsi::~ChCollisionSystemFsi() {}
//--------------------------------------------------------------------------------------------------------------------------------
void ChCollisionSystemFsi::Initialize() {
hipMemcpyToSymbolAsync(paramsD, paramsH.get(), sizeof(SimParams));
hipMemcpyToSymbolAsync(numObjectsD, numObjectsH.get(), sizeof(ChCounters));
}
//--------------------------------------------------------------------------------------------------------------------------------
void ChCollisionSystemFsi::calcHash() {
if (!(markersProximityD->gridMarkerHashD.size() == numObjectsH->numAllMarkers &&
markersProximityD->gridMarkerIndexD.size() == numObjectsH->numAllMarkers)) {
printf(
"mError! calcHash!, gridMarkerHashD.size() %zu "
"gridMarkerIndexD.size() %zu numObjectsH->numAllMarkers %zu \n",
markersProximityD->gridMarkerHashD.size(), markersProximityD->gridMarkerIndexD.size(),
numObjectsH->numAllMarkers);
throw std::runtime_error("Error! size error, calcHash!");
}
bool *isErrorH, *isErrorD;
isErrorH = (bool*)malloc(sizeof(bool));
hipMalloc((void**)&isErrorD, sizeof(bool));
*isErrorH = false;
hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice);
//------------------------------------------------------------------------
/* Is there a need to optimize the number of threads used at once? */
uint numThreads, numBlocks;
computeGridSize((int)numObjectsH->numAllMarkers, 256, numBlocks, numThreads);
/* Execute Kernel */
hipLaunchKernelGGL(( calcHashD), dim3(numBlocks), dim3(numThreads), 0, 0, U1CAST(markersProximityD->gridMarkerHashD),
U1CAST(markersProximityD->gridMarkerIndexD),
mR4CAST(sphMarkersD->posRadD),
numObjectsH->numAllMarkers, isErrorD);
/* Check for errors in kernel execution */
hipDeviceSynchronize();
cudaCheckError();
//------------------------------------------------------------------------
hipMemcpy(isErrorH, isErrorD, sizeof(bool), hipMemcpyDeviceToHost);
if (*isErrorH == true) {
throw std::runtime_error("Error! program crashed in calcHashD!\n");
}
hipFree(isErrorD);
free(isErrorH);
}
void ChCollisionSystemFsi::ResetCellSize(int s) {
markersProximityD->cellStartD.resize(s);
markersProximityD->cellEndD.resize(s);
}
void ChCollisionSystemFsi::reorderDataAndFindCellStart() {
int3 cellsDim = paramsH->gridSize;
int numCells = cellsDim.x * cellsDim.y * cellsDim.z;
if (!(markersProximityD->cellStartD.size() == numCells && markersProximityD->cellEndD.size() == numCells)) {
throw std::runtime_error("Error! size error, reorderDataAndFindCellStart!\n");
}
thrust::fill(markersProximityD->cellStartD.begin(), markersProximityD->cellStartD.end(), 0);
thrust::fill(markersProximityD->cellEndD.begin(), markersProximityD->cellEndD.end(), 0);
uint numThreads, numBlocks;
computeGridSize((uint)numObjectsH->numAllMarkers, 256, numBlocks, numThreads); // 256 is blockSize
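// +1 below because sharedHash in the kernel holds blockSize + 1 hashes
// (slot 0 stages the hash of the particle just before this block).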
uint smemSize = sizeof(uint) * (numThreads + 1);
// Find the start index and the end index of the sorted array in each cell
hipLaunchKernelGGL(( findCellStartEndD), dim3(numBlocks), dim3(numThreads), smemSize, 0,
U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD),
U1CAST(markersProximityD->gridMarkerHashD), U1CAST(markersProximityD->gridMarkerIndexD),
numObjectsH->numAllMarkers);
hipDeviceSynchronize();
cudaCheckError();
// Launch a kernel to find the location of original particles in the sorted arrays.
// This is faster than using thrust::sort_by_key()
hipLaunchKernelGGL(( OriginalToSortedD), dim3(numBlocks), dim3(numThreads), 0, 0,
U1CAST(markersProximityD->mapOriginalToSorted),
U1CAST(markersProximityD->gridMarkerIndexD), numObjectsH->numAllMarkers);
// Reorder the arrays according to the sorted index of all particles
hipLaunchKernelGGL(( reorderDataD), dim3(numBlocks), dim3(numThreads), 0, 0,
U1CAST(markersProximityD->gridMarkerIndexD),
U1CAST(fsiGeneralData->extendedActivityIdD),
U1CAST(markersProximityD->mapOriginalToSorted),
mR4CAST(sortedSphMarkersD->posRadD), mR3CAST(sortedSphMarkersD->velMasD),
mR4CAST(sortedSphMarkersD->rhoPresMuD), mR3CAST(sortedSphMarkersD->tauXxYyZzD),
mR3CAST(sortedSphMarkersD->tauXyXzYzD), mR4CAST(sphMarkersD->posRadD),
mR3CAST(sphMarkersD->velMasD), mR4CAST(sphMarkersD->rhoPresMuD),
mR3CAST(sphMarkersD->tauXxYyZzD), mR3CAST(sphMarkersD->tauXyXzYzD),
numObjectsH->numAllMarkers);
hipDeviceSynchronize();
cudaCheckError();
}
void ChCollisionSystemFsi::ArrangeData(std::shared_ptr<SphMarkerDataD> otherSphMarkersD) {
sphMarkersD = otherSphMarkersD;
int3 cellsDim = paramsH->gridSize;
int numCells = cellsDim.x * cellsDim.y * cellsDim.z;
ResetCellSize(numCells);
calcHash();
thrust::sort_by_key(markersProximityD->gridMarkerHashD.begin(), markersProximityD->gridMarkerHashD.end(),
markersProximityD->gridMarkerIndexD.begin());
reorderDataAndFindCellStart();
}
} // end namespace fsi
} // end namespace chrono
| 072ada4adfd9214020b5aab566344f511c28f1bb.cu | // =============================================================================
// PROJECT CHRONO - http://projectchrono.org
//
// Copyright (c) 2014 projectchrono.org
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file at the top level of the distribution and at
// http://projectchrono.org/license-chrono.txt.
//
// =============================================================================
// Author: Arman Pazouki, Milad Rakhsha, Wei Hu
// =============================================================================
//
// Base class for processing proximity in fsi system.
// =============================================================================
#include <thrust/sort.h>
#include "chrono_fsi/physics/ChCollisionSystemFsi.cuh"
#include "chrono_fsi/physics/ChSphGeneral.cuh"
#include "chrono_fsi/utils/ChUtilsDevice.cuh"
namespace chrono {
namespace fsi {
// calcHashD :
// 1. Get particle index determined by the block and thread we are in.
// 2. From x, y, z position, determine which bin it is in.
// 3. Calculate hash from bin index.
// 4. Store hash and particle index associated with it.
__global__ void calcHashD(
uint* gridMarkerHashD, // gridMarkerHash Store particle hash here
uint* gridMarkerIndexD, // gridMarkerIndex Store particle index here
Real4* posRad, // posRad Vector containing the positions of all particles (SPH and BCE)
const size_t numAllMarkers, // Total number of particles (fluid + boundary)
volatile bool* isErrorD) {
/* Calculate the index of where the particle is stored in posRad. */
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numAllMarkers)
return;
Real3 p = mR3(posRad[index]);
if (!(isfinite(p.x) && isfinite(p.y) && isfinite(p.z))) {
printf(
"Error! particle position is NAN: thrown from "
"ChCollisionSystemFsi.cu, calcHashD !\n");
*isErrorD = true;
return;
}
/* Check particle is inside the domain. */
Real3 boxCorner = paramsD.worldOrigin - mR3(40 * paramsD.HSML);
if (p.x < boxCorner.x || p.y < boxCorner.y || p.z < boxCorner.z) {
printf(
"Out of Min Boundary, point %f %f %f, boundary min: %f %f %f. "
"Thrown from ChCollisionSystemFsi.cu, calcHashD !\n",
p.x, p.y, p.z, boxCorner.x, boxCorner.y, boxCorner.z);
*isErrorD = true;
return;
}
boxCorner = paramsD.worldOrigin + paramsD.boxDims + mR3(40 * paramsD.HSML);
if (p.x > boxCorner.x || p.y > boxCorner.y || p.z > boxCorner.z) {
printf(
"Out of max Boundary, point %f %f %f, boundary max: %f %f %f. "
"Thrown from ChCollisionSystemFsi.cu, calcHashD !\n",
p.x, p.y, p.z, boxCorner.x, boxCorner.y, boxCorner.z);
*isErrorD = true;
return;
}
/* Get x,y,z bin index in grid */
int3 gridPos = calcGridPos(p);
/* Calculate a hash from the bin index */
uint hash = calcGridHash(gridPos);
/* Store grid hash */
gridMarkerHashD[index] = hash;
/* Store particle index associated to the hash we stored in gridMarkerHashD */
gridMarkerIndexD[index] = index;
}
/**
* @brief reorderDataAndFindCellStartD
* @details See ChCollisionSystemFsi.cuh for more info
*/
__global__ void reorderDataAndFindCellStartD(uint* cellStartD, // output: cell start index
uint* cellEndD, // output: cell end index
Real4* sortedPosRadD, // output: sorted positions
Real3* sortedVelMasD, // output: sorted velocities
Real4* sortedRhoPreMuD, // output: sorted density pressure
Real3* sortedTauXxYyZzD, // output: sorted total stress xxyyzz
Real3* sortedTauXyXzYzD, // output: sorted total stress xyxzyz
Real3* tauXxYyZzD, // input: original total stress xxyyzz
Real3* tauXyXzYzD, // input: original total stress xyxzyz
uint* gridMarkerHashD, // input: sorted grid hashes
uint* gridMarkerIndexD, // input: sorted particle indices
Real4* posRadD, // input: original position array
Real3* velMasD, // input: original velocity array
Real4* rhoPresMuD, // input: original density pressure
const size_t numAllMarkers) {
extern __shared__ uint sharedHash[]; // blockSize + 1 elements
/* Get the particle index the current thread is supposed to be looking at. */
uint index = blockIdx.x * blockDim.x + threadIdx.x;
uint hash;
/* handle case when no. of particles not multiple of block size */
if (index < numAllMarkers) {
hash = gridMarkerHashD[index];
/* Load hash data into shared memory so that we can look at neighboring
* particle's hash
* value without loading two hash values per thread
*/
sharedHash[threadIdx.x + 1] = hash;
if (index > 0 && threadIdx.x == 0) {
/* first thread in block must load neighbor particle hash */
sharedHash[0] = gridMarkerHashD[index - 1];
}
}
__syncthreads();
if (index < numAllMarkers) {
        /* If this particle has a different cell index from the previous particle,
         * it must be the first particle in its cell, so store its index as that
         * cell's start. Unless this is particle 0, the previous particle must then
         * be the last one in its own cell, so this index is also stored as the end
         * of the previous particle's cell.
         */
if (index == 0 || hash != sharedHash[threadIdx.x]) {
cellStartD[hash] = index;
if (index > 0)
cellEndD[sharedHash[threadIdx.x]] = index;
}
if (index == numAllMarkers - 1) {
cellEndD[hash] = index + 1;
}
/* Now use the sorted index to reorder the pos and vel data */
uint originalIndex = gridMarkerIndexD[index]; // map sorted to original
Real3 posRad = mR3(posRadD[originalIndex]); // macro does either global read or
// texture fetch
Real3 velMas = velMasD[originalIndex]; // see particles_kernel.cuh
Real4 rhoPreMu = rhoPresMuD[originalIndex];
if (!(isfinite(posRad.x) && isfinite(posRad.y) && isfinite(posRad.z))) {
printf(
"Error! particle position is NAN: thrown from "
"ChCollisionSystemFsi.cu, reorderDataAndFindCellStartD !\n");
}
if (!(isfinite(velMas.x) && isfinite(velMas.y) && isfinite(velMas.z))) {
printf(
"Error! particle velocity is NAN: thrown from "
"ChCollisionSystemFsi.cu, reorderDataAndFindCellStartD !\n");
}
if (!(isfinite(rhoPreMu.x) && isfinite(rhoPreMu.y) && isfinite(rhoPreMu.z) && isfinite(rhoPreMu.w))) {
printf(
"Error! particle rhoPreMu is NAN: thrown from "
"ChCollisionSystemFsi.cu, reorderDataAndFindCellStartD !\n");
}
sortedPosRadD[index] = mR4(posRad, posRadD[originalIndex].w);
sortedVelMasD[index] = velMas;
sortedRhoPreMuD[index] = rhoPreMu;
// For granular material
if( paramsD.elastic_SPH ) {
Real3 tauXxYyZz = tauXxYyZzD[originalIndex];
Real3 tauXyXzYz = tauXyXzYzD[originalIndex];
if (!(isfinite(tauXxYyZz.x) && isfinite(tauXxYyZz.y) && isfinite(tauXxYyZz.z))) {
printf(
"Error! particle tauXxYyZz is NAN: thrown from "
"ChCollisionSystemFsi.cu, reorderDataAndFindCellStartD !\n");
}
if (!(isfinite(tauXyXzYz.x) && isfinite(tauXyXzYz.y) && isfinite(tauXyXzYz.z))) {
printf(
"Error! particle tauXyXzYz is NAN: thrown from "
"ChCollisionSystemFsi.cu, reorderDataAndFindCellStartD !\n");
}
sortedTauXxYyZzD[index] = tauXxYyZz;
sortedTauXyXzYzD[index] = tauXyXzYz;
}
}
}
__global__ void findCellStartEndD(uint* cellStartD, // output: cell start index
uint* cellEndD, // output: cell end index
uint* gridMarkerHashD, // input: sorted grid hashes
uint* gridMarkerIndexD, // input: sorted particle indices
const size_t numAllMarkers) {
extern __shared__ uint sharedHash[]; // blockSize + 1 elements
/* Get the particle index the current thread is supposed to be looking at. */
uint index = blockIdx.x * blockDim.x + threadIdx.x;
uint hash;
/* handle case when no. of particles not multiple of block size */
if (index < numAllMarkers) {
hash = gridMarkerHashD[index];
        /* Load hash data into shared memory so that we can look at a neighboring
         * particle's hash value without loading two hash values per thread
         */
sharedHash[threadIdx.x + 1] = hash;
if (index > 0 && threadIdx.x == 0) {
/* first thread in block must load neighbor particle hash */
sharedHash[0] = gridMarkerHashD[index - 1];
}
}
__syncthreads();
if (index < numAllMarkers) {
        /* If this particle has a different cell index from the previous particle,
         * it must be the first particle in its cell, so store its index as that
         * cell's start. Unless this is particle 0, the previous particle must then
         * be the last one in its own cell, so this index is also stored as the end
         * of the previous particle's cell.
         */
if (index == 0 || hash != sharedHash[threadIdx.x]) {
cellStartD[hash] = index;
if (index > 0)
cellEndD[sharedHash[threadIdx.x]] = index;
}
if (index == numAllMarkers - 1) {
cellEndD[hash] = index + 1;
}
}
}
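// A minimal illustrative sketch (hypothetical helper, not part of the original
// source): once cellStartD/cellEndD are filled, the sorted markers of one cell
// occupy the contiguous index range [cellStart[hash], cellEnd[hash]). Untouched
// cells keep the fill value 0 set in reorderDataAndFindCellStart(), so
// start == end marks an empty cell.
__device__ inline uint exampleCountMarkersInCell(const uint* cellStart,
                                                 const uint* cellEnd,
                                                 uint gridHash) {
    uint startIndex = cellStart[gridHash];
    uint endIndex = cellEnd[gridHash];
    // non-empty cells satisfy end > start; everything else counts as empty
    return (endIndex > startIndex) ? (endIndex - startIndex) : 0u;
}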
__global__ void reorderDataD(uint* gridMarkerIndexD, // input: sorted particle indices
uint* extendedActivityIdD, // input: particles in an extended active sub-domain
uint* mapOriginalToSorted, // input: original index to sorted index
Real4* sortedPosRadD, // output: sorted positions
Real3* sortedVelMasD, // output: sorted velocities
Real4* sortedRhoPreMuD, // output: sorted density pressure
Real3* sortedTauXxYyZzD, // output: sorted total stress xxyyzz
Real3* sortedTauXyXzYzD, // output: sorted total stress xyzxyz
Real4* posRadD, // input: original position array
Real3* velMasD, // input: original velocity array
Real4* rhoPresMuD, // input: original density pressure
Real3* tauXxYyZzD, // input: original total stress xxyyzz
Real3* tauXyXzYzD, // input: original total stress xyzxyz
const size_t numAllMarkers) {
uint id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= numAllMarkers)
return;
    // Here the thread id is already the original particle index; its sorted slot
    // is looked up from mapOriginalToSorted below
    uint originalIndex = id;
// no need to do anything if it is not an active particle
uint activity = extendedActivityIdD[originalIndex];
if(activity == 0)
return;
// map original to sorted
uint index = mapOriginalToSorted[originalIndex];
Real3 posRad = mR3(posRadD[originalIndex]);
Real3 velMas = velMasD[originalIndex];
Real4 rhoPreMu = rhoPresMuD[originalIndex];
if (!(isfinite(posRad.x) && isfinite(posRad.y) && isfinite(posRad.z))) {
printf(
"Error! particle position is NAN: thrown from "
"ChCollisionSystemFsi.cu, reorderDataAndFindCellStartD !\n");
}
if (!(isfinite(velMas.x) && isfinite(velMas.y) && isfinite(velMas.z))) {
printf(
"Error! particle velocity is NAN: thrown from "
"ChCollisionSystemFsi.cu, reorderDataAndFindCellStartD !\n");
}
if (!(isfinite(rhoPreMu.x) && isfinite(rhoPreMu.y) && isfinite(rhoPreMu.z) && isfinite(rhoPreMu.w))) {
printf(
"Error! particle rhoPreMu is NAN: thrown from "
"ChCollisionSystemFsi.cu, reorderDataAndFindCellStartD !\n");
}
sortedPosRadD[index] = mR4(posRad, posRadD[originalIndex].w);
sortedVelMasD[index] = velMas;
sortedRhoPreMuD[index] = rhoPreMu;
// For granular material
if( paramsD.elastic_SPH ) {
Real3 tauXxYyZz = tauXxYyZzD[originalIndex];
Real3 tauXyXzYz = tauXyXzYzD[originalIndex];
if (!(isfinite(tauXxYyZz.x) && isfinite(tauXxYyZz.y) && isfinite(tauXxYyZz.z))) {
printf(
"Error! particle tauXxYyZz is NAN: thrown from "
"ChCollisionSystemFsi.cu, reorderDataAndFindCellStartD !\n");
}
if (!(isfinite(tauXyXzYz.x) && isfinite(tauXyXzYz.y) && isfinite(tauXyXzYz.z))) {
printf(
"Error! particle tauXyXzYz is NAN: thrown from "
"ChCollisionSystemFsi.cu, reorderDataAndFindCellStartD !\n");
}
sortedTauXxYyZzD[index] = tauXxYyZz;
sortedTauXyXzYzD[index] = tauXyXzYz;
}
}
__global__ void OriginalToSortedD(uint* mapOriginalToSorted,
uint* gridMarkerIndex,
const size_t numAllMarkers) {
uint id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= numAllMarkers)
return;
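    // gridMarkerIndex maps a sorted slot to the original marker index; writing id
    // into slot gridMarkerIndex[id] builds the inverse permutation
    // (original index -> sorted slot), e.g. {2, 0, 1} becomes {1, 2, 0}.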
uint index = gridMarkerIndex[id];
mapOriginalToSorted[index] = id;
}
//--------------------------------------------------------------------------------------------------------------------------------
ChCollisionSystemFsi::ChCollisionSystemFsi(std::shared_ptr<SphMarkerDataD> otherSortedSphMarkersD,
std::shared_ptr<ProximityDataD> otherMarkersProximityD,
std::shared_ptr<FsiGeneralData> otherFsiGeneralData,
std::shared_ptr<SimParams> otherParamsH,
std::shared_ptr<ChCounters> otherNumObjects)
: sortedSphMarkersD(otherSortedSphMarkersD),
markersProximityD(otherMarkersProximityD),
fsiGeneralData(otherFsiGeneralData),
paramsH(otherParamsH),
numObjectsH(otherNumObjects) {
sphMarkersD = NULL;
}
ChCollisionSystemFsi::~ChCollisionSystemFsi() {}
//--------------------------------------------------------------------------------------------------------------------------------
void ChCollisionSystemFsi::Initialize() {
cudaMemcpyToSymbolAsync(paramsD, paramsH.get(), sizeof(SimParams));
cudaMemcpyToSymbolAsync(numObjectsD, numObjectsH.get(), sizeof(ChCounters));
}
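// Note: paramsD and numObjectsD are device-side symbols (presumably declared
// __constant__ elsewhere in the FSI module); copying the host structs into them
// here is what lets the kernels above read paramsD.HSML, paramsD.boxDims, or
// paramsD.elastic_SPH without receiving them as kernel arguments.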
//--------------------------------------------------------------------------------------------------------------------------------
void ChCollisionSystemFsi::calcHash() {
if (!(markersProximityD->gridMarkerHashD.size() == numObjectsH->numAllMarkers &&
markersProximityD->gridMarkerIndexD.size() == numObjectsH->numAllMarkers)) {
printf(
"mError! calcHash!, gridMarkerHashD.size() %zu "
"gridMarkerIndexD.size() %zu numObjectsH->numAllMarkers %zu \n",
markersProximityD->gridMarkerHashD.size(), markersProximityD->gridMarkerIndexD.size(),
numObjectsH->numAllMarkers);
throw std::runtime_error("Error! size error, calcHash!");
}
bool *isErrorH, *isErrorD;
isErrorH = (bool*)malloc(sizeof(bool));
cudaMalloc((void**)&isErrorD, sizeof(bool));
*isErrorH = false;
cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice);
//------------------------------------------------------------------------
/* Is there a need to optimize the number of threads used at once? */
uint numThreads, numBlocks;
computeGridSize((int)numObjectsH->numAllMarkers, 256, numBlocks, numThreads);
/* Execute Kernel */
calcHashD<<<numBlocks, numThreads>>>(U1CAST(markersProximityD->gridMarkerHashD),
U1CAST(markersProximityD->gridMarkerIndexD),
mR4CAST(sphMarkersD->posRadD),
numObjectsH->numAllMarkers, isErrorD);
/* Check for errors in kernel execution */
cudaDeviceSynchronize();
cudaCheckError();
//------------------------------------------------------------------------
cudaMemcpy(isErrorH, isErrorD, sizeof(bool), cudaMemcpyDeviceToHost);
if (*isErrorH == true) {
throw std::runtime_error("Error! program crashed in calcHashD!\n");
}
cudaFree(isErrorD);
free(isErrorH);
}
void ChCollisionSystemFsi::ResetCellSize(int s) {
markersProximityD->cellStartD.resize(s);
markersProximityD->cellEndD.resize(s);
}
void ChCollisionSystemFsi::reorderDataAndFindCellStart() {
int3 cellsDim = paramsH->gridSize;
int numCells = cellsDim.x * cellsDim.y * cellsDim.z;
if (!(markersProximityD->cellStartD.size() == numCells && markersProximityD->cellEndD.size() == numCells)) {
throw std::runtime_error("Error! size error, reorderDataAndFindCellStart!\n");
}
thrust::fill(markersProximityD->cellStartD.begin(), markersProximityD->cellStartD.end(), 0);
thrust::fill(markersProximityD->cellEndD.begin(), markersProximityD->cellEndD.end(), 0);
uint numThreads, numBlocks;
computeGridSize((uint)numObjectsH->numAllMarkers, 256, numBlocks, numThreads); // 256 is blockSize
uint smemSize = sizeof(uint) * (numThreads + 1);
// Find the start index and the end index of the sorted array in each cell
findCellStartEndD<<<numBlocks, numThreads, smemSize>>>(
U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD),
U1CAST(markersProximityD->gridMarkerHashD), U1CAST(markersProximityD->gridMarkerIndexD),
numObjectsH->numAllMarkers);
cudaDeviceSynchronize();
cudaCheckError();
// Launch a kernel to find the location of original particles in the sorted arrays.
// This is faster than using thrust::sort_by_key()
OriginalToSortedD<<<numBlocks, numThreads>>>(
U1CAST(markersProximityD->mapOriginalToSorted),
U1CAST(markersProximityD->gridMarkerIndexD), numObjectsH->numAllMarkers);
// Reorder the arrays according to the sorted index of all particles
reorderDataD<<<numBlocks, numThreads>>>(
U1CAST(markersProximityD->gridMarkerIndexD),
U1CAST(fsiGeneralData->extendedActivityIdD),
U1CAST(markersProximityD->mapOriginalToSorted),
mR4CAST(sortedSphMarkersD->posRadD), mR3CAST(sortedSphMarkersD->velMasD),
mR4CAST(sortedSphMarkersD->rhoPresMuD), mR3CAST(sortedSphMarkersD->tauXxYyZzD),
mR3CAST(sortedSphMarkersD->tauXyXzYzD), mR4CAST(sphMarkersD->posRadD),
mR3CAST(sphMarkersD->velMasD), mR4CAST(sphMarkersD->rhoPresMuD),
mR3CAST(sphMarkersD->tauXxYyZzD), mR3CAST(sphMarkersD->tauXyXzYzD),
numObjectsH->numAllMarkers);
cudaDeviceSynchronize();
cudaCheckError();
}
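// ArrangeData drives the whole proximity pass: (1) hash every marker into a grid
// cell (calcHash), (2) sort the marker indices by hash (thrust::sort_by_key),
// (3) reorder the marker arrays and record per-cell [start, end) offsets
// (reorderDataAndFindCellStart).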
void ChCollisionSystemFsi::ArrangeData(std::shared_ptr<SphMarkerDataD> otherSphMarkersD) {
sphMarkersD = otherSphMarkersD;
int3 cellsDim = paramsH->gridSize;
int numCells = cellsDim.x * cellsDim.y * cellsDim.z;
ResetCellSize(numCells);
calcHash();
thrust::sort_by_key(markersProximityD->gridMarkerHashD.begin(), markersProximityD->gridMarkerHashD.end(),
markersProximityD->gridMarkerIndexD.begin());
reorderDataAndFindCellStart();
}
} // end namespace fsi
} // end namespace chrono
|
e163c3ae19f0062111fba757e72db4f39d95889c.hip | // !!! This is a file automatically generated by hipify!!!
#include "consts.h"
#include "colors.h"
color * calc_speeds (int max_iterations) {
int i;
color *cols;
hipMallocManaged(&cols, sizeof(color) * max_iterations);
for (i = 0; i < max_iterations; i++) {
speed_to_color(&cols[i], i, max_iterations);
// speed_to_grey(&cols[i], i, max_iterations);
}
printf("generated %d colors (@%p -> %p)\n", max_iterations, &cols[0], &cols[max_iterations - 1]);
return cols;
}
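// The palette above lives in unified (managed) memory, so the same pointer can be
// indexed from host code and from device code alike; e.g. a hypothetical
// escape-time kernel could simply look up cols[iterations_taken] per pixel.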
void speed_to_grey (color *c, int n, int iterations) {
unsigned int calc = ((float)n / (float)iterations) * 255.0;
    c->r = c->g = c->b = calc;
c->a = 0;
}
double hue2rgb (double p, double q, double t) {
if (t < 0) {
t += 1;
}
if (t > 1) {
t -= 1;
}
if (t < 0.16) {
return p + (q - p) * 6.0 * t;
}
if (t < 0.5) {
return q;
}
if (t < 0.66) {
return p + (q - p) * (0.66 - t) * 6.0;
}
return p;
}
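// hue2rgb is essentially the standard helper from HSL-to-RGB conversion: given the
// intermediate values p and q it evaluates one colour channel at hue offset t.
// speed_to_color below calls it at m + 0.15, m and m - 0.15 for the red, green and
// blue channels (the textbook formulation uses offsets of one third).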
void speed_to_color (color *c, int speed, int max_iterations) {
// b = log(1) - log(max)
// max = b * log(x)
double max = ((double)max_iterations * 0.5);
double m = (double) (speed * 3.0) / max;
double saturation = 1;
double light = 0.6;
double q = light + saturation - light * saturation;
double p = 2.0 * light - q;
c->r = hue2rgb(p, q, m + 0.15) * 255.0;
c->g = hue2rgb(p, q, m) * 255.0;
c->b = hue2rgb(p, q, m - 0.15) * 255.0;
c->a = 0;
}
void julia_high_red_to_yellow (color *c, int speed, int max_iterations) {
double max = ((double)max_iterations * 0.5);
double m = (double) (speed * 3.0) / max;
double saturation = 1;
double light = 0.6;
double q = light + saturation - light * saturation;
double p = 2.0 * light - q;
c->r = hue2rgb(p, q, m + 0.15) * 255.0;
c->g = hue2rgb(p, q, m) * 255.0;
c->b = hue2rgb(p, q, m - 0.15) * 255.0;
c->a = 0;
}
int same_color (const color *a, const color *b) {
return a->r == b->r && a->g == b->g && a->b == b->b;
}
| e163c3ae19f0062111fba757e72db4f39d95889c.cu | #include "consts.h"
#include "colors.h"
color * calc_speeds (int max_iterations) {
int i;
color *cols;
cudaMallocManaged(&cols, sizeof(color) * max_iterations);
for (i = 0; i < max_iterations; i++) {
speed_to_color(&cols[i], i, max_iterations);
// speed_to_grey(&cols[i], i, max_iterations);
}
printf("generated %d colors (@%p -> %p)\n", max_iterations, &cols[0], &cols[max_iterations - 1]);
return cols;
}
void speed_to_grey (color *c, int n, int iterations) {
unsigned int calc = ((float)n / (float)iterations) * 255.0;
    c->r = c->g = c->b = calc;
c->a = 0;
}
double hue2rgb (double p, double q, double t) {
if (t < 0) {
t += 1;
}
if (t > 1) {
t -= 1;
}
if (t < 0.16) {
return p + (q - p) * 6.0 * t;
}
if (t < 0.5) {
return q;
}
if (t < 0.66) {
return p + (q - p) * (0.66 - t) * 6.0;
}
return p;
}
void speed_to_color (color *c, int speed, int max_iterations) {
// b = log(1) - log(max)
// max = b * log(x)
double max = ((double)max_iterations * 0.5);
double m = (double) (speed * 3.0) / max;
double saturation = 1;
double light = 0.6;
double q = light + saturation - light * saturation;
double p = 2.0 * light - q;
c->r = hue2rgb(p, q, m + 0.15) * 255.0;
c->g = hue2rgb(p, q, m) * 255.0;
c->b = hue2rgb(p, q, m - 0.15) * 255.0;
c->a = 0;
}
void julia_high_red_to_yellow (color *c, int speed, int max_iterations) {
double max = ((double)max_iterations * 0.5);
double m = (double) (speed * 3.0) / max;
double saturation = 1;
double light = 0.6;
double q = light + saturation - light * saturation;
double p = 2.0 * light - q;
c->r = hue2rgb(p, q, m + 0.15) * 255.0;
c->g = hue2rgb(p, q, m) * 255.0;
c->b = hue2rgb(p, q, m - 0.15) * 255.0;
c->a = 0;
}
int same_color (const color *a, const color *b) {
return a->r == b->r && a->g == b->g && a->b == b->b;
}
|
1432ea984ae8bf4d28c5ab025c4c2c3b9a0622a8.hip | // !!! This is a file automatically generated by hipify!!!
/*
SOURCE OPERATOR DEVICE MATRIX OPERATIONS CONTAINS onDEV AND cpu FUNCTIONS
AUTHOR : FABIAN DECHANT / JANNIS SCHÜRMANN
DATE : 11.08.2020
TO-DO :
CAUTION :
*/
// c++ standard headers
#include <iostream>
// standard c headers
#include <assert.h>
// own headers
#include "../common.h"
#include "matrix_operator.h"
#include "kernel_utils.h"
#include "../global.h"
// cublas headers
#include "rocblas.h"
#include <hip/hip_runtime.h>
//___________________________________________________________________________________________________
// ADD REDUCE
// function on cpu
// dim_red is the dimension which is supposed to be reduced -> the other dimension will remain and should be equal to the size of the given vector
void add_reduce_dim_cpu(const double* mat_in,double *vec_out, int rows,int cols, int dim_red,int dim_vec){
assert(dim_red<2 && (dim_red ? rows : cols)==dim_vec);
memset(vec_out,0,dim_vec*sizeof(double));
if(dim_red==0){
for(int i=0;i<rows;i++){
for(int j=0;j<cols;j++){
vec_out[j]+=mat_in[i*cols+j];
}
}
} else if (dim_red==1) {
for(int i=0;i<rows;i++){
for(int j=0;j<cols;j++){
vec_out[i]+=mat_in[i*cols+j];
}
}
}
}
// function onDev
// dim_red is the dimension which is supposed to be reduced -> the other dimension will remain and should be equal to the size of the given vector
void add_reduce_dim_onDev(const double* dev_mat_in,double *dev_vec_out, int rows,int cols, int dim_red,int size_vec){
assert(dim_red<2 && (dim_red ? rows : cols)==size_vec);
CHECK(hipMemset(dev_vec_out, 0, size_vec*sizeof(double)));
if(dim_red==0){
dim3 grid=col_red_grid(cols,rows);
hipLaunchKernelGGL(( add_reduce_cols_kernel), dim3(grid),dim3(get_col_red_block()), 0, 0, dev_mat_in,dev_vec_out,rows,cols);
}else if (dim_red==1){
dim3 grid=row_red_grid(cols,rows);
hipLaunchKernelGGL(( add_reduce_rows_kernel), dim3(grid),dim3(get_row_red_2d_block()), 0, 0, dev_mat_in, dev_vec_out,rows,cols);
}
CHECK(hipDeviceSynchronize());
CHECK(hipGetLastError());
}
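// A minimal usage sketch (illustrative only, not part of the original API): for a
// row-major 4x3 matrix, dim_red = 0 collapses the row dimension and yields the
// column sums (length cols), while dim_red = 1 yields the row sums (length rows).
void example_add_reduce_usage(void)
{
  double mat[12] = {1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.};
  double col_sums[3];
  double row_sums[4];
  add_reduce_dim_cpu(mat, col_sums, 4, 3, 0, 3); // col_sums = {22, 26, 30}
  add_reduce_dim_cpu(mat, row_sums, 4, 3, 1, 4); // row_sums = {6, 15, 24, 33}
}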
//___________________________________________________________________________________________________
// COMBINE ALONG AXIS
// on cpu
// dim_add is dimension along which the vector should be added. vector should have the size of the other dimension
void add_along_axis_cpu(const double* mat_in,const double *vec,double* mat_out, int rows,int cols, int dim_add, int size_vec){
// assert that dimensions match
assert(dim_add<2 && (dim_add ? rows : cols)==size_vec);
// case add along dimenion 0
if(dim_add==0){
for(int i=0;i<rows;i++){
for(int j=0;j<cols;j++){
mat_out[i*cols+j]=add_func(mat_in[i*cols+j],vec[j]);
}
}
// case add along dimenion 1
}else if (dim_add==1){
for(int i=0;i<rows;i++){
for(int j=0;j<cols;j++){
mat_out[i*cols+j]=add_func(mat_in[i*cols+j],vec[i]);
}
}
}
}
// onDev
// dim_add is dimension along which the vector should be added. vector should have the size of the other dimension
void add_along_axis_onDev(const double* dev_mat_in,const double *dev_vec,double* dev_mat_out, int rows,int cols, int dim_add, int size_vec){
// assert dimensions match
assert(dim_add<2 && (dim_add ? rows : cols)==size_vec);
// case add along dimenion 0
if(dim_add==0){
hipLaunchKernelGGL(( func_along_axis_y_kernel), dim3(pointwise2d_grid(cols,rows)),dim3(get_pointwise2d_block()), 0, 0, dev_mat_in,dev_vec,dev_mat_out,rows,cols,ADD);
// case add along dimenion 1
}else if (dim_add==1){
hipLaunchKernelGGL(( func_along_axis_x_kernel), dim3(pointwise2d_grid(cols,rows)),dim3(get_pointwise2d_block()), 0, 0, dev_mat_in,dev_vec,dev_mat_out,rows,cols,ADD);
}
CHECK(hipDeviceSynchronize());
CHECK(hipGetLastError());
}
// add along col direct cpu
// adds directly onto the matrix
void add_along_col_direct_cpu(double* dev_mat,const double* dev_vec,int rows,int cols){
for (int col = 0; col < cols; col++)
for (int row = 0; row < rows; row++)
dev_mat[row*cols+col] += dev_vec[col];
}
//_______________________________________________________________________________________________
// add along cols -> linear forward gpu
// adds directly onto the matrix
void add_along_col_direct_onDev(double* dev_mat,const double* dev_vec,int rows,int cols)
{
dim3 grid=pointwise2d_grid(cols, rows);
hipLaunchKernelGGL(( add_along_col_direct_kernel), dim3(grid),dim3(get_pointwise2d_block()), 0, 0, dev_mat, dev_vec, rows, cols);
CHECK(hipDeviceSynchronize());
CHECK(hipGetLastError());
}
// scale along axis cpu
// dim_div is dimension along which the vector should be divided. vector should have the size of the other dimension
void div_along_axis_cpu(const double* mat_in,const double *vec,double* mat_out, int rows,int cols, int dim_div, int size_vec){
assert(dim_div<2 && (dim_div ? rows : cols)==size_vec);
if(dim_div==0){
for(int i=0;i<rows;i++){
for(int j=0;j<cols;j++){
mat_out[i*cols+j]=div_func(mat_in[i*cols+j],vec[j]);
}
}
}else if (dim_div==1){
for(int i=0;i<rows;i++){
for(int j=0;j<cols;j++){
mat_out[i*cols+j]=div_func(mat_in[i*cols+j],vec[i]);
}
}
}
}
// scale along axis onDev
// dim_div is dimension along which the vector should be divided. vector should have the size of the other dimension
void div_along_axis_onDev(const double* dev_mat_in,const double *dev_vec,double* dev_mat_out, int rows,int cols, int dim_div, int size_vec){
assert(dim_div<2 && (dim_div ? rows : cols)==size_vec);
dim3 grid=pointwise2d_grid(cols,rows);
if(dim_div==0){
hipLaunchKernelGGL(( func_along_axis_y_kernel), dim3(grid),dim3(get_pointwise2d_block()), 0, 0, dev_mat_in,dev_vec,dev_mat_out,rows,cols,DIV);
}else if (dim_div==1){
hipLaunchKernelGGL(( func_along_axis_x_kernel), dim3(grid),dim3(get_pointwise2d_block()), 0, 0, dev_mat_in,dev_vec,dev_mat_out,rows,cols,DIV);
}
CHECK(hipDeviceSynchronize());
CHECK(hipGetLastError());
}
//___________________________________________________________________________________________________
// matrix_scalar_cpu
// multiply matrix by a scalar
void matrix_scalar_cpu(double* res,const double* inp,double factor,int size)
{
for (int idx = 0; idx < size; idx++)
res[idx] = inp[idx] * factor;
}
//___________________________________________________________________________________________________
// matrix_transpose_cpu and onDev
// computes the transposed matrix of a double matrix with arbitrary size on cpu
void matrix_transpose_cpu(double* out,
double* inp,
int rows,
int cols)
{
for (int row = 0; row < rows; row++)
for (int col = 0; col < cols; col++)
out[col*rows+row] = inp[row*cols+col];
}
// computes the transposed matrix of a double matrix with arbitrary size on device
void mat_transpose_onDev(const double* dev_mat_in, double* dev_mat_out, int rows, int cols, int threads_block){
int block_dim=(int)sqrt(threads_block);
threads_block=block_dim*block_dim;
dim3 block (block_dim,block_dim);
dim3 grid ((cols+block.x-1)/block.x,(rows+block.y-1)/block.y);
// Invoke kernel
hipLaunchKernelGGL(( mat_transpose_kernel), dim3(grid), dim3(block), 0, 0, dev_mat_in,dev_mat_out,rows,cols);
CHECK(hipDeviceSynchronize());
CHECK(hipGetLastError());
}
//___________________________________________________________________________________________________
// Combine pointwise
// Hadamard on Dev
void matrix_hadamard_onDev(double* dev_res,const double* dev_lhs,const double* dev_rhs,int size,int threads_block)
{
// calling hadamard kernel
hipLaunchKernelGGL(( comb_pointwise_1d_kernel), dim3(pointwise_grid(size)), dim3(get_pointwise_block()), 0, 0, dev_res, dev_lhs, dev_rhs, size,MUL);
CHECK(hipDeviceSynchronize());
CHECK(hipGetLastError());
}
// computes the hadamard matrix product for 2 matrices of type double and same size
void matrix_hadamard_cpu(double* res,
const double* lhs,
const double* rhs,
int size)
{
// loop over all array elements
for (int idx = 0; idx < size; idx++)
res[idx] = mul_func(lhs[idx],rhs[idx]);
}
// Add onDev
void matrix_add_onDev(double* dev_res,
const double* dev_lhs,
const double* dev_rhs,
int size,
int threads_block)
{
// calling add kernel
hipLaunchKernelGGL(( comb_pointwise_1d_kernel), dim3(pointwise_grid(size)), dim3(get_pointwise_block()), 0, 0, dev_res, dev_lhs, dev_rhs, size,ADD);
CHECK(hipDeviceSynchronize());
CHECK(hipGetLastError());
}
// computes the addition for two matrices of type double and same size
void matrix_add_cpu(double* res,
const double* lhs,
const double* rhs,
int size)
{
for (int idx = 0; idx < size; idx++)
res[idx] = lhs[idx] + rhs[idx];
}
// computes the multiply-add of two double matrices with a scalar factor (same size) on the CPU
void mulAdd_cpu(double* res, const double* lhs, const double* rhs, const double factor, int size)
{
for (int idx = 0; idx < size; idx++)
res[idx] = mulAdd(lhs[idx],rhs[idx],factor);
}
// computes the multiply-add of two double matrices with a scalar factor (same size) on the device
void mulAdd_onDev(double *dev_res,const double* dev_lhs,const double* dev_rhs,const double factor,int size,int threads_block)
{
// calling add kernel
hipLaunchKernelGGL(( mulAdd_kernel), dim3(pointwise_grid(size)), dim3(get_pointwise_block()), 0, 0, dev_res, dev_lhs, dev_rhs,factor,size);
CHECK(hipDeviceSynchronize());
CHECK(hipGetLastError());
}
// mulAdd direct on Dev function -> no intermediate result (lhs is the result)
void mulAdd_direct_onDev(double* dev_lhs,
const double* dev_rhs,
const double factor,
int size,
int threads_block)
{
// calling mulAdd_direct kernel
hipLaunchKernelGGL(( mulAdd_direct_kernel), dim3(pointwise_grid(size)), dim3(get_pointwise_block()), 0, 0, dev_lhs, dev_rhs, factor, size);
CHECK(hipDeviceSynchronize());
CHECK(hipGetLastError());
}
// mulAdd direct on cpu
void mulAdd_direct_cpu(double* lhs,
const double* rhs,
const double factor,
int size)
{
for (int idx = 0; idx < size; idx++)
lhs[idx] += rhs[idx]*factor;
}
//__________________________________________________________________________________________________
// Matrix Multiplications
// computes the matrix product of double matrices with arbitrary size on host
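// (here and below: A is M x K, B is K x N and C is M x N, all stored row-major)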
void matMul(const double *A,const double *B,int M,int N, int K,double *C)
{
double interm_sum;
for (int i = 0; i < M; i++) {
for (int j = 0; j < N; j++) {
interm_sum = 0.;
for (int kk = 0; kk < K; kk++)
interm_sum += A[i*K+kk]*B[kk*N+j];
C[i*N+j] = interm_sum;
}
}
}
// computes the matrix product of double matrices with arbitrary size on device
// naive implementation
void matMul_onDev1(const double *d_A, const double *d_B, int M,int N,int K,double *d_C, int threads_block)
{
int block_dim=(int)sqrt(threads_block);
threads_block=block_dim*block_dim;
dim3 block (block_dim,block_dim);
dim3 grid ((N+block.x-1)/block.x,(M+block.y-1)/block.y);
// Invoke kernel
hipLaunchKernelGGL(( matMul_kernel1), dim3(grid), dim3(block), 0, 0, d_A, d_B,M,N,K,d_C);
CHECK(hipDeviceSynchronize());
CHECK(hipGetLastError());
}
// computes the matrix product of double matrices with arbitrary size on device
// naive implementation with transposed matrix A -> coalesced memory access
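// (the kernel itself is defined elsewhere; the usual rationale for this variant is
// that walking a column of row-major A strides through memory with step K, whereas
// the same data in the pre-transposed copy d_A_T lies at contiguous addresses,
// which allows those global-memory loads to coalesce)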
void matMul_onDev2(const double *d_A, const double *d_B, int M,int N,int K,double *d_C, int threads_block)
{
double *d_A_T;
CHECK(hipMalloc((void**)&d_A_T,M*K*sizeof(double)));
int block_dim=(int)sqrt(threads_block);
threads_block=block_dim*block_dim;
dim3 block (block_dim,block_dim);
dim3 grid ((N+block.x-1)/block.x,(M+block.y-1)/block.y);
dim3 grid_A_T ((K+block.x-1)/block.x,(M+block.y-1)/block.y);
// Invoke kernel
hipLaunchKernelGGL(( mat_transpose_kernel), dim3(grid_A_T), dim3(block), 0, 0, d_A, d_A_T, M, K);
CHECK(hipDeviceSynchronize());
hipLaunchKernelGGL(( matMul_kernel2), dim3(grid), dim3(block), 0, 0, d_A_T, d_B,M,N,K,d_C);
CHECK(hipDeviceSynchronize());
    CHECK(hipGetLastError());
    // release the temporary transposed copy of A
    CHECK(hipFree(d_A_T));
}
// computes the matrix product of double matrices with arbitrary size on device
// tiled implementation with dynamic shared memory
void matMul_dsm_onDev(const double *d_A, const double *d_B, int M,int N,int K,double *d_C,int threads_block)
{
int block_dim=(int)sqrt(threads_block);
threads_block=block_dim*block_dim;
dim3 block (block_dim,block_dim);
dim3 grid ((N+block.x-1)/block.x,(M+block.y-1)/block.y);
// Invoke kernel
hipLaunchKernelGGL(( matMul_kernel_dsm), dim3(grid), dim3(block),2*threads_block*sizeof(double), 0, (const double *)d_A, (const double *)d_B,M,N,K,d_C);
CHECK(hipDeviceSynchronize());
CHECK(hipGetLastError());
}
// computes the matrix product of double matrices with arbitrary size on device
// tiled implementation with dynamic shared memory and coalesced access to global memory
void matMul_dsm_coa_onDev(const double *d_A, const double *d_B, int M,int N,int K,double *d_C,int threads_block)
{
double *d_A_T;
CHECK(hipMalloc((void**)&d_A_T,M*K*sizeof(double)));
int block_dim=(int)sqrt(threads_block);
threads_block=block_dim*block_dim;
dim3 block (block_dim,block_dim);
dim3 grid ((N+block.x-1)/block.x,(M+block.y-1)/block.y);
dim3 grid_A_T ((K+block.x-1)/block.x,(M+block.y-1)/block.y);
// Invoke kernel
hipLaunchKernelGGL(( mat_transpose_kernel), dim3(grid_A_T), dim3(block), 0, 0, d_A, d_A_T, M, K);
CHECK(hipDeviceSynchronize());
hipLaunchKernelGGL(( matMul_kernel_dsm_coa), dim3(grid), dim3(block),2*threads_block*sizeof(double), 0, (const double *)d_A_T, (const double *)d_B,M,N,K,d_C);
CHECK(hipDeviceSynchronize());
CHECK(hipGetLastError());
CHECK(hipFree(d_A_T));
}
// computes the matrix product of double matrices with arbitrary size on device
// tiled implementation with static shared memory and coalesced access to global memory
void matMul_sm_onDev(const double *d_A, const double *d_B, int M,int N,int K,double *d_C)
{
hipLaunchKernelGGL(( matMul_kernel_sm), dim3(matrix_mul_grid(N,M)), dim3(get_matrix_mul_block()), 0, 0, (const double *)d_A, (const double *)d_B,M,N,K,d_C);
CHECK(hipDeviceSynchronize());
CHECK(hipGetLastError());
}
// computes the matrix product of double matrices with arbitrary size on device
// tiled implementation with static shared memory and transposed matrices using if for 4 kernel versions
void matMul_sm_onDev_tr(const double *d_A, const double *d_B,const int A_TRANSP,const int B_TRANSP,const int rows_op_A,const int cols_op_A,const int rows_op_B,const int cols_op_B,double *d_C)
{
// assert matrizes do match
assert(cols_op_A==rows_op_B);
// get matrix dimensions
int rows_A,cols_A,rows_B,cols_B;
if(A_TRANSP){
rows_A=cols_op_A;
cols_A=rows_op_A;
}else{
rows_A=rows_op_A;
cols_A=cols_op_A;
}
if(B_TRANSP){
rows_B=cols_op_B;
cols_B=rows_op_B;
}else{
rows_B=rows_op_B;
cols_B=cols_op_B;
}
// Invoke kernel
hipLaunchKernelGGL(( matMul_kernel_sm_tr3), dim3(matrix_mul_grid(cols_op_B,rows_op_A)), dim3(get_matrix_mul_block()), 0, 0, (const double *)d_A, (const double *)d_B,A_TRANSP,B_TRANSP,rows_op_A,cols_op_B,cols_op_A,rows_A,cols_A,rows_B,cols_B,d_C);
// error handling
if(hipDeviceSynchronize()||hipGetLastError()){
printf("Error in matMul_sm_tr_onDev\n");
printf("Matrix Dimensions: M %d,N %d,K %d\n",rows_op_A,cols_op_B,cols_op_A);
CHECK(hipDeviceSynchronize());
CHECK(hipGetLastError());
}
}
// computes the matrix product of double matrices with arbitrary size on device
// tiled implementation with static shared memory and transposed matrices using indexing
void matMul_sm_onDev_tr_ind(const double *d_A, const double *d_B,const int A_TRANSP,const int B_TRANSP,const int rows_op_A,const int cols_op_A,const int rows_op_B,const int cols_op_B,double *d_C)
{
// assert matrizes do match
assert(cols_op_A==rows_op_B);
// get matrix dimensions
int rows_A,cols_A,rows_B,cols_B;
if(A_TRANSP){
rows_A=cols_op_A;
cols_A=rows_op_A;
}else{
rows_A=rows_op_A;
cols_A=cols_op_A;
}
if(B_TRANSP){
rows_B=cols_op_B;
cols_B=rows_op_B;
}else{
rows_B=rows_op_B;
cols_B=cols_op_B;
}
// Invoke kernel
hipLaunchKernelGGL(( matMul_kernel_sm_tr), dim3(matrix_mul_grid(cols_op_B,rows_op_A)), dim3(get_matrix_mul_block()), 0, 0, (const double *)d_A, (const double *)d_B,A_TRANSP,B_TRANSP,rows_op_A,cols_op_B,cols_op_A,rows_A,cols_A,rows_B,cols_B,d_C);
// error handling
if(hipDeviceSynchronize()||hipGetLastError()){
printf("Error in matMul_sm_tr_ind_onDev\n");
printf("Matrix Dimensions: M %d,N %d,K %d\n",rows_op_A,cols_op_B,cols_op_A);
CHECK(hipDeviceSynchronize());
CHECK(hipGetLastError());
}
}
void matrix_hadamard_gpu_test_dev(double* res,
const double* lhs,
const double* rhs,
int size,
int threads_block,int op_p_th)
{
// alloc cuda storage
double* d_res;
double* d_lhs;
double* d_rhs;
CHECK(hipMalloc((void**)&d_res, size*sizeof(double)));
CHECK(hipMalloc((void**)&d_lhs, size*sizeof(double)));
CHECK(hipMalloc((void**)&d_rhs, size*sizeof(double)));
// moving matrices to device
CHECK(hipMemcpy(d_lhs, lhs, size*sizeof(double), hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_rhs, rhs, size*sizeof(double), hipMemcpyHostToDevice));
// calling ard onDev
int blocks_grid = (size + (threads_block*op_p_th) - 1) / (threads_block*op_p_th);
hipLaunchKernelGGL(( matrix_hadamard_kernel), dim3(blocks_grid), dim3(threads_block), 0, 0, d_res, d_lhs, d_rhs, size);
CHECK(hipDeviceSynchronize());
// moving matrices back from memory
CHECK(hipMemcpy(res, d_res, size*sizeof(double), hipMemcpyDeviceToHost));
// free cuda storage
CHECK(hipFree(d_res));
CHECK(hipFree(d_rhs));
CHECK(hipFree(d_lhs));
}
| 1432ea984ae8bf4d28c5ab025c4c2c3b9a0622a8.cu | /*
SOURCE OPERATOR DEVICE MATRIX OPERATIONS CONTAINS onDEV AND cpu FUNCTIONS
AUTHOR : FABIAN DECHANT / JANNIS SCHÜRMANN
DATE : 11.08.2020
TO-DO :
CAUTION :
*/
// c++ standard headers
#include <iostream>
// standard c headers
#include <assert.h>
// own headers
#include "../common.h"
#include "matrix_operator.h"
#include "kernel_utils.h"
#include "../global.h"
// cublas headers
#include "cublas_v2.h"
#include <cuda_runtime.h>
//___________________________________________________________________________________________________
// ADD REDUCE
// function on cpu
// dim_red is the dimension which is supposed to be reduced -> the other dimension will remain and should be equal to the size of the given vector
void add_reduce_dim_cpu(const double* mat_in,double *vec_out, int rows,int cols, int dim_red,int dim_vec){
assert(dim_red<2 && (dim_red ? rows : cols)==dim_vec);
memset(vec_out,0,dim_vec*sizeof(double));
if(dim_red==0){
for(int i=0;i<rows;i++){
for(int j=0;j<cols;j++){
vec_out[j]+=mat_in[i*cols+j];
}
}
} else if (dim_red==1) {
for(int i=0;i<rows;i++){
for(int j=0;j<cols;j++){
vec_out[i]+=mat_in[i*cols+j];
}
}
}
}
// function onDev
// dim_red is the dimension which is supposed to be reduced -> the other dimension will remain and should be equal to the size of the given vector
void add_reduce_dim_onDev(const double* dev_mat_in,double *dev_vec_out, int rows,int cols, int dim_red,int size_vec){
assert(dim_red<2 && (dim_red ? rows : cols)==size_vec);
CHECK(cudaMemset(dev_vec_out, 0, size_vec*sizeof(double)));
if(dim_red==0){
dim3 grid=col_red_grid(cols,rows);
add_reduce_cols_kernel<<<grid,get_col_red_block()>>>(dev_mat_in,dev_vec_out,rows,cols);
}else if (dim_red==1){
dim3 grid=row_red_grid(cols,rows);
add_reduce_rows_kernel<<<grid,get_row_red_2d_block()>>>(dev_mat_in, dev_vec_out,rows,cols);
}
CHECK(cudaDeviceSynchronize());
CHECK(cudaGetLastError());
}
//___________________________________________________________________________________________________
// COMBINE ALONG AXIS
// on cpu
// dim_add is dimension along which the vector should be added. vector should have the size of the other dimension
void add_along_axis_cpu(const double* mat_in,const double *vec,double* mat_out, int rows,int cols, int dim_add, int size_vec){
// assert that dimensions match
assert(dim_add<2 && (dim_add ? rows : cols)==size_vec);
// case add along dimenion 0
if(dim_add==0){
for(int i=0;i<rows;i++){
for(int j=0;j<cols;j++){
mat_out[i*cols+j]=add_func(mat_in[i*cols+j],vec[j]);
}
}
// case add along dimenion 1
}else if (dim_add==1){
for(int i=0;i<rows;i++){
for(int j=0;j<cols;j++){
mat_out[i*cols+j]=add_func(mat_in[i*cols+j],vec[i]);
}
}
}
}
// onDev
// dim_add is dimension along which the vector should be added. vector should have the size of the other dimension
void add_along_axis_onDev(const double* dev_mat_in,const double *dev_vec,double* dev_mat_out, int rows,int cols, int dim_add, int size_vec){
// assert dimensions match
assert(dim_add<2 && (dim_add ? rows : cols)==size_vec);
// case add along dimenion 0
if(dim_add==0){
func_along_axis_y_kernel<<<pointwise2d_grid(cols,rows),get_pointwise2d_block()>>>(dev_mat_in,dev_vec,dev_mat_out,rows,cols,ADD);
// case add along dimenion 1
}else if (dim_add==1){
func_along_axis_x_kernel<<<pointwise2d_grid(cols,rows),get_pointwise2d_block()>>>(dev_mat_in,dev_vec,dev_mat_out,rows,cols,ADD);
}
CHECK(cudaDeviceSynchronize());
CHECK(cudaGetLastError());
}
// add along col direct cpu
// adds directly onto the matrix
void add_along_col_direct_cpu(double* dev_mat,const double* dev_vec,int rows,int cols){
for (int col = 0; col < cols; col++)
for (int row = 0; row < rows; row++)
dev_mat[row*cols+col] += dev_vec[col];
}
//_______________________________________________________________________________________________
// add along cols -> linear forward gpu
// adds directly onto the matrix
void add_along_col_direct_onDev(double* dev_mat,const double* dev_vec,int rows,int cols)
{
dim3 grid=pointwise2d_grid(cols, rows);
add_along_col_direct_kernel<<<grid,get_pointwise2d_block()>>>(dev_mat, dev_vec, rows, cols);
CHECK(cudaDeviceSynchronize());
CHECK(cudaGetLastError());
}
// scale along axis cpu
// dim_div is dimension along which the vector should be divided. vector should have the size of the other dimension
void div_along_axis_cpu(const double* mat_in,const double *vec,double* mat_out, int rows,int cols, int dim_div, int size_vec){
assert(dim_div<2 && (dim_div ? rows : cols)==size_vec);
if(dim_div==0){
for(int i=0;i<rows;i++){
for(int j=0;j<cols;j++){
mat_out[i*cols+j]=div_func(mat_in[i*cols+j],vec[j]);
}
}
}else if (dim_div==1){
for(int i=0;i<rows;i++){
for(int j=0;j<cols;j++){
mat_out[i*cols+j]=div_func(mat_in[i*cols+j],vec[i]);
}
}
}
}
// scale along axis onDev
// dim_div is dimension along which the vector should be divided. vector should have the size of the other dimension
void div_along_axis_onDev(const double* dev_mat_in,const double *dev_vec,double* dev_mat_out, int rows,int cols, int dim_div, int size_vec){
assert(dim_div<2 && (dim_div ? rows : cols)==size_vec);
dim3 grid=pointwise2d_grid(cols,rows);
if(dim_div==0){
func_along_axis_y_kernel<<<grid,get_pointwise2d_block()>>>(dev_mat_in,dev_vec,dev_mat_out,rows,cols,DIV);
}else if (dim_div==1){
func_along_axis_x_kernel<<<grid,get_pointwise2d_block()>>>(dev_mat_in,dev_vec,dev_mat_out,rows,cols,DIV);
}
CHECK(cudaDeviceSynchronize());
CHECK(cudaGetLastError());
}
//___________________________________________________________________________________________________
// matrix_scalar_cpu
// multiply matrix by a scalar
void matrix_scalar_cpu(double* res,const double* inp,double factor,int size)
{
for (int idx = 0; idx < size; idx++)
res[idx] = inp[idx] * factor;
}
//___________________________________________________________________________________________________
// matrix_transpose_cpu and onDev
// computes the transposed matrix of a double matrix with arbitrary size on cpu
void matrix_transpose_cpu(double* out,
double* inp,
int rows,
int cols)
{
for (int row = 0; row < rows; row++)
for (int col = 0; col < cols; col++)
out[col*rows+row] = inp[row*cols+col];
}
// computes the transposed matrix of a double matrix with arbitrary size on device
void mat_transpose_onDev(const double* dev_mat_in, double* dev_mat_out, int rows, int cols, int threads_block){
int block_dim=(int)sqrt(threads_block);
threads_block=block_dim*block_dim;
dim3 block (block_dim,block_dim);
dim3 grid ((cols+block.x-1)/block.x,(rows+block.y-1)/block.y);
// Invoke kernel
mat_transpose_kernel<<<grid, block>>>(dev_mat_in,dev_mat_out,rows,cols);
CHECK(cudaDeviceSynchronize());
CHECK(cudaGetLastError());
}
//___________________________________________________________________________________________________
// Combine pointwise
// Hadamard on Dev
void matrix_hadamard_onDev(double* dev_res,const double* dev_lhs,const double* dev_rhs,int size,int threads_block)
{
// calling hadamard kernel
comb_pointwise_1d_kernel<<<pointwise_grid(size), get_pointwise_block()>>>(dev_res, dev_lhs, dev_rhs, size,MUL);
CHECK(cudaDeviceSynchronize());
CHECK(cudaGetLastError());
}
// computes the hadamard matrix product for 2 matrices of type double and same size
void matrix_hadamard_cpu(double* res,
const double* lhs,
const double* rhs,
int size)
{
// loop over all array elements
for (int idx = 0; idx < size; idx++)
res[idx] = mul_func(lhs[idx],rhs[idx]);
}
// Add onDev
void matrix_add_onDev(double* dev_res,
const double* dev_lhs,
const double* dev_rhs,
int size,
int threads_block)
{
// calling add kernel
comb_pointwise_1d_kernel<<<pointwise_grid(size), get_pointwise_block()>>>(dev_res, dev_lhs, dev_rhs, size,ADD);
CHECK(cudaDeviceSynchronize());
CHECK(cudaGetLastError());
}
// computes the addition for two matrices of type double and same size
void matrix_add_cpu(double* res,
const double* lhs,
const double* rhs,
int size)
{
for (int idx = 0; idx < size; idx++)
res[idx] = lhs[idx] + rhs[idx];
}
// computes the multiply-add of two double matrices with a scalar factor (same size) on the CPU
void mulAdd_cpu(double* res, const double* lhs, const double* rhs, const double factor, int size)
{
for (int idx = 0; idx < size; idx++)
res[idx] = mulAdd(lhs[idx],rhs[idx],factor);
}
// computes the multiply-add of two double matrices with a scalar factor (same size) on the device
void mulAdd_onDev(double *dev_res,const double* dev_lhs,const double* dev_rhs,const double factor,int size,int threads_block)
{
// calling add kernel
mulAdd_kernel<<<pointwise_grid(size), get_pointwise_block()>>>(dev_res, dev_lhs, dev_rhs,factor,size);
CHECK(cudaDeviceSynchronize());
CHECK(cudaGetLastError());
}
// mull add direct on Dev function -> no in between result (lhs is the result)
void mulAdd_direct_onDev(double* dev_lhs,
const double* dev_rhs,
const double factor,
int size,
int threads_block)
{
// calling mulAdd_direct kernel
mulAdd_direct_kernel<<<pointwise_grid(size), get_pointwise_block()>>>(dev_lhs, dev_rhs, factor, size);
CHECK(cudaDeviceSynchronize());
CHECK(cudaGetLastError());
}
// mulAdd direct on cpu
void mulAdd_direct_cpu(double* lhs,
const double* rhs,
const double factor,
int size)
{
for (int idx = 0; idx < size; idx++)
lhs[idx] += rhs[idx]*factor;
}
//__________________________________________________________________________________________________
// Matrix Multiplications
// computes the matrixproduct of double matrices with arbitrary size on host
void matMul(const double *A,const double *B,int M,int N, int K,double *C)
{
double interm_sum;
for (int i = 0; i < M; i++) {
for (int j = 0; j < N; j++) {
interm_sum = 0.;
for (int kk = 0; kk < K; kk++)
interm_sum += A[i*K+kk]*B[kk*N+j];
C[i*N+j] = interm_sum;
}
}
}
// computes the matrix product of double matrices with arbitrary size on device
// naive implementation
void matMul_onDev1(const double *d_A, const double *d_B, int M,int N,int K,double *d_C, int threads_block)
{
int block_dim=(int)sqrt(threads_block);
threads_block=block_dim*block_dim;
dim3 block (block_dim,block_dim);
dim3 grid ((N+block.x-1)/block.x,(M+block.y-1)/block.y);
// Invoke kernel
matMul_kernel1<<<grid, block>>>(d_A, d_B,M,N,K,d_C);
CHECK(cudaDeviceSynchronize());
CHECK(cudaGetLastError());
}
// computes the matrix product of double matrices with arbitrary size on device
// naive implementation with transposed matrix A -> coalesced memory access
void matMul_onDev2(const double *d_A, const double *d_B, int M,int N,int K,double *d_C, int threads_block)
{
double *d_A_T;
CHECK(cudaMalloc((void**)&d_A_T,M*K*sizeof(double)));
int block_dim=(int)sqrt(threads_block);
threads_block=block_dim*block_dim;
dim3 block (block_dim,block_dim);
dim3 grid ((N+block.x-1)/block.x,(M+block.y-1)/block.y);
dim3 grid_A_T ((K+block.x-1)/block.x,(M+block.y-1)/block.y);
// Invoke kernel
mat_transpose_kernel<<<grid_A_T, block>>>(d_A, d_A_T, M, K);
CHECK(cudaDeviceSynchronize());
matMul_kernel2<<<grid, block>>>(d_A_T, d_B,M,N,K,d_C);
CHECK(cudaDeviceSynchronize());
    CHECK(cudaGetLastError());
    // release the temporary transposed copy of A
    CHECK(cudaFree(d_A_T));
}
// computes the matrix product of double matrices with arbitrary size on device
// tiled implementation with dynamic shared memory
void matMul_dsm_onDev(const double *d_A, const double *d_B, int M,int N,int K,double *d_C,int threads_block)
{
int block_dim=(int)sqrt(threads_block);
threads_block=block_dim*block_dim;
dim3 block (block_dim,block_dim);
dim3 grid ((N+block.x-1)/block.x,(M+block.y-1)/block.y);
// Invoke kernel
matMul_kernel_dsm<<<grid, block,2*threads_block*sizeof(double)>>>((const double *)d_A, (const double *)d_B,M,N,K,d_C);
CHECK(cudaDeviceSynchronize());
CHECK(cudaGetLastError());
}
// computes the matrix product of double matrices with arbitrary size on device
// tiled implementation with dynamic shared memory and coalesced access to global memory
void matMul_dsm_coa_onDev(const double *d_A, const double *d_B, int M,int N,int K,double *d_C,int threads_block)
{
double *d_A_T;
CHECK(cudaMalloc((void**)&d_A_T,M*K*sizeof(double)));
int block_dim=(int)sqrt(threads_block);
threads_block=block_dim*block_dim;
dim3 block (block_dim,block_dim);
dim3 grid ((N+block.x-1)/block.x,(M+block.y-1)/block.y);
dim3 grid_A_T ((K+block.x-1)/block.x,(M+block.y-1)/block.y);
// Invoke kernel
mat_transpose_kernel<<<grid_A_T, block>>>(d_A, d_A_T, M, K);
CHECK(cudaDeviceSynchronize());
matMul_kernel_dsm_coa<<<grid, block,2*threads_block*sizeof(double)>>>((const double *)d_A_T, (const double *)d_B,M,N,K,d_C);
CHECK(cudaDeviceSynchronize());
CHECK(cudaGetLastError());
CHECK(cudaFree(d_A_T));
}
// computes the matrix product of double matrices with arbitrary size on device
// tiled implementation with static shared memory and coalesced access to global memory
void matMul_sm_onDev(const double *d_A, const double *d_B, int M,int N,int K,double *d_C)
{
matMul_kernel_sm<<<matrix_mul_grid(N,M), get_matrix_mul_block()>>>((const double *)d_A, (const double *)d_B,M,N,K,d_C);
CHECK(cudaDeviceSynchronize());
CHECK(cudaGetLastError());
}
// computes the matrix product of double matrices with arbitrary size on device
// tiled implementation with static shared memory and transposed matrices using if for 4 kernel versions
void matMul_sm_onDev_tr(const double *d_A, const double *d_B,const int A_TRANSP,const int B_TRANSP,const int rows_op_A,const int cols_op_A,const int rows_op_B,const int cols_op_B,double *d_C)
{
// assert matrizes do match
assert(cols_op_A==rows_op_B);
// get matrix dimensions
int rows_A,cols_A,rows_B,cols_B;
if(A_TRANSP){
rows_A=cols_op_A;
cols_A=rows_op_A;
}else{
rows_A=rows_op_A;
cols_A=cols_op_A;
}
if(B_TRANSP){
rows_B=cols_op_B;
cols_B=rows_op_B;
}else{
rows_B=rows_op_B;
cols_B=cols_op_B;
}
// Invoke kernel
matMul_kernel_sm_tr3<<<matrix_mul_grid(cols_op_B,rows_op_A), get_matrix_mul_block()>>>((const double *)d_A, (const double *)d_B,A_TRANSP,B_TRANSP,rows_op_A,cols_op_B,cols_op_A,rows_A,cols_A,rows_B,cols_B,d_C);
// error handling
if(cudaDeviceSynchronize()||cudaGetLastError()){
printf("Error in matMul_sm_tr_onDev\n");
printf("Matrix Dimensions: M %d,N %d,K %d\n",rows_op_A,cols_op_B,cols_op_A);
CHECK(cudaDeviceSynchronize());
CHECK(cudaGetLastError());
}
}
// computes the matrix product of double matrices with arbitrary size on device
// tiled implementation with static shared memory and transposed matrices using indexing
void matMul_sm_onDev_tr_ind(const double *d_A, const double *d_B,const int A_TRANSP,const int B_TRANSP,const int rows_op_A,const int cols_op_A,const int rows_op_B,const int cols_op_B,double *d_C)
{
// assert matrizes do match
assert(cols_op_A==rows_op_B);
// get matrix dimensions
int rows_A,cols_A,rows_B,cols_B;
if(A_TRANSP){
rows_A=cols_op_A;
cols_A=rows_op_A;
}else{
rows_A=rows_op_A;
cols_A=cols_op_A;
}
if(B_TRANSP){
rows_B=cols_op_B;
cols_B=rows_op_B;
}else{
rows_B=rows_op_B;
cols_B=cols_op_B;
}
// Invoke kernel
matMul_kernel_sm_tr<<<matrix_mul_grid(cols_op_B,rows_op_A), get_matrix_mul_block()>>>((const double *)d_A, (const double *)d_B,A_TRANSP,B_TRANSP,rows_op_A,cols_op_B,cols_op_A,rows_A,cols_A,rows_B,cols_B,d_C);
// error handling
if(cudaDeviceSynchronize()||cudaGetLastError()){
printf("Error in matMul_sm_tr_ind_onDev\n");
printf("Matrix Dimensions: M %d,N %d,K %d\n",rows_op_A,cols_op_B,cols_op_A);
CHECK(cudaDeviceSynchronize());
CHECK(cudaGetLastError());
}
}
void matrix_hadamard_gpu_test_dev(double* res,
const double* lhs,
const double* rhs,
int size,
int threads_block,int op_p_th)
{
// alloc cuda storage
double* d_res;
double* d_lhs;
double* d_rhs;
CHECK(cudaMalloc((void**)&d_res, size*sizeof(double)));
CHECK(cudaMalloc((void**)&d_lhs, size*sizeof(double)));
CHECK(cudaMalloc((void**)&d_rhs, size*sizeof(double)));
// moving matrices to device
CHECK(cudaMemcpy(d_lhs, lhs, size*sizeof(double), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_rhs, rhs, size*sizeof(double), cudaMemcpyHostToDevice));
// calling ard onDev
int blocks_grid = (size + (threads_block*op_p_th) - 1) / (threads_block*op_p_th);
matrix_hadamard_kernel<<<blocks_grid, threads_block>>>(d_res, d_lhs, d_rhs, size);
CHECK(cudaDeviceSynchronize());
// moving matrices back from memory
CHECK(cudaMemcpy(res, d_res, size*sizeof(double), cudaMemcpyDeviceToHost));
// free cuda storage
CHECK(cudaFree(d_res));
CHECK(cudaFree(d_rhs));
CHECK(cudaFree(d_lhs));
}
|
0328bc52b0052da36e8b70b9d86911504e9d88c5.hip | // !!! This is a file automatically generated by hipify!!!
/* CUDA Library for Skeleton 2-1/2D Electromagnetic GPU PIC Code */
/* written by Viktor K. Decyk, UCLA */
#include <stdlib.h>
#include <stdio.h>
#include "hip/hip_runtime.h"
extern int nblock_size;
extern int maxgsx;
static hipError_t crc;
/*--------------------------------------------------------------------*/
__device__ void liscan2(int *isdata, int nths) {
/* performs local prefix reduction of integer data shared by threads */
/* using binary tree method. */
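/* on exit isdata[l] holds the inclusive prefix sum of entries 0..l, */
/* e.g. for nths = 4 the input {3,1,4,1} becomes {3,4,8,9} */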
/* local data */
int l, mb, kxs, lb, kb;
l = threadIdx.x;
mb = l;
kxs = 1;
while (kxs < nths) {
lb = kxs*mb;
kb = 2*lb + kxs - 1;
lb += l + kxs;
if (lb < nths) {
isdata[lb] += isdata[kb];
}
__syncthreads();
mb >>= 1;
kxs <<= 1;
}
return;
}
/*--------------------------------------------------------------------*/
__device__ void lsum2(float *sdata, int n) {
/* finds local sum of nths data items shared by threads */
/* using binary tree method. input is modified. */
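/* the reduced total is left in sdata[0] (read below as sfxy[0]) */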
/* local data */
int l, k;
float s;
l = threadIdx.x;
k = blockDim.x >> 1;
s = 0.0f;
if (l < n) s = sdata[l];
while (k > 0) {
if (l < k) {
if ((l+k) < n) {
s += sdata[l+k];
sdata[l] = s;
}
}
__syncthreads();
k >>= 1;
}
return;
}
/*--------------------------------------------------------------------*/
__global__ void gpubppush23l(float ppart[], float fxy[], float bxy[],
int kpic[], float qbm, float dt, float dtc,
float *ek, int idimp, int nppmx, int nx,
int ny, int mx, int my, int nxv, int nyv,
int mx1, int mxy1, int ipbc) {
/* for 2-1/2d code, this subroutine updates particle co-ordinates and
velocities using leap-frog scheme in time and first-order linear
interpolation in space, with magnetic field. Using the Boris Mover.
threaded version using guard cells
data read in tiles
particles stored segmented array
119 flops/particle, 1 divide, 29 loads, 5 stores
input: all, output: ppart, ek
velocity equations used are:
vx(t+dt/2) = rot(1)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(2)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(3)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fx(x(t),y(t))*dt)
vy(t+dt/2) = rot(4)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(5)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(6)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fy(x(t),y(t))*dt)
vz(t+dt/2) = rot(7)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(8)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(9)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fz(x(t),y(t))*dt)
where q/m is charge/mass, and the rotation matrix is given by:
rot[0] = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2)
rot[1] = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
rot[2] = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[3] = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
rot[4] = (1 - (om*dt/2)**2 + 2*(omy*dt/2)**2)/(1 + (om*dt/2)**2)
rot[5] = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[6] = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[7] = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[8] = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2)
and om**2 = omx**2 + omy**2 + omz**2
the rotation matrix is determined by:
omx = (q/m)*bx(x(t),y(t)), omy = (q/m)*by(x(t),y(t)), and
omz = (q/m)*bz(x(t),y(t)).
position equations used are:
x(t+dt)=x(t) + vx(t+dt/2)*dt
y(t+dt)=y(t) + vy(t+dt/2)*dt
fx(x(t),y(t)), fy(x(t),y(t)), and fz(x(t),y(t))
bx(x(t),y(t)), by(x(t),y(t)), and bz(x(t),y(t))
are approximated by interpolation from the nearest grid points:
fx(x,y) = (1-dy)*((1-dx)*fx(n,m)+dx*fx(n+1,m)) + dy*((1-dx)*fx(n,m+1)
+ dx*fx(n+1,m+1))
where n,m = leftmost grid points and dx = x-n, dy = y-m
similarly for fy(x,y), fz(x,y), bx(x,y), by(x,y), bz(x,y)
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = velocity vx of particle n in tile m
ppart[m][3][n] = velocity vy of particle n in tile m
ppart[m][4][n] = velocity vz of particle n in tile m
fxy[k][j][0] = x component of force/charge at grid (j,k)
fxy[k][j][1] = y component of force/charge at grid (j,k)
fxy[k][j][2] = z component of force/charge at grid (j,k)
that is, convolution of electric field over particle shape
bxy[k][j][0] = x component of magnetic field at grid (j,k)
bxy[k][j][1] = y component of magnetic field at grid (j,k)
bxy[k][j][2] = z component of magnetic field at grid (j,k)
that is, the convolution of magnetic field over particle shape
kpic = number of particles per tile
qbm = particle charge/mass ratio
dt = time interval between successive calculations
dtc = time interval between successive co-ordinate calculations
kinetic energy/mass at time t is also calculated, using
ek = .5*sum((vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt)**2 +
(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt)**2 +
(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt)**2)
idimp = size of phase space = 5
nppmx = maximum number of particles in tile
nx/ny = system length in x/y direction
mx/my = number of grids in sorting cell in x/y
nxv = first dimension of field arrays, must be >= nx+1
nyv = second dimension of field arrays, must be >= ny+1
mx1 = (system length in x direction - 1)/mx + 1
mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
ipbc = particle boundary condition = (0,1,2,3) =
(none,2d periodic,2d reflecting,mixed reflecting/periodic)
local data */
int noff, moff, npoff, npp, mxv;
int i, j, k, ii, nn, mm, nm;
float qtmh, edgelx, edgely, edgerx, edgery, dxp, dyp, amx, amy;
float dx, dy, dz, ox, oy, oz, acx, acy, acz, omxt, omyt, omzt, omt;
float anorm, rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9;
float x, y;
/* The sizes of the shared memory arrays are as follows: */
/* float sfxy[3*(mx+1)*(my+1)], sbxy[3*(mx+1)*(my+1)]; */
/* float sek[blockDim.x]; */
/* to conserve memory, sek overlaps with sfxy and sbxy */
/* and the name sfxy is used instead of sek */
float *sbxy;
extern __shared__ float sfxy[];
sbxy = &sfxy[3*(mx+1)*(my+1)];
double sum1;
qtmh = 0.5f*qbm*dt;
sum1 = 0.0;
/* set boundary values */
edgelx = 0.0f;
edgely = 0.0f;
edgerx = (float) nx;
edgery = (float) ny;
if (ipbc==2) {
edgelx = 1.0f;
edgely = 1.0f;
edgerx = (float) (nx-1);
edgery = (float) (ny-1);
}
else if (ipbc==3) {
edgelx = 1.0f;
edgerx = (float) (nx-1);
}
mxv = mx + 1;
/* k = tile number */
k = blockIdx.x + gridDim.x*blockIdx.y;
/* loop over tiles */
if (k < mxy1) {
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
npp = kpic[k];
npoff = idimp*nppmx*k;
/* load local fields from global array */
nn = (mx < nx-noff ? mx : nx-noff) + 1;
mm = (my < ny-moff ? my : ny-moff) + 1;
ii = threadIdx.x;
while (ii < mxv*(my+1)) {
j = ii/mxv;
i = ii - mxv*j;
if ((i < nn) && (j < mm)) {
sfxy[3*ii] = fxy[3*(i+noff+nxv*(j+moff))];
sfxy[1+3*ii] = fxy[1+3*(i+noff+nxv*(j+moff))];
sfxy[2+3*ii] = fxy[2+3*(i+noff+nxv*(j+moff))];
}
ii += blockDim.x;
}
ii = threadIdx.x;
while (ii < mxv*(my+1)) {
j = ii/mxv;
i = ii - mxv*j;
if ((i < nn) && (j < mm)) {
sbxy[3*ii] = bxy[3*(i+noff+nxv*(j+moff))];
sbxy[1+3*ii] = bxy[1+3*(i+noff+nxv*(j+moff))];
sbxy[2+3*ii] = bxy[2+3*(i+noff+nxv*(j+moff))];
}
ii += blockDim.x;
}
/* synchronize threads */
__syncthreads();
/* loop over particles in tile */
j = threadIdx.x;
while (j < npp) {
/* find interpolation weights */
x = ppart[j+npoff];
y = ppart[j+npoff+nppmx];
nn = x;
mm = y;
dxp = x - (float) nn;
dyp = y - (float) mm;
nm = 3*(nn - noff) + 3*mxv*(mm - moff);
amx = 1.0f - dxp;
amy = 1.0f - dyp;
/* find electric field */
nn = nm;
dx = amx*sfxy[nn];
dy = amx*sfxy[nn+1];
dz = amx*sfxy[nn+2];
mm = nn + 3;
dx = amy*(dxp*sfxy[mm] + dx);
dy = amy*(dxp*sfxy[mm+1] + dy);
dz = amy*(dxp*sfxy[mm+2] + dz);
nn += 3*mxv;
acx = amx*sfxy[nn];
acy = amx*sfxy[nn+1];
acz = amx*sfxy[nn+2];
mm = nn + 3;
dx += dyp*(dxp*sfxy[mm] + acx);
dy += dyp*(dxp*sfxy[mm+1] + acy);
dz += dyp*(dxp*sfxy[mm+2] + acz);
/* find magnetic field */
nn = nm;
ox = amx*sbxy[nn];
oy = amx*sbxy[nn+1];
oz = amx*sbxy[nn+2];
mm = nn + 3;
ox = amy*(dxp*sbxy[mm] + ox);
oy = amy*(dxp*sbxy[mm+1] + oy);
oz = amy*(dxp*sbxy[mm+2] + oz);
nn += 3*mxv;
acx = amx*sbxy[nn];
acy = amx*sbxy[nn+1];
acz = amx*sbxy[nn+2];
mm = nn + 3;
ox += dyp*(dxp*sbxy[mm] + acx);
oy += dyp*(dxp*sbxy[mm+1] + acy);
oz += dyp*(dxp*sbxy[mm+2] + acz);
/* calculate half impulse */
dx *= qtmh;
dy *= qtmh;
dz *= qtmh;
/* half acceleration */
acx = ppart[j+npoff+nppmx*2] + dx;
acy = ppart[j+npoff+nppmx*3] + dy;
acz = ppart[j+npoff+nppmx*4] + dz;
/* time-centered kinetic energy */
sum1 += (acx*acx + acy*acy + acz*acz);
/* calculate cyclotron frequency */
omxt = qtmh*ox;
omyt = qtmh*oy;
omzt = qtmh*oz;
/* calculate rotation matrix */
omt = omxt*omxt + omyt*omyt + omzt*omzt;
anorm = 2.0f/(1.0f + omt);
omt = 0.5f*(1.0f - omt);
rot4 = omxt*omyt;
rot7 = omxt*omzt;
rot8 = omyt*omzt;
rot1 = omt + omxt*omxt;
rot5 = omt + omyt*omyt;
rot9 = omt + omzt*omzt;
rot2 = omzt + rot4;
rot4 -= omzt;
rot3 = -omyt + rot7;
rot7 += omyt;
rot6 = omxt + rot8;
rot8 -= omxt;
/* new velocity */
dx += (rot1*acx + rot2*acy + rot3*acz)*anorm;
dy += (rot4*acx + rot5*acy + rot6*acz)*anorm;
dz += (rot7*acx + rot8*acy + rot9*acz)*anorm;
ppart[j+npoff+nppmx*2] = dx;
ppart[j+npoff+nppmx*3] = dy;
ppart[j+npoff+nppmx*4] = dz;
/* new position */
dx = x + dx*dtc;
dy = y + dy*dtc;
/* reflecting boundary conditions */
if (ipbc==2) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = ppart[j+npoff];
ppart[j+npoff+nppmx*2] = -ppart[j+npoff+nppmx*2];
}
if ((dy < edgely) || (dy >= edgery)) {
dy = ppart[j+npoff+nppmx];
ppart[j+npoff+nppmx*3] = -ppart[j+npoff+nppmx*3];
}
}
/* mixed reflecting/periodic boundary conditions */
else if (ipbc==3) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = ppart[j+npoff];
ppart[j+npoff+nppmx*2] = -ppart[j+npoff+nppmx*2];
}
}
/* set new position */
ppart[j+npoff] = dx;
ppart[j+npoff+nppmx] = dy;
j += blockDim.x;
}
/* synchronize threads */
__syncthreads();
/* add kinetic energies in tile */
sfxy[threadIdx.x] = (float) sum1;
/* synchronize threads */
__syncthreads();
lsum2(sfxy,blockDim.x);
/* normalize kinetic energy of tile */
if (threadIdx.x==0) {
ek[k] = 0.5f*sfxy[0];
}
}
return;
}
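/*--------------------------------------------------------------------*/
/* A minimal host-side sketch (not part of the original library; the  */
/* helper name is hypothetical): dynamic shared memory, in bytes,     */
/* needed by the push kernel above.  The per-block energy array sek   */
/* overlaps sfxy and sbxy, so the size is the larger of the two       */
/* requirements.                                                      */
static inline int bppush23l_shmem_bytes(int mx, int my, int nblock) {
/* sfxy and sbxy each hold 3*(mx+1)*(my+1) floats */
   int ns = 6*(mx+1)*(my+1)*sizeof(float);
/* sek holds blockDim.x floats and overlaps the field arrays */
   int ne = nblock*sizeof(float);
   return ns > ne ? ns : ne;
}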
/*--------------------------------------------------------------------*/
__global__ void gpubppushf23l(float ppart[], float fxy[], float bxy[],
int kpic[], int ncl[], int ihole[],
float qbm, float dt, float dtc, float *ek,
int idimp, int nppmx, int nx, int ny,
int mx, int my, int nxv, int nyv, int mx1,
int mxy1, int ntmax, int *irc) {
/* for 2-1/2d code, this subroutine updates particle co-ordinates and
velocities using leap-frog scheme in time and first-order linear
interpolation in space, with magnetic field and periodic boundary
conditions, using the Boris Mover.
also determines list of particles which are leaving this tile
threaded version using guard cells
data read in tiles
particles stored in segmented array
119 flops/particle, 1 divide, 29 loads, 5 stores
input: all except ncl, ihole, irc, output: ppart, ncl, ihole, irc, ek
velocity equations used are:
vx(t+dt/2) = rot(1)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(2)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(3)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fx(x(t),y(t))*dt
vy(t+dt/2) = rot(4)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(5)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(6)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fy(x(t),y(t))*dt
vz(t+dt/2) = rot(7)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(8)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(9)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fz(x(t),y(t))*dt
where q/m is charge/mass, and the rotation matrix is given by:
rot[0] = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2)
rot[1] = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
rot[2] = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[3] = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
rot[4] = (1 - (om*dt/2)**2 + 2*(omy*dt/2)**2)/(1 + (om*dt/2)**2)
rot[5] = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[6] = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[7] = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[8] = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2)
and om**2 = omx**2 + omy**2 + omz**2
the rotation matrix is determined by:
omx = (q/m)*bx(x(t),y(t)), omy = (q/m)*by(x(t),y(t)), and
omz = (q/m)*bz(x(t),y(t)).
position equations used are:
x(t+dt)=x(t) + vx(t+dt/2)*dt
y(t+dt)=y(t) + vy(t+dt/2)*dt
fx(x(t),y(t)), fy(x(t),y(t)), and fz(x(t),y(t))
bx(x(t),y(t)), by(x(t),y(t)), and bz(x(t),y(t))
are approximated by interpolation from the nearest grid points:
fx(x,y) = (1-dy)*((1-dx)*fx(n,m)+dx*fx(n+1,m)) + dy*((1-dx)*fx(n,m+1)
+ dx*fx(n+1,m+1))
where n,m = leftmost grid points and dx = x-n, dy = y-m
similarly for fy(x,y), fz(x,y), bx(x,y), by(x,y), bz(x,y)
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = velocity vx of particle n in tile m
ppart[m][3][n] = velocity vy of particle n in tile m
ppart[m][4][n] = velocity vz of particle n in tile m
fxy[k][j][0] = x component of force/charge at grid (j,k)
fxy[k][j][1] = y component of force/charge at grid (j,k)
fxy[k][j][2] = z component of force/charge at grid (j,k)
that is, convolution of electric field over particle shape
bxy[k][j][0] = x component of magnetic field at grid (j,k)
bxy[k][j][1] = y component of magnetic field at grid (j,k)
bxy[k][j][2] = z component of magnetic field at grid (j,k)
that is, the convolution of magnetic field over particle shape
kpic[k] = number of particles in tile k
ncl[k][i] = number of particles going to destination i, tile k
ihole[k][:][0] = location of hole in array left by departing particle
ihole[k][:][1] = destination of particle leaving hole
ihole[k][0][0] = ih, number of holes left (error, if negative)
qbm = particle charge/mass ratio
dt = time interval between successive calculations
dtc = time interval between successive co-ordinate calculations
kinetic energy/mass at time t is also calculated, using
ek = .5*sum((vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt)**2 +
(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt)**2 +
(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt)**2)
idimp = size of phase space = 5
nppmx = maximum number of particles in tile
nx/ny = system length in x/y direction
mx/my = number of grids in sorting cell in x/y
nxv = first dimension of field arrays, must be >= nx+1
nyv = second dimension of field arrays, must be >= ny+1
mx1 = (system length in x direction - 1)/mx + 1
mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
ntmax = size of hole array for particles leaving tiles
irc = maximum overflow, returned only if error occurs, when irc > 0
optimized version
local data */
int noff, moff, npoff, nhoff, mhoff, npp, mxv;
int i, j, k, ii, ih, nn, mm, nm;
float qtmh, dxp, dyp, amx, amy, dx, dy, dz, ox, oy, oz;
float acx, acy, acz, omxt, omyt, omzt, omt, anorm;
float rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9;
float anx, any, edgelx, edgely, edgerx, edgery;
float x, y;
/* The sizes of the shared memory arrays are as follows: */
/* float sfxy[3*(mx+1)*(my+1)], sbxy[3*(mx+1)*(my+1)]; */
/* float sek[blockDim.x]; */
/* int sih[blockDim.x], sncl[8], nh[1]; */
/* to conserve memory, sek overlaps with sfxy, sbxy, and sih */
/* and the name sfxy is used instead of sek */
float *sbxy;
int *sncl, *sih, *nh;
extern __shared__ float sfxy[];
sbxy = &sfxy[3*(mx+1)*(my+1)];
sih = (int *)&sfxy[6*(mx+1)*(my+1)];
sncl = (int *)&sih[blockDim.x];
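/* nh temporarily marks the end of the overlapping sek array; sncl is */
/* placed after whichever of sih or sek ends last                     */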
nh = (int *)&sfxy[blockDim.x];
sncl = sncl > nh ? sncl : nh;
nh = (int *)&sncl[8];
double sum1;
qtmh = 0.5f*qbm*dt;
anx = (float) nx;
any = (float) ny;
sum1 = 0.0;
mxv = mx + 1;
/* k = tile number */
k = blockIdx.x + gridDim.x*blockIdx.y;
/* loop over tiles */
if (k < mxy1) {
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
npp = kpic[k];
npoff = idimp*nppmx*k;
nn = nx - noff;
nn = mx < nn ? mx : nn;
mm = ny - moff;
mm = my < mm ? my : mm;
edgelx = noff;
edgerx = noff + nn;
edgely = moff;
edgery = moff + mm;
nn += 1;
mm += 1;
/* load local fields from global array */
ii = threadIdx.x;
while (ii < mxv*(my+1)) {
j = ii/mxv;
i = ii - mxv*j;
if ((i < nn) && (j < mm)) {
sfxy[3*ii] = fxy[3*(i+noff+nxv*(j+moff))];
sfxy[1+3*ii] = fxy[1+3*(i+noff+nxv*(j+moff))];
sfxy[2+3*ii] = fxy[2+3*(i+noff+nxv*(j+moff))];
}
ii += blockDim.x;
}
ii = threadIdx.x;
while (ii < mxv*(my+1)) {
j = ii/mxv;
i = ii - mxv*j;
if ((i < nn) && (j < mm)) {
sbxy[3*ii] = bxy[3*(i+noff+nxv*(j+moff))];
sbxy[1+3*ii] = bxy[1+3*(i+noff+nxv*(j+moff))];
sbxy[2+3*ii] = bxy[2+3*(i+noff+nxv*(j+moff))];
}
ii += blockDim.x;
}
/* clear counters */
j = threadIdx.x;
while (j < 8) {
sncl[j] = 0;
j += blockDim.x;
}
if (threadIdx.x==0) {
nh[0] = 0;
}
/* synchronize threads */
__syncthreads();
/* loop over particles in tile */
ii = (npp - 1)/(int) blockDim.x + 1;
nhoff = 0;
for (i = 0; i < ii; i++) {
j = threadIdx.x + blockDim.x*i;
sih[threadIdx.x] = 0;
if (j < npp) {
/* find interpolation weights */
x = ppart[j+npoff];
y = ppart[j+nppmx+npoff];
nn = x;
mm = y;
dxp = x - (float) nn;
dyp = y - (float) mm;
nm = 3*(nn - noff) + 3*mxv*(mm - moff);
amx = 1.0f - dxp;
amy = 1.0f - dyp;
/* find electric field */
nn = nm;
dx = amx*sfxy[nn];
dy = amx*sfxy[nn+1];
dz = amx*sfxy[nn+2];
mm = nn + 3;
dx = amy*(dxp*sfxy[mm] + dx);
dy = amy*(dxp*sfxy[mm+1] + dy);
dz = amy*(dxp*sfxy[mm+2] + dz);
nn += 3*mxv;
acx = amx*sfxy[nn];
acy = amx*sfxy[nn+1];
acz = amx*sfxy[nn+2];
mm = nn + 3;
dx += dyp*(dxp*sfxy[mm] + acx);
dy += dyp*(dxp*sfxy[mm+1] + acy);
dz += dyp*(dxp*sfxy[mm+2] + acz);
/* find magnetic field */
nn = nm;
ox = amx*sbxy[nn];
oy = amx*sbxy[nn+1];
oz = amx*sbxy[nn+2];
mm = nn + 3;
ox = amy*(dxp*sbxy[mm] + ox);
oy = amy*(dxp*sbxy[mm+1] + oy);
oz = amy*(dxp*sbxy[mm+2] + oz);
nn += 3*mxv;
acx = amx*sbxy[nn];
acy = amx*sbxy[nn+1];
acz = amx*sbxy[nn+2];
mm = nn + 3;
ox += dyp*(dxp*sbxy[mm] + acx);
oy += dyp*(dxp*sbxy[mm+1] + acy);
oz += dyp*(dxp*sbxy[mm+2] + acz);
/* calculate half impulse */
dx *= qtmh;
dy *= qtmh;
dz *= qtmh;
/* half acceleration */
acx = ppart[j+2*nppmx+npoff] + dx;
acy = ppart[j+3*nppmx+npoff] + dy;
acz = ppart[j+4*nppmx+npoff] + dz;
/* time-centered kinetic energy */
sum1 += (acx*acx + acy*acy + acz*acz);
/* calculate cyclotron frequency */
omxt = qtmh*ox;
omyt = qtmh*oy;
omzt = qtmh*oz;
/* calculate rotation matrix */
omt = omxt*omxt + omyt*omyt + omzt*omzt;
anorm = 2.0f/(1.0f + omt);
omt = 0.5f*(1.0f - omt);
rot4 = omxt*omyt;
rot7 = omxt*omzt;
rot8 = omyt*omzt;
rot1 = omt + omxt*omxt;
rot5 = omt + omyt*omyt;
rot9 = omt + omzt*omzt;
rot2 = omzt + rot4;
rot4 -= omzt;
rot3 = -omyt + rot7;
rot7 += omyt;
rot6 = omxt + rot8;
rot8 -= omxt;
/* new velocity */
dx += (rot1*acx + rot2*acy + rot3*acz)*anorm;
dy += (rot4*acx + rot5*acy + rot6*acz)*anorm;
dz += (rot7*acx + rot8*acy + rot9*acz)*anorm;
ppart[j+2*nppmx+npoff] = dx;
ppart[j+3*nppmx+npoff] = dy;
ppart[j+4*nppmx+npoff] = dz;
/* new position */
dx = x + dx*dtc;
dy = y + dy*dtc;
/* find particles going out of bounds */
mm = 0;
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* mm = direction particle is going */
if (dx >= edgerx) {
if (dx >= anx)
dx -= anx;
mm = 2;
}
else if (dx < edgelx) {
if (dx < 0.0f) {
dx += anx;
if (dx < anx)
mm = 1;
else
dx = 0.0f;
}
else {
mm = 1;
}
}
if (dy >= edgery) {
if (dy >= any)
dy -= any;
mm += 6;
}
else if (dy < edgely) {
if (dy < 0.0f) {
dy += any;
if (dy < any)
mm += 3;
else
dy = 0.0f;
}
else {
mm += 3;
}
}
/* using prefix scan for ih to keep holes ordered */
if (mm > 0) {
atomicAdd(&sncl[mm-1],1);
sih[threadIdx.x] = 1;
}
}
/* synchronize threads */
__syncthreads();
nn = npp - blockDim.x*i;
if (nn > blockDim.x)
nn = blockDim.x;
/* perform local prefix reduction */
liscan2(sih,nn);
if (j < npp) {
/* set new position */
ppart[j+npoff] = dx;
ppart[j+nppmx+npoff] = dy;
/* write out location and direction of departing particles */
ih = sih[threadIdx.x];
mhoff = 0;
if (threadIdx.x > 0)
mhoff = sih[threadIdx.x-1];
/* this thread has a hole present */
if (ih > mhoff) {
ih += nhoff;
if (ih <= ntmax) {
ihole[2*(ih+(ntmax+1)*k)] = j + 1;
ihole[1+2*(ih+(ntmax+1)*k)] = mm;
}
else {
nh[0] = 1;
}
}
}
/* update number of holes in this iteration */
if (nn > 0)
nhoff += sih[nn-1];
/* synchronize threads */
__syncthreads();
}
/* add kinetic energies in tile */
sfxy[threadIdx.x] = (float) sum1;
/* synchronize threads */
__syncthreads();
lsum2(sfxy,blockDim.x);
/* write out counters */
j = threadIdx.x;
while (j < 8) {
ncl[j+8*k] = sncl[j];
j += blockDim.x;
}
/* set error and end of file flag */
if (threadIdx.x==0) {
/* ihole overflow */
ih = nhoff;
if (nh[0] > 0) {
*irc = ih;
ih = -ih;
}
ihole[2*(ntmax+1)*k] = ih;
/* normalize kinetic energy of tile */
ek[k] = 0.5f*sfxy[0];
}
}
return;
}
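/*--------------------------------------------------------------------*/
/* A minimal host-side launch sketch for gpubppushf23l (illustrative, */
/* not part of the original library; the wrapper name, the nblock     */
/* argument, and the assumption that all array arguments are device   */
/* pointers are ours).  One thread block handles one tile, so the     */
/* grid must cover mxy1 tiles, and the dynamic shared memory must     */
/* hold sfxy, sbxy, sih, sncl and nh as laid out in the kernel above. */
static inline void launch_gpubppushf23l(float ppart[], float fxy[],
                                        float bxy[], int kpic[],
                                        int ncl[], int ihole[],
                                        float qbm, float dt, float dtc,
                                        float *ek, int idimp, int nppmx,
                                        int nx, int ny, int mx, int my,
                                        int nxv, int nyv, int mx1,
                                        int mxy1, int ntmax, int *irc,
                                        int nblock) {
   dim3 dimBlock(nblock);
/* gridDim.x*gridDim.y covers mxy1 = mx1*my1 tiles, matching          */
/* k = blockIdx.x + gridDim.x*blockIdx.y in the kernel                */
   dim3 dimGrid(mx1,(mxy1 - 1)/mx1 + 1);
/* 6*(mx+1)*(my+1) floats for sfxy+sbxy, then sih[nblock], sncl[8]    */
/* and nh[1]                                                          */
   int ns = (6*(mx+1)*(my+1) + nblock + 9)*sizeof(float);
   gpubppushf23l<<<dimGrid,dimBlock,ns>>>(ppart,fxy,bxy,kpic,ncl,ihole,
                                          qbm,dt,dtc,ek,idimp,nppmx,nx,
                                          ny,mx,my,nxv,nyv,mx1,mxy1,
                                          ntmax,irc);
}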
/*--------------------------------------------------------------------*/
__global__ void gpurbppush23l(float ppart[], float fxy[], float bxy[],
int kpic[], float qbm, float dt,
float dtc, float ci, float *ek, int idimp,
int nppmx, int nx, int ny, int mx, int my,
int nxv, int nyv, int mx1, int mxy1,
int ipbc) {
/* for 2-1/2d code, this subroutine updates particle co-ordinates and
velocities using leap-frog scheme in time and first-order linear
interpolation in space, for relativistic particles with magnetic field,
using the Boris Mover.
threaded version using guard cells
data read in tiles
particles stored in segmented array
131 flops/particle, 4 divides, 2 sqrts, 25 loads, 5 stores
input: all, output: ppart, ek
momentum equations used are:
px(t+dt/2) = rot(1)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(2)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(3)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fx(x(t),y(t))*dt
py(t+dt/2) = rot(4)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(5)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(6)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fy(x(t),y(t))*dt
pz(t+dt/2) = rot(7)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(8)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(9)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fz(x(t),y(t))*dt
where q/m is charge/mass, and the rotation matrix is given by:
rot[0] = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2)
rot[1] = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
rot[2] = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[3] = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
rot[4] = (1 - (om*dt/2)**2 + 2*(omy*dt/2)**2)/(1 + (om*dt/2)**2)
rot[5] = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[6] = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[7] = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[8] = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2)
and om**2 = omx**2 + omy**2 + omz**2
the rotation matrix is determined by:
omx = (q/m)*bx(x(t),y(t))*gami, omy = (q/m)*by(x(t),y(t))*gami, and
omz = (q/m)*bz(x(t),y(t))*gami,
where gami = 1./sqrt(1.+(px(t)*px(t)+py(t)*py(t)+pz(t)*pz(t))*ci*ci)
position equations used are:
x(t+dt) = x(t) + px(t+dt/2)*dtg
y(t+dt) = y(t) + py(t+dt/2)*dtg
where dtg = dtc/sqrt(1.+(px(t+dt/2)*px(t+dt/2)+py(t+dt/2)*py(t+dt/2)+
pz(t+dt/2)*pz(t+dt/2))*ci*ci)
fx(x(t),y(t)), fy(x(t),y(t)), and fz(x(t),y(t))
bx(x(t),y(t)), by(x(t),y(t)), and bz(x(t),y(t))
are approximated by interpolation from the nearest grid points:
fx(x,y) = (1-dy)*((1-dx)*fx(n,m)+dx*fx(n+1,m)) + dy*((1-dx)*fx(n,m+1)
+ dx*fx(n+1,m+1))
where n,m = leftmost grid points and dx = x-n, dy = y-m
similarly for fy(x,y), fz(x,y), bx(x,y), by(x,y), bz(x,y)
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = x momentum of particle n in tile m
ppart[m][3][n] = y momentum of particle n in tile m
ppart[m][4][n] = z momentum of particle n in tile m
fxy[k][j][0] = x component of force/charge at grid (j,k)
fxy[k][j][1] = y component of force/charge at grid (j,k)
fxy[k][j][2] = z component of force/charge at grid (j,k)
that is, convolution of electric field over particle shape
bxy[k][j][0] = x component of magnetic field at grid (j,k)
bxy[k][j][1] = y component of magnetic field at grid (j,k)
bxy[k][j][2] = z component of magnetic field at grid (j,k)
that is, the convolution of magnetic field over particle shape
kpic = number of particles per tile
qbm = particle charge/mass ratio
dt = time interval between successive calculations
dtc = time interval between successive co-ordinate calculations
ci = reciprocal of velocity of light
kinetic energy/mass at time t is also calculated, using
ek = gami*sum((px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt)**2 +
(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt)**2 +
(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt)**2)/(1. + gami)
idimp = size of phase space = 5
nppmx = maximum number of particles in tile
nx/ny = system length in x/y direction
mx/my = number of grids in sorting cell in x/y
nxv = first dimension of field arrays, must be >= nx+1
nyv = second dimension of field arrays, must be >= ny+1
mx1 = (system length in x direction - 1)/mx + 1
mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
ipbc = particle boundary condition = (0,1,2,3) =
(none,2d periodic,2d reflecting,mixed reflecting/periodic)
local data */
int noff, moff, npoff, npp, mxv;
int i, j, k, ii, nn, mm, nm;
float qtmh, ci2, edgelx, edgely, edgerx, edgery, dxp, dyp, amx, amy;
float dx, dy, dz, ox, oy, oz, acx, acy, acz, p2, gami, qtmg, dtg;
float omxt, omyt, omzt, omt, anorm;
float rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9;
float x, y;
/* The sizes of the shared memory arrays are as follows: */
/* float sfxy[3*(mx+1)*(my+1)], sbxy[3*(mx+1)*(my+1)]; */
/* float sek[blockDim.x]; */
/* to conserve memory, sek overlaps with sfxy and sbxy */
/* and the name sfxy is used instead of sek */
float *sbxy;
extern __shared__ float sfxy[];
sbxy = &sfxy[3*(mx+1)*(my+1)];
double sum1;
qtmh = 0.5f*qbm*dt;
ci2 = ci*ci;
sum1 = 0.0;
/* set boundary values */
edgelx = 0.0f;
edgely = 0.0f;
edgerx = (float) nx;
edgery = (float) ny;
if (ipbc==2) {
edgelx = 1.0f;
edgely = 1.0f;
edgerx = (float) (nx-1);
edgery = (float) (ny-1);
}
else if (ipbc==3) {
edgelx = 1.0f;
edgerx = (float) (nx-1);
}
mxv = mx + 1;
/* k = tile number */
k = blockIdx.x + gridDim.x*blockIdx.y;
/* loop over tiles */
if (k < mxy1) {
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
npp = kpic[k];
npoff = idimp*nppmx*k;
/* load local fields from global array */
nn = (mx < nx-noff ? mx : nx-noff) + 1;
mm = (my < ny-moff ? my : ny-moff) + 1;
ii = threadIdx.x;
while (ii < mxv*(my+1)) {
j = ii/mxv;
i = ii - mxv*j;
if ((i < nn) && (j < mm)) {
sfxy[3*ii] = fxy[3*(i+noff+nxv*(j+moff))];
sfxy[1+3*ii] = fxy[1+3*(i+noff+nxv*(j+moff))];
sfxy[2+3*ii] = fxy[2+3*(i+noff+nxv*(j+moff))];
}
ii += blockDim.x;
}
ii = threadIdx.x;
while (ii < mxv*(my+1)) {
j = ii/mxv;
i = ii - mxv*j;
if ((i < nn) && (j < mm)) {
sbxy[3*ii] = bxy[3*(i+noff+nxv*(j+moff))];
sbxy[1+3*ii] = bxy[1+3*(i+noff+nxv*(j+moff))];
sbxy[2+3*ii] = bxy[2+3*(i+noff+nxv*(j+moff))];
}
ii += blockDim.x;
}
/* synchronize threads */
__syncthreads();
/* loop over particles in tile */
j = threadIdx.x;
while (j < npp) {
/* find interpolation weights */
x = ppart[j+npoff];
y = ppart[j+npoff+nppmx];
nn = x;
mm = y;
dxp = x - (float) nn;
dyp = y - (float) mm;
nm = 3*(nn - noff) + 3*mxv*(mm - moff);
amx = 1.0f - dxp;
amy = 1.0f - dyp;
/* find electric field */
nn = nm;
dx = amx*sfxy[nn];
dy = amx*sfxy[nn+1];
dz = amx*sfxy[nn+2];
mm = nn + 3;
dx = amy*(dxp*sfxy[mm] + dx);
dy = amy*(dxp*sfxy[mm+1] + dy);
dz = amy*(dxp*sfxy[mm+2] + dz);
nn += 3*mxv;
acx = amx*sfxy[nn];
acy = amx*sfxy[nn+1];
acz = amx*sfxy[nn+2];
mm = nn + 3;
dx += dyp*(dxp*sfxy[mm] + acx);
dy += dyp*(dxp*sfxy[mm+1] + acy);
dz += dyp*(dxp*sfxy[mm+2] + acz);
/* find magnetic field */
nn = nm;
ox = amx*sbxy[nn];
oy = amx*sbxy[nn+1];
oz = amx*sbxy[nn+2];
mm = nn + 3;
ox = amy*(dxp*sbxy[mm] + ox);
oy = amy*(dxp*sbxy[mm+1] + oy);
oz = amy*(dxp*sbxy[mm+2] + oz);
nn += 3*mxv;
acx = amx*sbxy[nn];
acy = amx*sbxy[nn+1];
acz = amx*sbxy[nn+2];
mm = nn + 3;
ox += dyp*(dxp*sbxy[mm] + acx);
oy += dyp*(dxp*sbxy[mm+1] + acy);
oz += dyp*(dxp*sbxy[mm+2] + acz);
/* calculate half impulse */
dx *= qtmh;
dy *= qtmh;
dz *= qtmh;
/* half acceleration */
acx = ppart[j+npoff+nppmx*2] + dx;
acy = ppart[j+npoff+nppmx*3] + dy;
acz = ppart[j+npoff+nppmx*4] + dz;
/* find inverse gamma */
p2 = acx*acx + acy*acy + acz*acz;
gami = 1.0f/sqrtf(1.0f + p2*ci2);
/* renormalize magnetic field */
qtmg = qtmh*gami;
/* time-centered kinetic energy */
sum1 += gami*p2/(1.0f + gami);
/* calculate cyclotron frequency */
omxt = qtmg*ox;
omyt = qtmg*oy;
omzt = qtmg*oz;
/* calculate rotation matrix */
omt = omxt*omxt + omyt*omyt + omzt*omzt;
anorm = 2.0f/(1.0f + omt);
omt = 0.5f*(1.0f - omt);
rot4 = omxt*omyt;
rot7 = omxt*omzt;
rot8 = omyt*omzt;
rot1 = omt + omxt*omxt;
rot5 = omt + omyt*omyt;
rot9 = omt + omzt*omzt;
rot2 = omzt + rot4;
rot4 -= omzt;
rot3 = -omyt + rot7;
rot7 += omyt;
rot6 = omxt + rot8;
rot8 -= omxt;
/* new momentum */
dx += (rot1*acx + rot2*acy + rot3*acz)*anorm;
dy += (rot4*acx + rot5*acy + rot6*acz)*anorm;
dz += (rot7*acx + rot8*acy + rot9*acz)*anorm;
ppart[j+npoff+nppmx*2] = dx;
ppart[j+npoff+nppmx*3] = dy;
ppart[j+npoff+nppmx*4] = dz;
/* update inverse gamma */
p2 = dx*dx + dy*dy + dz*dz;
dtg = dtc/sqrtf(1.0f + p2*ci2);
/* new position */
dx = x + dx*dtg;
dy = y + dy*dtg;
/* reflecting boundary conditions */
if (ipbc==2) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = ppart[j+npoff];
ppart[j+npoff+nppmx*2] = -ppart[j+npoff+nppmx*2];
}
if ((dy < edgely) || (dy >= edgery)) {
dy = ppart[j+npoff+nppmx];
ppart[j+npoff+nppmx*3] = -ppart[j+npoff+nppmx*3];
}
}
/* mixed reflecting/periodic boundary conditions */
else if (ipbc==3) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = ppart[j+npoff];
ppart[j+npoff+nppmx*2] = -ppart[j+npoff+nppmx*2];
}
}
/* set new position */
ppart[j+npoff] = dx;
ppart[j+npoff+nppmx] = dy;
j += blockDim.x;
}
/* synchronize threads */
__syncthreads();
/* add kinetic energies in tile */
sfxy[threadIdx.x] = (float) sum1;
/* synchronize threads */
__syncthreads();
lsum2(sfxy,blockDim.x);
/* normalize kinetic energy of tile */
if (threadIdx.x==0) {
ek[k] = sfxy[0];
}
}
return;
}
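/* Worked check (an added note, not in the original library): in the  */
/* ci = 0 limit the mover above reduces to the non-relativistic one,  */
/* since gami = 1/sqrt(1 + p2*ci*ci) = 1, qtmg = qtmh and dtg = dtc.  */
/* The energy term gami*p2/(1 + gami) then becomes p2/2, which is why */
/* ek[k] is written out without the extra factor of 0.5 used in the   */
/* non-relativistic push kernel.                                      */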
/*--------------------------------------------------------------------*/
__global__ void gpurbppushf23l(float ppart[], float fxy[], float bxy[],
int kpic[], int ncl[], int ihole[],
float qbm, float dt, float dtc, float ci,
float *ek, int idimp, int nppmx, int nx,
int ny, int mx, int my, int nxv, int nyv,
int mx1, int mxy1, int ntmax, int *irc) {
/* for 2-1/2d code, this subroutine updates particle co-ordinates and
velocities using leap-frog scheme in time and first-order linear
interpolation in space, for relativistic particles with magnetic field
and periodic boundary conditions, using the Boris Mover.
also determines list of particles which are leaving this tile
threaded version using guard cells
data read in tiles
particles stored in segmented array
131 flops/particle, 4 divides, 2 sqrts, 25 loads, 5 stores
input: all except ncl, ihole, irc, output: ppart, ncl, ihole, irc, ek
momentum equations used are:
px(t+dt/2) = rot(1)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(2)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(3)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fx(x(t),y(t))*dt
py(t+dt/2) = rot(4)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(5)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(6)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fy(x(t),y(t))*dt
pz(t+dt/2) = rot(7)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(8)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(9)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fz(x(t),y(t))*dt
where q/m is charge/mass, and the rotation matrix is given by:
rot[0] = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2)
rot[1] = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
rot[2] = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[3] = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
rot[4] = (1 - (om*dt/2)**2 + 2*(omy*dt/2)**2)/(1 + (om*dt/2)**2)
rot[5] = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[6] = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[7] = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[8] = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2)
and om**2 = omx**2 + omy**2 + omz**2
the rotation matrix is determined by:
omx = (q/m)*bx(x(t),y(t))*gami, omy = (q/m)*by(x(t),y(t))*gami, and
omz = (q/m)*bz(x(t),y(t))*gami,
where gami = 1./sqrt(1.+(px(t)*px(t)+py(t)*py(t)+pz(t)*pz(t))*ci*ci)
position equations used are:
x(t+dt) = x(t) + px(t+dt/2)*dtg
y(t+dt) = y(t) + py(t+dt/2)*dtg
where dtg = dtc/sqrt(1.+(px(t+dt/2)*px(t+dt/2)+py(t+dt/2)*py(t+dt/2)+
pz(t+dt/2)*pz(t+dt/2))*ci*ci)
fx(x(t),y(t)), fy(x(t),y(t)), and fz(x(t),y(t))
bx(x(t),y(t)), by(x(t),y(t)), and bz(x(t),y(t))
are approximated by interpolation from the nearest grid points:
fx(x,y) = (1-dy)*((1-dx)*fx(n,m)+dx*fx(n+1,m)) + dy*((1-dx)*fx(n,m+1)
+ dx*fx(n+1,m+1))
where n,m = leftmost grid points and dx = x-n, dy = y-m
similarly for fy(x,y), fz(x,y), bx(x,y), by(x,y), bz(x,y)
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = x momentum of particle n in tile m
ppart[m][3][n] = y momentum of particle n in tile m
ppart[m][4][n] = z momentum of particle n in tile m
fxy[k][j][0] = x component of force/charge at grid (j,k)
fxy[k][j][1] = y component of force/charge at grid (j,k)
fxy[k][j][2] = z component of force/charge at grid (j,k)
that is, convolution of electric field over particle shape
bxy[k][j][0] = x component of magnetic field at grid (j,k)
bxy[k][j][1] = y component of magnetic field at grid (j,k)
bxy[k][j][2] = z component of magnetic field at grid (j,k)
that is, the convolution of magnetic field over particle shape
kpic[k] = number of particles in tile k
ncl[k][i] = number of particles going to destination i, tile k
ihole[k][:][0] = location of hole in array left by departing particle
ihole[k][:][1] = destination of particle leaving hole
ihole[k][0][0] = ih, number of holes left (error, if negative)
qbm = particle charge/mass ratio
dt = time interval between successive calculations
dtc = time interval between successive co-ordinate calculations
ci = reciprocal of velocity of light
kinetic energy/mass at time t is also calculated, using
ek = gami*sum((px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt)**2 +
(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt)**2 +
(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt)**2)/(1. + gami)
idimp = size of phase space = 5
nppmx = maximum number of particles in tile
nx/ny = system length in x/y direction
mx/my = number of grids in sorting cell in x/y
nxv = first dimension of field arrays, must be >= nx+1
nyv = second dimension of field arrays, must be >= ny+1
mx1 = (system length in x direction - 1)/mx + 1
mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
ntmax = size of hole array for particles leaving tiles
irc = maximum overflow, returned only if error occurs, when irc > 0
optimized version
local data */
int noff, moff, npoff, nhoff, mhoff, npp, mxv;
int i, j, k, ii, ih, nn, mm, nm;
float qtmh, ci2, dxp, dyp, amx, amy, dx, dy, dz, ox, oy, oz;
float acx, acy, acz, p2, gami, qtmg, dtg, omxt, omyt, omzt, omt;
float anorm, rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9;
float anx, any, edgelx, edgely, edgerx, edgery;
float x, y;
/* The sizes of the shared memory arrays are as follows: */
/* float sfxy[3*(mx+1)*(my+1)], sbxy[3*(mx+1)*(my+1)]; */
/* float sek[blockDim.x]; */
/* int sih[blockDim.x], sncl[8], nh[1]; */
/* to conserve memory, sek overlaps with sfxy, sbxy, and sih */
/* and the name sfxy is used instead of sek */
float *sbxy;
int *sncl, *sih, *nh;
extern __shared__ float sfxy[];
sbxy = &sfxy[3*(mx+1)*(my+1)];
sih = (int *)&sfxy[6*(mx+1)*(my+1)];
sncl = (int *)&sih[blockDim.x];
nh = (int *)&sfxy[blockDim.x];
sncl = sncl > nh ? sncl : nh;
nh = (int *)&sncl[8];
double sum1;
qtmh = 0.5f*qbm*dt;
ci2 = ci*ci;
anx = (float) nx;
any = (float) ny;
sum1 = 0.0;
mxv = mx + 1;
/* k = tile number */
k = blockIdx.x + gridDim.x*blockIdx.y;
/* loop over tiles */
if (k < mxy1) {
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
npp = kpic[k];
npoff = idimp*nppmx*k;
nn = nx - noff;
nn = mx < nn ? mx : nn;
mm = ny - moff;
mm = my < mm ? my : mm;
edgelx = noff;
edgerx = noff + nn;
edgely = moff;
edgery = moff + mm;
nn += 1;
mm += 1;
/* load local fields from global array */
ii = threadIdx.x;
while (ii < mxv*(my+1)) {
j = ii/mxv;
i = ii - mxv*j;
if ((i < nn) && (j < mm)) {
sfxy[3*ii] = fxy[3*(i+noff+nxv*(j+moff))];
sfxy[1+3*ii] = fxy[1+3*(i+noff+nxv*(j+moff))];
sfxy[2+3*ii] = fxy[2+3*(i+noff+nxv*(j+moff))];
}
ii += blockDim.x;
}
ii = threadIdx.x;
while (ii < mxv*(my+1)) {
j = ii/mxv;
i = ii - mxv*j;
if ((i < nn) && (j < mm)) {
sbxy[3*ii] = bxy[3*(i+noff+nxv*(j+moff))];
sbxy[1+3*ii] = bxy[1+3*(i+noff+nxv*(j+moff))];
sbxy[2+3*ii] = bxy[2+3*(i+noff+nxv*(j+moff))];
}
ii += blockDim.x;
}
/* clear counters */
j = threadIdx.x;
while (j < 8) {
sncl[j] = 0;
j += blockDim.x;
}
if (threadIdx.x==0) {
nh[0] = 0;
}
/* synchronize threads */
__syncthreads();
/* loop over particles in tile */
ii = (npp - 1)/(int) blockDim.x + 1;
nhoff = 0;
for (i = 0; i < ii; i++) {
j = threadIdx.x + blockDim.x*i;
sih[threadIdx.x] = 0;
if (j < npp) {
/* find interpolation weights */
x = ppart[j+npoff];
y = ppart[j+nppmx+npoff];
nn = x;
mm = y;
dxp = x - (float) nn;
dyp = y - (float) mm;
nm = 3*(nn - noff) + 3*mxv*(mm - moff);
amx = 1.0f - dxp;
amy = 1.0f - dyp;
/* find electric field */
nn = nm;
dx = amx*sfxy[nn];
dy = amx*sfxy[nn+1];
dz = amx*sfxy[nn+2];
mm = nn + 3;
dx = amy*(dxp*sfxy[mm] + dx);
dy = amy*(dxp*sfxy[mm+1] + dy);
dz = amy*(dxp*sfxy[mm+2] + dz);
nn += 3*mxv;
acx = amx*sfxy[nn];
acy = amx*sfxy[nn+1];
acz = amx*sfxy[nn+2];
mm = nn + 3;
dx += dyp*(dxp*sfxy[mm] + acx);
dy += dyp*(dxp*sfxy[mm+1] + acy);
dz += dyp*(dxp*sfxy[mm+2] + acz);
/* find magnetic field */
nn = nm;
ox = amx*sbxy[nn];
oy = amx*sbxy[nn+1];
oz = amx*sbxy[nn+2];
mm = nn + 3;
ox = amy*(dxp*sbxy[mm] + ox);
oy = amy*(dxp*sbxy[mm+1] + oy);
oz = amy*(dxp*sbxy[mm+2] + oz);
nn += 3*mxv;
acx = amx*sbxy[nn];
acy = amx*sbxy[nn+1];
acz = amx*sbxy[nn+2];
mm = nn + 3;
ox += dyp*(dxp*sbxy[mm] + acx);
oy += dyp*(dxp*sbxy[mm+1] + acy);
oz += dyp*(dxp*sbxy[mm+2] + acz);
/* calculate half impulse */
dx *= qtmh;
dy *= qtmh;
dz *= qtmh;
/* half acceleration */
acx = ppart[j+2*nppmx+npoff] + dx;
acy = ppart[j+3*nppmx+npoff] + dy;
acz = ppart[j+4*nppmx+npoff] + dz;
/* find inverse gamma */
p2 = acx*acx + acy*acy + acz*acz;
gami = 1.0f/sqrtf(1.0f + p2*ci2);
/* renormalize magnetic field */
qtmg = qtmh*gami;
/* time-centered kinetic energy */
sum1 += gami*p2/(1.0f + gami);
/* calculate cyclotron frequency */
omxt = qtmg*ox;
omyt = qtmg*oy;
omzt = qtmg*oz;
/* calculate rotation matrix */
omt = omxt*omxt + omyt*omyt + omzt*omzt;
anorm = 2.0f/(1.0f + omt);
omt = 0.5f*(1.0f - omt);
rot4 = omxt*omyt;
rot7 = omxt*omzt;
rot8 = omyt*omzt;
rot1 = omt + omxt*omxt;
rot5 = omt + omyt*omyt;
rot9 = omt + omzt*omzt;
rot2 = omzt + rot4;
rot4 -= omzt;
rot3 = -omyt + rot7;
rot7 += omyt;
rot6 = omxt + rot8;
rot8 -= omxt;
/* new momentum */
dx += (rot1*acx + rot2*acy + rot3*acz)*anorm;
dy += (rot4*acx + rot5*acy + rot6*acz)*anorm;
dz += (rot7*acx + rot8*acy + rot9*acz)*anorm;
ppart[j+2*nppmx+npoff] = dx;
ppart[j+3*nppmx+npoff] = dy;
ppart[j+4*nppmx+npoff] = dz;
/* update inverse gamma */
p2 = dx*dx + dy*dy + dz*dz;
dtg = dtc/sqrtf(1.0f + p2*ci2);
/* new position */
dx = x + dx*dtg;
dy = y + dy*dtg;
/* find particles going out of bounds */
mm = 0;
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* mm = direction particle is going */
if (dx >= edgerx) {
if (dx >= anx)
dx -= anx;
mm = 2;
}
else if (dx < edgelx) {
if (dx < 0.0f) {
dx += anx;
if (dx < anx)
mm = 1;
else
dx = 0.0f;
}
else {
mm = 1;
}
}
if (dy >= edgery) {
if (dy >= any)
dy -= any;
mm += 6;
}
else if (dy < edgely) {
if (dy < 0.0f) {
dy += any;
if (dy < any)
mm += 3;
else
dy = 0.0f;
}
else {
mm += 3;
}
}
/* using prefix scan for ih to keep holes ordered */
if (mm > 0) {
atomicAdd(&sncl[mm-1],1);
sih[threadIdx.x] = 1;
}
}
/* synchronize threads */
__syncthreads();
nn = npp - blockDim.x*i;
if (nn > blockDim.x)
nn = blockDim.x;
/* perform local prefix reduction */
liscan2(sih,nn);
if (j < npp) {
/* set new position */
ppart[j+npoff] = dx;
ppart[j+nppmx+npoff] = dy;
/* write out location and direction of departing particles */
ih = sih[threadIdx.x];
mhoff = 0;
if (threadIdx.x > 0)
mhoff = sih[threadIdx.x-1];
/* this thread has a hole present */
if (ih > mhoff) {
ih += nhoff;
if (ih <= ntmax) {
ihole[2*(ih+(ntmax+1)*k)] = j + 1;
ihole[1+2*(ih+(ntmax+1)*k)] = mm;
}
else {
nh[0] = 1;
}
}
}
/* update number of holes in this iteration */
if (nn > 0)
nhoff += sih[nn-1];
/* synchronize threads */
__syncthreads();
}
/* add kinetic energies in tile */
sfxy[threadIdx.x] = (float) sum1;
/* synchronize threads */
__syncthreads();
lsum2(sfxy,blockDim.x);
/* write out counters */
j = threadIdx.x;
while (j < 8) {
ncl[j+8*k] = sncl[j];
j += blockDim.x;
}
/* set error and end of file flag */
if (threadIdx.x==0) {
/* ihole overflow */
ih = nhoff;
if (nh[0] > 0) {
*irc = ih;
ih = -ih;
}
ihole[2*(ntmax+1)*k] = ih;
/* normalize kinetic energy of tile */
ek[k] = sfxy[0];
}
}
return;
}
/*--------------------------------------------------------------------*/
__global__ void gpu2ppost2l(float ppart[], float q[], int kpic[],
float qm, int nppmx, int idimp, int mx,
int my, int nxv, int nyv, int mx1,
int mxy1) {
/* for 2d code, this subroutine calculates particle charge density
using first-order linear interpolation, periodic boundaries
threaded version using guard cells
data deposited in tiles
particles stored in segmented array
17 flops/particle, 6 loads, 4 stores
input: all, output: q
charge density is approximated by values at the nearest grid points
q(n,m)=qm*(1.-dx)*(1.-dy)
q(n+1,m)=qm*dx*(1.-dy)
q(n,m+1)=qm*(1.-dx)*dy
q(n+1,m+1)=qm*dx*dy
where n,m = leftmost grid points and dx = x-n, dy = y-m
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
q[k][j] = charge density at grid point j,k
kpic = number of particles per tile
qm = charge on particle, in units of e
nppmx = maximum number of particles in tile
idimp = size of phase space = 4
mx/my = number of grids in sorting cell in x/y
nxv = first dimension of charge array, must be >= nx+1
nyv = second dimension of charge array, must be >= ny+1
mx1 = (system length in x direction - 1)/mx + 1
mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
local data */
int noff, moff, npoff, npp, mxv;
int i, j, k, ii, nn, mm, np, mp;
float dxp, dyp, amx, amy;
/* The size of the shared memory array is as follows: */
/* float sq[(mx+1)*(my+1)] */
extern __shared__ float sq[];
mxv = mx + 1;
/* k = tile number */
k = blockIdx.x + gridDim.x*blockIdx.y;
/* loop over tiles */
if (k < mxy1) {
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
npp = kpic[k];
npoff = idimp*nppmx*k;
/* zero out local accumulator */
i = threadIdx.x;
while (i < mxv*(my+1)) {
sq[i] = 0.0f;
i += blockDim.x;
}
/* synchronize threads */
__syncthreads();
/* loop over particles in tile */
j = threadIdx.x;
while (j < npp) {
/* find interpolation weights */
dxp = ppart[j+npoff];
nn = dxp;
dyp = ppart[j+npoff+nppmx];
mm = dyp;
dxp = qm*(dxp - (float) nn);
dyp = dyp - (float) mm;
nn = nn - noff;
mm = mxv*(mm - moff);
amx = qm - dxp;
mp = mm + mxv;
amy = 1.0f - dyp;
np = nn + 1;
/* deposit charge within tile to local accumulator */
/* original deposit charge, has data hazard on GPU */
/* sq[np+mp] += dxp*dyp; */
/* sq[nn+mp] += amx*dyp; */
/* sq[np+mm] += dxp*amy; */
/* sq[nn+mm] += amx*amy; */
/* for devices with compute capability 2.x */
atomicAdd(&sq[np+mp],dxp*dyp);
atomicAdd(&sq[nn+mp],amx*dyp);
atomicAdd(&sq[np+mm],dxp*amy);
atomicAdd(&sq[nn+mm],amx*amy);
j += blockDim.x;
}
/* synchronize threads */
__syncthreads();
/* deposit charge to global array */
nn = mxv < nxv-noff ? mxv : nxv-noff;
mm = my+1 < nyv-moff ? my+1 : nyv-moff;
ii = threadIdx.x;
while (ii < mxv*(my+1)) {
j = ii/mxv;
i = ii - mxv*j;
if ((i < nn) && (j < mm)) {
/* original deposit charge, has data hazard on GPU */
/* q[i+noff+nxv*(j+moff)] += sq[ii]; */
/* for devices with compute capability 2.x */
atomicAdd(&q[i+noff+nxv*(j+moff)],sq[ii]);
}
ii += blockDim.x;
}
}
return;
}
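/*--------------------------------------------------------------------*/
/* A minimal host-side launch sketch for gpu2ppost2l (illustrative,   */
/* not part of the original library; the wrapper name, the nblock     */
/* argument, and the assumption that all array arguments are device   */
/* pointers are ours).  The dynamic shared memory holds the per-tile  */
/* charge accumulator sq[(mx+1)*(my+1)].                              */
static inline void launch_gpu2ppost2l(float ppart[], float q[],
                                      int kpic[], float qm, int nppmx,
                                      int idimp, int mx, int my,
                                      int nxv, int nyv, int mx1,
                                      int mxy1, int nblock) {
   dim3 dimBlock(nblock);
/* one tile per thread block: gridDim.x*gridDim.y covers mxy1 tiles   */
   dim3 dimGrid(mx1,(mxy1 - 1)/mx1 + 1);
   int ns = (mx+1)*(my+1)*sizeof(float);
   gpu2ppost2l<<<dimGrid,dimBlock,ns>>>(ppart,q,kpic,qm,nppmx,idimp,mx,
                                        my,nxv,nyv,mx1,mxy1);
}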
/*--------------------------------------------------------------------*/
__global__ void gpu2jppost2l(float ppart[], float cu[], int kpic[],
float qm, float dt, int nppmx, int idimp,
int nx, int ny, int mx, int my, int nxv,
int nyv, int mx1, int mxy1, int ipbc) {
/* for 2-1/2d code, this subroutine calculates particle current density
using first-order linear interpolation
in addition, particle positions are advanced a half time-step
threaded version using guard cells
data deposited in tiles
particles stored in segmented array
41 flops/particle, 17 loads, 14 stores
input: all, output: ppart, cu
current density is approximated by values at the nearest grid points
cu(i,n,m)=qci*(1.-dx)*(1.-dy)
cu(i,n+1,m)=qci*dx*(1.-dy)
cu(i,n,m+1)=qci*(1.-dx)*dy
cu(i,n+1,m+1)=qci*dx*dy
where n,m = leftmost grid points and dx = x-n, dy = y-m
and qci = qm*vi, where i = x,y,z
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = velocity vx of particle n in tile m
ppart[m][3][n] = velocity vy of particle n in tile m
ppart[m][4][n] = velocity vz of particle n in tile m
cu[k][j][i] = ith component of current density at grid point j,k
kpic = number of particles per tile
qm = charge on particle, in units of e
dt = time interval between successive calculations
nppmx = maximum number of particles in tile
idimp = size of phase space = 5
nx/ny = system length in x/y direction
mx/my = number of grids in sorting cell in x/y
nxv = first dimension of current array, must be >= nx+1
nyv = second dimension of current array, must be >= ny+1
mx1 = (system length in x direction - 1)/mx + 1
mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
ipbc = particle boundary condition = (0,1,2,3) =
(none,2d periodic,2d reflecting,mixed reflecting/periodic)
local data */
int noff, moff, npoff, npp, mxv;
int i, j, k, ii, nn, mm;
float edgelx, edgely, edgerx, edgery, dxp, dyp, amx, amy;
float x, y, dx, dy, vx, vy, vz;
/* The size of the shared memory array is as follows: */
/* float scu[3*(mx+1)*(my+1)] */
extern __shared__ float scu[];
/* set boundary values */
edgelx = 0.0f;
edgely = 0.0f;
edgerx = (float) nx;
edgery = (float) ny;
if (ipbc==2) {
edgelx = 1.0f;
edgely = 1.0f;
edgerx = (float) (nx-1);
edgery = (float) (ny-1);
}
else if (ipbc==3) {
edgelx = 1.0f;
edgerx = (float) (nx-1);
}
mxv = mx + 1;
/* k = tile number */
k = blockIdx.x + gridDim.x*blockIdx.y;
/* loop over tiles */
if (k < mxy1) {
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
npp = kpic[k];
npoff = idimp*nppmx*k;
/* zero out local accumulator */
i = threadIdx.x;
while (i < 3*mxv*(my+1)) {
scu[i] = 0.0f;
i += blockDim.x;
}
/* synchronize threads */
__syncthreads();
/* loop over particles in tile */
j = threadIdx.x;
while (j < npp) {
/* find interpolation weights */
x = ppart[j+npoff];
nn = x;
y = ppart[j+npoff+nppmx];
mm = y;
dxp = qm*(x - (float) nn);
dyp = y - (float) mm;
nn = 3*(nn - noff) + 3*mxv*(mm - moff);
amx = qm - dxp;
amy = 1.0f - dyp;
/* deposit current */
dx = amx*amy;
dy = dxp*amy;
vx = ppart[j+npoff+nppmx*2];
vy = ppart[j+npoff+nppmx*3];
vz = ppart[j+npoff+nppmx*4];
/* original current deposit, has data hazard on GPU */
/* scu[nn] += vx*dx; */
/* scu[nn+1] += vy*dx; */
/* scu[nn+2] += vz*dx; */
/* for devices with compute capability 2.x */
atomicAdd(&scu[nn],vx*dx);
atomicAdd(&scu[nn+1],vy*dx);
atomicAdd(&scu[nn+2],vz*dx);
dx = amx*dyp;
mm = nn + 3;
/* original current deposit, has data hazard on GPU */
/* scu[mm] += vx*dy; */
/* scu[mm+1] += vy*dy; */
/* scu[mm+2] += vz*dy; */
/* for devices with compute capability 2.x */
atomicAdd(&scu[mm],vx*dy);
atomicAdd(&scu[mm+1],vy*dy);
atomicAdd(&scu[mm+2],vz*dy);
dy = dxp*dyp;
nn += 3*mxv;
/* original current deposit, has data hazard on GPU */
/* scu[nn] += vx*dx; */
/* scu[nn+1] += vy*dx; */
/* scu[nn+2] += vz*dx; */
/* for devices with compute capability 2.x */
atomicAdd(&scu[nn],vx*dx);
atomicAdd(&scu[nn+1],vy*dx);
atomicAdd(&scu[nn+2],vz*dx);
mm = nn + 3;
/* original current deposit, has data hazard on GPU */
/* scu[mm] += vx*dy; */
/* scu[mm+1] += vy*dy; */
/* scu[mm+2] += vz*dy; */
/* for devices with compute capability 2.x */
atomicAdd(&scu[mm],vx*dy);
atomicAdd(&scu[mm+1],vy*dy);
atomicAdd(&scu[mm+2],vz*dy);
/* advance position half a time-step */
dx = x + vx*dt;
dy = y + vy*dt;
/* reflecting boundary conditions */
if (ipbc==2) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = ppart[j+npoff];
ppart[j+npoff+nppmx*2] = -ppart[j+npoff+nppmx*2];
}
if ((dy < edgely) || (dy >= edgery)) {
dy = ppart[j+npoff+nppmx];
ppart[j+npoff+nppmx*3] = -ppart[j+npoff+nppmx*3];
}
}
/* mixed reflecting/periodic boundary conditions */
else if (ipbc==3) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = ppart[j+npoff];
ppart[j+npoff+nppmx*2] = -ppart[j+npoff+nppmx*2];
}
}
/* set new position */
ppart[j+npoff] = dx;
ppart[j+npoff+nppmx] = dy;
j += blockDim.x;
}
/* synchronize threads */
__syncthreads();
/* deposit current to global array */
nn = nxv - noff;
mm = nyv - moff;
nn = mx+1 < nn ? mx+1 : nn;
mm = my+1 < mm ? my+1 : mm;
ii = threadIdx.x;
while (ii < mxv*(my+1)) {
j = ii/mxv;
i = ii - mxv*j;
if ((i < nn) && (j < mm)) {
/* original current deposit, has data hazard on GPU */
/* cu[3*(i+noff+nxv*(j+moff))] += scu[3*ii]; */
/* cu[1+3*(i+noff+nxv*(j+moff))] += scu[1+3*ii]; */
/* cu[2+3*(i+noff+nxv*(j+moff))] += scu[2+3*ii]; */
/* for devices with compute capability 2.x */
atomicAdd(&cu[3*(i+noff+nxv*(j+moff))],scu[3*ii]);
atomicAdd(&cu[1+3*(i+noff+nxv*(j+moff))],scu[1+3*ii]);
atomicAdd(&cu[2+3*(i+noff+nxv*(j+moff))],scu[2+3*ii]);
}
ii += blockDim.x;
}
}
return;
}
/*--------------------------------------------------------------------*/
__global__ void gpu2jppostf2l(float ppart[], float cu[], int kpic[],
int ncl[], int ihole[], float qm,
float dt, int nppmx, int idimp, int nx,
int ny, int mx, int my, int nxv, int nyv,
int mx1, int mxy1, int ntmax, int *irc) {
/* for 2-1/2d code, this subroutine calculates particle current density
using first-order linear interpolation
in addition, particle positions are advanced a half time-step
with periodic boundary conditions.
also determines list of particles which are leaving this tile
threaded version using guard cells
data deposited in tiles
particles stored in segmented array
41 flops/particle, 17 loads, 14 stores
input: all except ncl, ihole, irc,
output: ppart, cu, ncl, ihole, irc
current density is approximated by values at the nearest grid points
cu(i,n,m)=qci*(1.-dx)*(1.-dy)
cu(i,n+1,m)=qci*dx*(1.-dy)
cu(i,n,m+1)=qci*(1.-dx)*dy
cu(i,n+1,m+1)=qci*dx*dy
where n,m = leftmost grid points and dx = x-n, dy = y-m
and qci = qm*vi, where i = x,y,z
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = velocity vx of particle n in tile m
ppart[m][3][n] = velocity vy of particle n in tile m
ppart[m][4][n] = velocity vz of particle n in tile m
cu[k][j][i] = ith component of current density at grid point j,k
kpic[k] = number of particles in tile k
ncl[k][i] = number of particles going to destination i, tile k
ihole[k][:][0] = location of hole in array left by departing particle
ihole[k][:][1] = destination of particle leaving hole
ihole[k][0][0] = ih, number of holes left (error, if negative)
qm = charge on particle, in units of e
dt = time interval between successive calculations
nppmx = maximum number of particles in tile
idimp = size of phase space = 5
nx/ny = system length in x/y direction
mx/my = number of grids in sorting cell in x/y
nxv = first dimension of current array, must be >= nx+1
nyv = second dimension of current array, must be >= ny+1
mx1 = (system length in x direction - 1)/mx + 1
mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
ntmax = size of hole array for particles leaving tiles
irc = maximum overflow, returned only if error occurs, when irc > 0
optimized version
local data */
int noff, moff, npoff, nhoff, mhoff, npp, mxv;
int i, j, k, ii, ih, nn, mm;
float dxp, dyp, amx, amy;
float x, y, dx, dy, vx, vy, vz;
float anx, any, edgelx, edgely, edgerx, edgery;
/* The sizes of the shared memory arrays are as follows: */
/* float scu[3*(mx+1)*(my+1)]; */
/* int sncl[8], sih[blockDim.x], nh[1]; */
int *sncl, *sih, *nh;
extern __shared__ float scu[];
sncl = (int *)&scu[3*(mx+1)*(my+1)];
sih = (int *)&sncl[8];
nh = (int *)&sih[blockDim.x];
anx = (float) nx;
any = (float) ny;
mxv = mx + 1;
/* k = tile number */
k = blockIdx.x + gridDim.x*blockIdx.y;
/* loop over tiles */
if (k < mxy1) {
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
npp = kpic[k];
npoff = idimp*nppmx*k;
nn = nx - noff;
nn = mx < nn ? mx : nn;
mm = ny - moff;
mm = my < mm ? my : mm;
edgelx = noff;
edgerx = noff + nn;
edgely = moff;
edgery = moff + mm;
/* zero out local accumulator */
i = threadIdx.x;
while (i < 3*mxv*(my+1)) {
scu[i] = 0.0f;
i += blockDim.x;
}
/* clear counters */
j = threadIdx.x;
while (j < 8) {
sncl[j] = 0;
j += blockDim.x;
}
if (threadIdx.x==0) {
nh[0] = 0;
}
/* synchronize threads */
__syncthreads();
/* loop over particles in tile */
ii = (npp - 1)/(int) blockDim.x + 1;
nhoff = 0;
for (i = 0; i < ii; i++) {
j = threadIdx.x + blockDim.x*i;
sih[threadIdx.x] = 0;
if (j < npp) {
/* find interpolation weights */
x = ppart[j+npoff];
nn = x;
y = ppart[j+npoff+nppmx];
mm = y;
dxp = qm*(x - (float) nn);
dyp = y - (float) mm;
nn = 3*(nn - noff) + 3*mxv*(mm - moff);
amx = qm - dxp;
amy = 1.0f - dyp;
/* deposit current */
dx = amx*amy;
dy = dxp*amy;
vx = ppart[j+npoff+nppmx*2];
vy = ppart[j+npoff+nppmx*3];
vz = ppart[j+npoff+nppmx*4];
/* original current deposit, has data hazard on GPU */
/* scu[nn] += vx*dx; */
/* scu[nn+1] += vy*dx; */
/* scu[nn+2] += vz*dx; */
/* for devices with compute capability 2.x */
atomicAdd(&scu[nn],vx*dx);
atomicAdd(&scu[nn+1],vy*dx);
atomicAdd(&scu[nn+2],vz*dx);
dx = amx*dyp;
mm = nn + 3;
/* original current deposit, has data hazard on GPU */
/* scu[mm] += vx*dy; */
/* scu[mm+1] += vy*dy; */
/* scu[mm+2] += vz*dy; */
/* for devices with compute capability 2.x */
atomicAdd(&scu[mm],vx*dy);
atomicAdd(&scu[mm+1],vy*dy);
atomicAdd(&scu[mm+2],vz*dy);
dy = dxp*dyp;
nn += 3*mxv;
/* original current deposit, has data hazard on GPU */
/* scu[nn] += vx*dx; */
/* scu[nn+1] += vy*dx; */
/* scu[nn+2] += vz*dx; */
/* for devices with compute capability 2.x */
atomicAdd(&scu[nn],vx*dx);
atomicAdd(&scu[nn+1],vy*dx);
atomicAdd(&scu[nn+2],vz*dx);
mm = nn + 3;
/* original current deposit, has data hazard on GPU */
/* scu[mm] += vx*dy; */
/* scu[mm+1] += vy*dy; */
/* scu[mm+2] += vz*dy; */
/* for devices with compute capability 2.x */
atomicAdd(&scu[mm],vx*dy);
atomicAdd(&scu[mm+1],vy*dy);
atomicAdd(&scu[mm+2],vz*dy);
/* advance position half a time-step */
dx = x + vx*dt;
dy = y + vy*dt;
/* find particles going out of bounds */
mm = 0;
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* mm = direction particle is going */
if (dx >= edgerx) {
if (dx >= anx)
dx -= anx;
mm = 2;
}
else if (dx < edgelx) {
if (dx < 0.0f) {
dx += anx;
if (dx < anx)
mm = 1;
else
dx = 0.0f;
}
else {
mm = 1;
}
}
if (dy >= edgery) {
if (dy >= any)
dy -= any;
mm += 6;
}
else if (dy < edgely) {
if (dy < 0.0f) {
dy += any;
if (dy < any)
mm += 3;
else
dy = 0.0f;
}
else {
mm += 3;
}
}
/* using prefix scan for ih to keep holes ordered */
if (mm > 0) {
atomicAdd(&sncl[mm-1],1);
sih[threadIdx.x] = 1;
}
}
/* synchronize threads */
__syncthreads();
nn = npp - blockDim.x*i;
if (nn > blockDim.x)
nn = blockDim.x;
/* perform local prefix reduction */
liscan2(sih,nn);
if (j < npp) {
/* set new position */
ppart[j+npoff] = dx;
ppart[j+nppmx+npoff] = dy;
/* write out location and direction of departing particles */
ih = sih[threadIdx.x];
mhoff = 0;
if (threadIdx.x > 0)
mhoff = sih[threadIdx.x-1];
/* this thread has a hole present */
if (ih > mhoff) {
ih += nhoff;
if (ih <= ntmax) {
ihole[2*(ih+(ntmax+1)*k)] = j + 1;
ihole[1+2*(ih+(ntmax+1)*k)] = mm;
}
else {
nh[0] = 1;
}
}
}
/* update number of holes in this iteration */
if (nn > 0)
nhoff += sih[nn-1];
/* synchronize threads */
__syncthreads();
}
/* deposit current to global array */
nn = nxv - noff;
mm = nyv - moff;
nn = mx+1 < nn ? mx+1 : nn;
mm = my+1 < mm ? my+1 : mm;
ii = threadIdx.x;
while (ii < mxv*(my+1)) {
j = ii/mxv;
i = ii - mxv*j;
if ((i < nn) && (j < mm)) {
/* original current deposit, has data hazard on GPU */
/* cu[3*(i+noff+nxv*(j+moff))] += scu[3*ii]; */
/* cu[1+3*(i+noff+nxv*(j+moff))] += scu[1+3*ii]; */
/* cu[2+3*(i+noff+nxv*(j+moff))] += scu[2+3*ii]; */
/* for devices with compute capability 2.x */
atomicAdd(&cu[3*(i+noff+nxv*(j+moff))],scu[3*ii]);
atomicAdd(&cu[1+3*(i+noff+nxv*(j+moff))],scu[1+3*ii]);
atomicAdd(&cu[2+3*(i+noff+nxv*(j+moff))],scu[2+3*ii]);
}
ii += blockDim.x;
}
/* write out counters */
j = threadIdx.x;
while (j < 8) {
ncl[j+8*k] = sncl[j];
j += blockDim.x;
}
/* set error and end of file flag */
if (threadIdx.x==0) {
/* ihole overflow */
ih = nhoff;
if (nh[0] > 0) {
*irc = ih;
ih = -ih;
}
ihole[2*(ntmax+1)*k] = ih;
}
}
return;
}
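/*--------------------------------------------------------------------*/
/* A minimal sketch (not part of the original library; the helper     */
/* name is hypothetical): dynamic shared memory, in bytes, needed by  */
/* gpu2jppostf2l above: the current accumulator scu plus the sncl,    */
/* sih and nh hole-bookkeeping arrays.                                */
static inline int gpu2jppostf2l_shmem_bytes(int mx, int my, int nblock) {
   return 3*(mx+1)*(my+1)*sizeof(float) + (nblock + 9)*sizeof(int);
}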
/*--------------------------------------------------------------------*/
__global__ void gpu2rjppost2l(float ppart[], float cu[], int kpic[],
float qm, float dt, float ci, int nppmx,
int idimp, int nx, int ny, int mx, int my,
int nxv, int nyv, int mx1, int mxy1,
int ipbc) {
/* for 2-1/2d code, this subroutine calculates particle current density
using first-order linear interpolation for relativistic particles
in addition, particle positions are advanced a half time-step
threaded version using guard cells
data deposited in tiles
particles stored in segmented array
47 flops/particle, 1 divide, 1 sqrt, 17 loads, 14 stores
input: all, output: ppart, cu
current density is approximated by values at the nearest grid points
cu(i,n,m)=qci*(1.-dx)*(1.-dy)
cu(i,n+1,m)=qci*dx*(1.-dy)
cu(i,n,m+1)=qci*(1.-dx)*dy
cu(i,n+1,m+1)=qci*dx*dy
where n,m = leftmost grid points and dx = x-n, dy = y-m
and qci = qm*pi*gami, where i = x,y,z
where gami = 1./sqrt(1.+sum(pi**2)*ci*ci)
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = x momentum of particle n in tile m
ppart[m][3][n] = y momentum of particle n in tile m
ppart[m][4][n] = z momentum of particle n in tile m
cu[k][j][i] = ith component of current density at grid point j,k
kpic = number of particles per tile
qm = charge on particle, in units of e
dt = time interval between successive calculations
ci = reciprocal of velocity of light
nppmx = maximum number of particles in tile
idimp = size of phase space = 5
nx/ny = system length in x/y direction
mx/my = number of grids in sorting cell in x/y
nxv = first dimension of current array, must be >= nx+1
nyv = second dimension of current array, must be >= ny+1
mx1 = (system length in x direction - 1)/mx + 1
mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
ipbc = particle boundary condition = (0,1,2,3) =
(none,2d periodic,2d reflecting,mixed reflecting/periodic)
local data */
int noff, moff, npoff, npp, mxv;
int i, j, k, ii, nn, mm;
float ci2, edgelx, edgely, edgerx, edgery, dxp, dyp, amx, amy;
float x, y, dx, dy, vx, vy, vz, p2, gami;
/* The size of the shared memory array is as follows: */
/* float scu[3*(mx+1)*(my+1)] */
extern __shared__ float scu[];
ci2 = ci*ci;
/* set boundary values */
edgelx = 0.0f;
edgely = 0.0f;
edgerx = (float) nx;
edgery = (float) ny;
if (ipbc==2) {
edgelx = 1.0f;
edgely = 1.0f;
edgerx = (float) (nx-1);
edgery = (float) (ny-1);
}
else if (ipbc==3) {
edgelx = 1.0f;
edgerx = (float) (nx-1);
}
mxv = mx + 1;
/* k = tile number */
k = blockIdx.x + gridDim.x*blockIdx.y;
/* loop over tiles */
if (k < mxy1) {
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
npp = kpic[k];
npoff = idimp*nppmx*k;
/* zero out local accumulator */
i = threadIdx.x;
while (i < 3*mxv*(my+1)) {
scu[i] = 0.0f;
i += blockDim.x;
}
/* synchronize threads */
__syncthreads();
/* loop over particles in tile */
j = threadIdx.x;
while (j < npp) {
/* find interpolation weights */
x = ppart[j+npoff];
nn = x;
y = ppart[j+nppmx+npoff];
mm = y;
dxp = qm*(x - (float) nn);
dyp = y - (float) mm;
/* find inverse gamma */
vx = ppart[j+npoff+nppmx*2];
vy = ppart[j+npoff+nppmx*3];
vz = ppart[j+npoff+nppmx*4];
p2 = vx*vx + vy*vy + vz*vz;
gami = 1.0f/sqrtf(1.0f + p2*ci2);
/* calculate weights */
nn = 3*(nn - noff) + 3*mxv*(mm - moff);
amx = qm - dxp;
amy = 1.0f - dyp;
/* deposit current */
dx = amx*amy;
dy = dxp*amy;
vx *= gami;
vy *= gami;
vz *= gami;
/* original current deposit, has data hazard on GPU */
/* scu[nn] += vx*dx; */
/* scu[nn+1] += vy*dx; */
/* scu[nn+2] += vz*dx; */
/* for devices with compute capability 2.x */
atomicAdd(&scu[nn],vx*dx);
atomicAdd(&scu[nn+1],vy*dx);
atomicAdd(&scu[nn+2],vz*dx);
dx = amx*dyp;
mm = nn + 3;
/* original current deposit, has data hazard on GPU */
/* scu[mm] += vx*dy; */
/* scu[mm+1] += vy*dy; */
/* scu[mm+2] += vz*dy; */
/* for devices with compute capability 2.x */
atomicAdd(&scu[mm],vx*dy);
atomicAdd(&scu[mm+1],vy*dy);
atomicAdd(&scu[mm+2],vz*dy);
dy = dxp*dyp;
nn += 3*mxv;
/* original current deposit, has data hazard on GPU */
/* scu[nn] += vx*dx; */
/* scu[nn+1] += vy*dx; */
/* scu[nn+2] += vz*dx; */
/* for devices with compute capability 2.x */
atomicAdd(&scu[nn],vx*dx);
atomicAdd(&scu[nn+1],vy*dx);
atomicAdd(&scu[nn+2],vz*dx);
mm = nn + 3;
/* original current deposit, has data hazard on GPU */
/* scu[mm] += vx*dy; */
/* scu[mm+1] += vy*dy; */
/* scu[mm+2] += vz*dy; */
/* for devices with compute capability 2.x */
atomicAdd(&scu[mm],vx*dy);
atomicAdd(&scu[mm+1],vy*dy);
atomicAdd(&scu[mm+2],vz*dy);
/* advance position half a time-step */
dx = x + vx*dt;
dy = y + vy*dt;
/* reflecting boundary conditions */
if (ipbc==2) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = ppart[j+npoff];
ppart[j+npoff+nppmx*2] = -ppart[j+npoff+nppmx*2];
}
if ((dy < edgely) || (dy >= edgery)) {
dy = ppart[j+npoff+nppmx];
ppart[j+npoff+nppmx*3] = -ppart[j+npoff+nppmx*3];
}
}
/* mixed reflecting/periodic boundary conditions */
else if (ipbc==3) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = ppart[j+npoff];
ppart[j+npoff+nppmx*2] = -ppart[j+npoff+nppmx*2];
}
}
/* set new position */
ppart[j+npoff] = dx;
ppart[j+npoff+nppmx] = dy;
j += blockDim.x;
}
/* synchronize threads */
__syncthreads();
/* deposit current to global array */
nn = nxv - noff;
mm = nyv - moff;
nn = mx+1 < nn ? mx+1 : nn;
mm = my+1 < mm ? my+1 : mm;
ii = threadIdx.x;
while (ii < mxv*(my+1)) {
j = ii/mxv;
i = ii - mxv*j;
if ((i < nn) && (j < mm)) {
/* original current deposit, has data hazard on GPU */
/* cu[3*(i+noff+nxv*(j+moff))] += scu[3*ii]; */
/* cu[1+3*(i+noff+nxv*(j+moff))] += scu[1+3*ii]; */
/* cu[2+3*(i+noff+nxv*(j+moff))] += scu[2+3*ii]; */
/* for devices with compute capability 2.x */
atomicAdd(&cu[3*(i+noff+nxv*(j+moff))],scu[3*ii]);
atomicAdd(&cu[1+3*(i+noff+nxv*(j+moff))],scu[1+3*ii]);
atomicAdd(&cu[2+3*(i+noff+nxv*(j+moff))],scu[2+3*ii]);
}
ii += blockDim.x;
}
}
return;
}
/*--------------------------------------------------------------------*/
__global__ void gpu2rjppostf2l(float ppart[], float cu[], int kpic[],
int ncl[], int ihole[], float qm,
float dt, float ci, int nppmx, int idimp,
int nx, int ny, int mx, int my, int nxv,
int nyv, int mx1, int mxy1, int ntmax,
int *irc) {
/* for 2-1/2d code, this subroutine calculates particle current density
using first-order linear interpolation for relativistic particles
in addition, particle positions are advanced a half time-step
with periodic boundary conditions.
also determines list of particles which are leaving this tile
threaded version using guard cells
data deposited in tiles
particles stored in segmented array
47 flops/particle, 1 divide, 1 sqrt, 17 loads, 14 stores
input: all except ncl, ihole, irc,
output: ppart, cu, ncl, ihole, irc
current density is approximated by values at the nearest grid points
cu(i,n,m)=qci*(1.-dx)*(1.-dy)
cu(i,n+1,m)=qci*dx*(1.-dy)
cu(i,n,m+1)=qci*(1.-dx)*dy
cu(i,n+1,m+1)=qci*dx*dy
where n,m = leftmost grid points and dx = x-n, dy = y-m
and qci = qm*pi*gami, where i = x,y,z
where gami = 1./sqrt(1.+sum(pi**2)*ci*ci)
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = x momentum of particle n in tile m
ppart[m][3][n] = y momentum of particle n in tile m
ppart[m][4][n] = z momentum of particle n in tile m
cu[k][j][i] = ith component of current density at grid point j,k
kpic[k] = number of particles in tile k
ncl[k][i] = number of particles going to destination i, tile k
ihole[k][:][0] = location of hole in array left by departing particle
ihole[k][:][1] = destination of particle leaving hole
ihole[k][0][0] = ih, number of holes left (error, if negative)
qm = charge on particle, in units of e
dt = time interval between successive calculations
ci = reciprocal of velocity of light
nppmx = maximum number of particles in tile
idimp = size of phase space = 5
nx/ny = system length in x/y direction
mx/my = number of grids in sorting cell in x/y
nxv = first dimension of current array, must be >= nx+1
nyv = second dimension of current array, must be >= ny+1
mx1 = (system length in x direction - 1)/mx + 1
mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
ntmax = size of hole array for particles leaving tiles
irc = maximum overflow, returned only if error occurs, when irc > 0
optimized version
local data */
int noff, moff, npoff, nhoff, mhoff, npp, mxv;
int i, j, k, ii, ih, nn, mm;
float ci2, dxp, dyp, amx, amy;
float x, y, dx, dy, vx, vy, vz, p2, gami;
float anx, any, edgelx, edgely, edgerx, edgery;
/* The sizes of the shared memory arrays are as follows: */
/* float scu[3*(mx+1)*(my+1)]; */
/* int sncl[8], sih[blockDim.x], nh[1]; */
int *sncl, *sih, *nh;
extern __shared__ float scu[];
sncl = (int *)&scu[3*(mx+1)*(my+1)];
sih = (int *)&sncl[8];
nh = (int *)&sih[blockDim.x];
anx = (float) nx;
any = (float) ny;
ci2 = ci*ci;
mxv = mx + 1;
/* k = tile number */
k = blockIdx.x + gridDim.x*blockIdx.y;
/* loop over tiles */
if (k < mxy1) {
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
npp = kpic[k];
npoff = idimp*nppmx*k;
nn = nx - noff;
nn = mx < nn ? mx : nn;
mm = ny - moff;
mm = my < mm ? my : mm;
edgelx = noff;
edgerx = noff + nn;
edgely = moff;
edgery = moff + mm;
/* zero out local accumulator */
i = threadIdx.x;
while (i < 3*mxv*(my+1)) {
scu[i] = 0.0f;
i += blockDim.x;
}
/* clear counters */
j = threadIdx.x;
while (j < 8) {
sncl[j] = 0;
j += blockDim.x;
}
if (threadIdx.x==0) {
nh[0] = 0;
}
/* synchronize threads */
__syncthreads();
/* loop over particles in tile */
ii = (npp - 1)/(int) blockDim.x + 1;
nhoff = 0;
for (i = 0; i < ii; i++) {
j = threadIdx.x + blockDim.x*i;
sih[threadIdx.x] = 0;
if (j < npp) {
/* find interpolation weights */
x = ppart[j+npoff];
nn = x;
y = ppart[j+npoff+nppmx];
mm = y;
dxp = qm*(x - (float) nn);
dyp = y - (float) mm;
/* find inverse gamma */
vx = ppart[j+npoff+nppmx*2];
vy = ppart[j+npoff+nppmx*3];
vz = ppart[j+npoff+nppmx*4];
p2 = vx*vx + vy*vy + vz*vz;
gami = 1.0f/sqrtf(1.0f + p2*ci2);
/* calculate weights */
nn = 3*(nn - noff) + 3*mxv*(mm - moff);
amx = qm - dxp;
amy = 1.0f - dyp;
/* deposit current */
dx = amx*amy;
dy = dxp*amy;
vx *= gami;
vy *= gami;
vz *= gami;
/* original current deposit, has data hazard on GPU */
/* scu[nn] += vx*dx; */
/* scu[nn+1] += vy*dx; */
/* scu[nn+2] += vz*dx; */
/* for devices with compute capability 2.x */
atomicAdd(&scu[nn],vx*dx);
atomicAdd(&scu[nn+1],vy*dx);
atomicAdd(&scu[nn+2],vz*dx);
dx = amx*dyp;
mm = nn + 3;
/* original current deposit, has data hazard on GPU */
/* scu[mm] += vx*dy; */
/* scu[mm+1] += vy*dy; */
/* scu[mm+2] += vz*dy; */
/* for devices with compute capability 2.x */
atomicAdd(&scu[mm],vx*dy);
atomicAdd(&scu[mm+1],vy*dy);
atomicAdd(&scu[mm+2],vz*dy);
dy = dxp*dyp;
nn += 3*mxv;
/* original current deposit, has data hazard on GPU */
/* scu[nn] += vx*dx; */
/* scu[nn+1] += vy*dx; */
/* scu[nn+2] += vz*dx; */
/* for devices with compute capability 2.x */
atomicAdd(&scu[nn],vx*dx);
atomicAdd(&scu[nn+1],vy*dx);
atomicAdd(&scu[nn+2],vz*dx);
mm = nn + 3;
/* original current deposit, has data hazard on GPU */
/* scu[mm] += vx*dy; */
/* scu[mm+1] += vy*dy; */
/* scu[mm+2] += vz*dy; */
/* for devices with compute capability 2.x */
atomicAdd(&scu[mm],vx*dy);
atomicAdd(&scu[mm+1],vy*dy);
atomicAdd(&scu[mm+2],vz*dy);
/* advance position half a time-step */
dx = x + vx*dt;
dy = y + vy*dt;
/* find particles going out of bounds */
mm = 0;
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* mm = direction particle is going */
if (dx >= edgerx) {
if (dx >= anx)
dx -= anx;
mm = 2;
}
else if (dx < edgelx) {
if (dx < 0.0f) {
dx += anx;
if (dx < anx)
mm = 1;
else
dx = 0.0f;
}
else {
mm = 1;
}
}
if (dy >= edgery) {
if (dy >= any)
dy -= any;
mm += 6;
}
else if (dy < edgely) {
if (dy < 0.0f) {
dy += any;
if (dy < any)
mm += 3;
else
dy = 0.0f;
}
else {
mm += 3;
}
}
/* using prefix scan for ih to keep holes ordered */
if (mm > 0) {
atomicAdd(&sncl[mm-1],1);
sih[threadIdx.x] = 1;
}
}
/* synchronize threads */
__syncthreads();
nn = npp - blockDim.x*i;
if (nn > blockDim.x)
nn = blockDim.x;
/* perform local prefix reduction */
liscan2(sih,nn);
if (j < npp) {
/* set new position */
ppart[j+npoff] = dx;
ppart[j+nppmx+npoff] = dy;
/* write out location and direction of departing particles */
ih = sih[threadIdx.x];
mhoff = 0;
if (threadIdx.x > 0)
mhoff = sih[threadIdx.x-1];
/* this thread has a hole present */
if (ih > mhoff) {
ih += nhoff;
if (ih <= ntmax) {
ihole[2*(ih+(ntmax+1)*k)] = j + 1;
ihole[1+2*(ih+(ntmax+1)*k)] = mm;
}
else {
nh[0] = 1;
}
}
}
/* update number of holes in this iteration */
if (nn > 0)
nhoff += sih[nn-1];
/* synchronize threads */
__syncthreads();
}
/* deposit current to global array */
nn = nxv - noff;
mm = nyv - moff;
nn = mx+1 < nn ? mx+1 : nn;
mm = my+1 < mm ? my+1 : mm;
ii = threadIdx.x;
while (ii < mxv*(my+1)) {
j = ii/mxv;
i = ii - mxv*j;
if ((i < nn) && (j < mm)) {
/* original current deposit, has data hazard on GPU */
/* cu[3*(i+noff+nxv*(j+moff))] += scu[3*ii]; */
/* cu[1+3*(i+noff+nxv*(j+moff))] += scu[1+3*ii]; */
/* cu[2+3*(i+noff+nxv*(j+moff))] += scu[2+3*ii]; */
/* for devices with compute capability 2.x */
atomicAdd(&cu[3*(i+noff+nxv*(j+moff))],scu[3*ii]);
atomicAdd(&cu[1+3*(i+noff+nxv*(j+moff))],scu[1+3*ii]);
atomicAdd(&cu[2+3*(i+noff+nxv*(j+moff))],scu[2+3*ii]);
}
ii += blockDim.x;
}
/* write out counters */
j = threadIdx.x;
while (j < 8) {
ncl[j+8*k] = sncl[j];
j += blockDim.x;
}
/* set error and end of file flag */
if (threadIdx.x==0) {
/* ihole overflow */
ih = nhoff;
if (nh[0] > 0) {
*irc = ih;
ih = -ih;
}
ihole[2*(ntmax+1)*k] = ih;
}
}
return;
}
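/*--------------------------------------------------------------------*/
/* Editorial sketch (hedged, not part of the original library): one
   plausible way to size the dynamic shared memory and launch
   gpu2rjppostf2l, based only on the shared-memory layout documented in
   the kernel above (scu[3*(mx+1)*(my+1)] floats followed by sncl[8],
   sih[blockDim.x] and nh[1] ints). The helper name, the 1D grid of one
   block per tile, and the block-size argument nblock are illustrative
   assumptions. */
static void example_launch_gpu2rjppostf2l(float *ppart, float *cu,
                                          int *kpic, int *ncl, int *ihole,
                                          float qm, float dt, float ci,
                                          int nppmx, int idimp, int nx,
                                          int ny, int mx, int my, int nxv,
                                          int nyv, int mx1, int mxy1,
                                          int ntmax, int *irc, int nblock) {
/* one block per tile; a 2D grid split would also work, since the kernel
   computes k = blockIdx.x + gridDim.x*blockIdx.y */
   dim3 dimBlock(nblock);
   dim3 dimGrid(mxy1);
/* dynamic shared memory: scu floats plus sncl[8], sih[nblock], nh[1] ints */
   int ns = 3*(mx+1)*(my+1)*sizeof(float) + (9+nblock)*sizeof(int);
   gpu2rjppostf2l<<<dimGrid,dimBlock,ns>>>(ppart,cu,kpic,ncl,ihole,qm,dt,
                                           ci,nppmx,idimp,nx,ny,mx,my,nxv,
                                           nyv,mx1,mxy1,ntmax,irc);
}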
/*--------------------------------------------------------------------*/
__global__ void gpucaguard2l(float2 qc[], float q[], int nx, int ny,
int nxe, int nye, int nxvh, int nyv) {
/* copy and accumulate extended periodic scalar field q
into complex output field qc
linear interpolation
nx/ny = system length in x/y direction
nxe = first dimension of input field array q, must be >= nx+1
nye = second dimension of input field array q, must be >= ny+1
nxvh = first dimension of output field array qc, must be >= nx/2+1
nyv = second dimension of output field array qc, must be >= ny */
/* local data */
int j, k, nxh;
float at1, at2;
float2 a;
nxh = nx/2;
k = blockIdx.x;
/* copy interior points */
if (k < ny) {
j = threadIdx.x;
at2 = 0.0f;
while (j < nxh) {
if (k==0) {
at1 = q[2*j+nxe*ny];
at2 = q[2*j+1+nxe*ny];
if (j==0) {
at1 += q[nx] + q[nx+nxe*ny];
}
}
if (k > 0) {
at1 = 0.0f;
if (j==0) {
at1 = q[nx+nxe*k];
}
}
a.x = q[2*j+nxe*k] + at1;
a.y = q[2*j+1+nxe*k] + at2;
qc[j+nxvh*k] = a;
j += blockDim.x;
}
}
return;
}
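/*--------------------------------------------------------------------*/
/* Editorial sketch (hedged): a possible launch for gpucaguard2l. The
   kernel assigns one block per row k (k = blockIdx.x, used for k < ny)
   and strides its threads over j < nx/2, with no dynamic shared memory.
   The helper name and the block size nblock are illustrative. */
static void example_launch_gpucaguard2l(float2 *qc, float *q, int nx,
                                        int ny, int nxe, int nye,
                                        int nxvh, int nyv, int nblock) {
   dim3 dimBlock(nblock);   /* e.g. 128 threads, strided over j < nx/2 */
   dim3 dimGrid(ny);        /* one block per row of the output field   */
   gpucaguard2l<<<dimGrid,dimBlock>>>(qc,q,nx,ny,nxe,nye,nxvh,nyv);
}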
/*--------------------------------------------------------------------*/
__global__ void gpucacguard2l(float2 cuc[], float cu[], int nx, int ny,
int nxe, int nye, int nxvh, int nyv) {
/* copy and accumulate extended periodic vector field cu
into complex output field cuc
linear interpolation
nx/ny = system length in x/y direction
nxe = second dimension of input field array cu, must be >= nx+1
nye = third dimension of input field array cu, must be >= ny+1
nxvh = first dimension of output field array cuc, must be >= nx/2+1
nyv = third dimension of output field array cuc, must be >= ny */
/* local data */
int j, k, nxh;
float at1, at2, at3, at4, at5, at6;
float2 a;
nxh = nx/2;
k = blockIdx.x;
/* copy interior points */
if (k < ny) {
at2 = 0.0f;
at4 = 0.0f;
at6 = 0.0f;
j = threadIdx.x;
while (j < nxh) {
if (k==0) {
at1 = cu[3*(2*j+nxe*ny)];
at2 = cu[3*(2*j+1+nxe*ny)];
at3 = cu[1+3*(2*j+nxe*ny)];
at4 = cu[1+3*(2*j+1+nxe*ny)];
at5 = cu[2+3*(2*j+nxe*ny)];
at6 = cu[2+3*(2*j+1+nxe*ny)];
if (j==0) {
at1 += cu[3*nx] + cu[3*(nx+nxe*ny)];
at3 += cu[1+3*nx] + cu[1+3*(nx+nxe*ny)];
at5 += cu[2+3*nx] + cu[2+3*(nx+nxe*ny)];
}
}
if (k > 0) {
at1 = 0.0f;
at3 = 0.0f;
at5 = 0.0f;
if (j==0) {
at1 = cu[3*(nx+nxe*k)];
at3 = cu[1+3*(nx+nxe*k)];
at5 = cu[2+3*(nx+nxe*k)];
}
}
a.x = cu[3*(2*j+nxe*k)] + at1;
a.y = cu[3*(2*j+1+nxe*k)] + at2;
cuc[j+nxvh*3*k] = a;
a.x = cu[1+3*(2*j+nxe*k)] + at3;
a.y = cu[1+3*(2*j+1+nxe*k)] + at4;
cuc[j+nxvh*(1+3*k)] = a;
a.x = cu[2+3*(2*j+nxe*k)] + at5;
a.y = cu[2+3*(2*j+1+nxe*k)] + at6;
cuc[j+nxvh*(2+3*k)] = a;
j += blockDim.x;
}
}
return;
}
/*--------------------------------------------------------------------*/
__global__ void gpucbguard2l(float2 bxyc[], float bxy[], int nx, int ny,
int nxe, int nye, int nxvh, int nyv) {
/* copy and replicate complex input 2d vector field bxyc
into extended periodic field bxy
linear interpolation
nx/ny = system length in x/y direction
nxe = second dimension of input field array bxy, must be >= nx+1
nye = third dimension of input field array bxy, must be >= ny+1
nxvh = first dimension of input field array bxyc, must be >= nx/2+1
nyv = third dimension of input field array bxyc, must be >= ny */
/* local data */
int j, k, nxh;
float2 a, b, c;
nxh = nx/2;
k = blockIdx.x;
/* copy interior points */
if (k < ny) {
j = threadIdx.x;
while (j < nxh) {
a = bxyc[j+nxvh*3*k];
b = bxyc[j+nxvh*(1+3*k)];
c = bxyc[j+nxvh*(2+3*k)];
bxy[3*(2*j+nxe*k)] = a.x;
bxy[1+3*(2*j+nxe*k)] = b.x;
bxy[2+3*(2*j+nxe*k)] = c.x;
bxy[3*(2*j+1+nxe*k)] = a.y;
bxy[1+3*(2*j+1+nxe*k)] = b.y;
bxy[2+3*(2*j+1+nxe*k)] = c.y;
j += blockDim.x;
}
}
/* accumulate edges of extended field */
if (blockIdx.x==0) {
k = threadIdx.x;
while (k < ny) {
a = bxyc[nxvh*3*k];
b = bxyc[nxvh*(1+3*k)];
c = bxyc[nxvh*(2+3*k)];
bxy[3*(nx+nxe*k)] = a.x;
bxy[1+3*(nx+nxe*k)] = b.x;
bxy[2+3*(nx+nxe*k)] = c.x;
k += blockDim.x;
}
j = threadIdx.x;
while (j < nxh) {
a = bxyc[j];
b = bxyc[j+nxvh];
c = bxyc[j+2*nxvh];
bxy[3*(2*j+nxe*ny)] = a.x;
bxy[1+3*(2*j+nxe*ny)] = b.x;
bxy[2+3*(2*j+nxe*ny)] = c.x;
bxy[3*(2*j+1+nxe*ny)] = a.y;
bxy[1+3*(2*j+1+nxe*ny)] = b.y;
bxy[2+3*(2*j+1+nxe*ny)] = c.y;
j += blockDim.x;
}
if (threadIdx.x==0) {
a = bxyc[0];
b = bxyc[nxvh];
c = bxyc[nxvh*2];
bxy[3*(nx+nxe*ny)] = a.x;
bxy[1+3*(nx+nxe*ny)] = b.x;
bxy[2+3*(nx+nxe*ny)] = c.x;
}
}
return;
}
/*--------------------------------------------------------------------*/
__global__ void gpuppfnd2l(float ppart[], int kpic[], int ncl[],
int ihole[], int idimp, int nppmx, int nx,
int ny, int mx, int my, int mx1, int my1,
int ntmax, int *irc) {
/* this subroutine performs first step of a particle sort by x,y grid
in tiles of mx, my, where one finds the particles leaving tile and
stores their number, location, and destination in ncl and ihole.
linear interpolation, with periodic boundary conditions
tiles are assumed to be arranged in 2D linear memory
input: all except ncl, ihole, irc
output: ppart, ncl, ihole, irc
ppart[k][0][n] = position x of particle n in tile k
ppart[k][1][n] = position y of particle n in tile k
kpic[k] = number of particles in tile k
ncl[k][i] = number of particles going to destination i, tile k
ihole[k][:][0] = location of hole in array left by departing particle
ihole[k][:][1] = destination of particle leaving hole
ihole[k][0][0] = ih, number of holes left (error, if negative)
idimp = size of phase space = 4
nppmx = maximum number of particles in tile
nx/ny = system length in x/y direction
mx/my = number of grids in sorting cell in x/y
mx1 = (system length in x direction - 1)/mx + 1
my1 = (system length in y direction - 1)/my + 1
ntmax = size of hole array for particles leaving tiles
irc = maximum overflow, returned only if error occurs, when irc > 0
local data */
int mxy1, noff, moff, npp, j, k, ih, ist, nn, mm, nths;
float anx, any, edgelx, edgely, edgerx, edgery, dx, dy;
/* The sizes of the shared memory arrays are as follows: */
/* int sncl[8], sih[blockDim.x], nh[1]; */
int *sncl, *sih, *nh;
extern __shared__ int shm[];
sncl = (int *)&shm[0];
sih = (int *)&shm[8];
nh = (int *)&shm[8+blockDim.x];
mxy1 = mx1*my1;
anx = (float) nx;
any = (float) ny;
/* k = tile number */
k = blockIdx.x + gridDim.x*blockIdx.y;
/* find and count particles leaving tiles and determine destination */
/* update ppart, ihole, ncl */
/* loop over tiles */
if (k < mxy1) {
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
npp = kpic[k];
nn = nx - noff;
nn = mx < nn ? mx : nn;
mm = ny - moff;
mm = my < mm ? my : mm;
edgelx = noff;
edgerx = noff + nn;
edgely = moff;
edgery = moff + mm;
/* clear counters */
j = threadIdx.x;
while (j < 8) {
sncl[j] = 0;
j += blockDim.x;
}
if (threadIdx.x==0) {
nh[0] = 0;
}
/* synchronize threads */
__syncthreads();
/* loop over particles in tile */
mm = (npp - 1)/(int) blockDim.x + 1;
noff = 0;
for (nn = 0; nn < mm; nn++) {
j = threadIdx.x + blockDim.x*nn;
sih[threadIdx.x] = 0;
if (j < npp) {
dx = ppart[j+nppmx*(idimp*k)];
dy = ppart[j+nppmx*(1+idimp*k)];
/* find particles going out of bounds */
ist = 0;
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* ist = direction particle is going */
if (dx >= edgerx) {
if (dx >= anx)
ppart[j+nppmx*(idimp*k)] = dx - anx;
ist = 2;
}
else if (dx < edgelx) {
if (dx < 0.0f) {
dx += anx;
if (dx < anx)
ist = 1;
else
dx = 0.0f;
ppart[j+nppmx*(idimp*k)] = dx;
}
else {
ist = 1;
}
}
if (dy >= edgery) {
if (dy >= any)
ppart[j+nppmx*(1+idimp*k)] = dy - any;
ist += 6;
}
else if (dy < edgely) {
if (dy < 0.0f) {
dy += any;
if (dy < any)
ist += 3;
else
dy = 0.0f;
ppart[j+nppmx*(1+idimp*k)] = dy;
}
else {
ist += 3;
}
}
/* using prefix scan for ih to keep holes ordered */
if (ist > 0) {
atomicAdd(&sncl[ist-1],1);
sih[threadIdx.x] = 1;
}
}
/* synchronize threads */
__syncthreads();
nths = npp - blockDim.x*nn;
if (nths > blockDim.x)
nths = blockDim.x;
/* perform local prefix reduction */
liscan2(sih,nths);
if (j < npp) {
ih = sih[threadIdx.x];
moff = 0;
if (threadIdx.x > 0)
moff = sih[threadIdx.x-1];
/* this thread has a hole present */
if (ih > moff) {
ih += noff;
if (ih <= ntmax) {
ihole[2*(ih+(ntmax+1)*k)] = j + 1;
ihole[1+2*(ih+(ntmax+1)*k)] = ist;
}
else {
nh[0] = 1;
}
}
}
/* update number of holes in this iteration */
if (nths > 0)
noff += sih[nths-1];
/* synchronize threads */
__syncthreads();
}
/* write out counters */
j = threadIdx.x;
while (j < 8) {
ncl[j+8*k] = sncl[j];
j += blockDim.x;
}
/* set error and end of file flag */
if (threadIdx.x==0) {
/* ihole overflow */
ih = noff;
if (nh[0] > 0) {
*irc = ih;
ih = -ih;
}
ihole[2*(ntmax+1)*k] = ih;
}
}
return;
}
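/*--------------------------------------------------------------------*/
/* Editorial sketch (hedged): a CPU reference for the direction code used
   by gpuppfnd2l (called ist here, mm in the deposit kernels above) when a
   particle leaves its tile: 1 for the -x side, 2 for the +x side, plus 3
   for the -y side or 6 for the +y side, giving the 8 destinations counted
   in ncl. The helper name is illustrative and, unlike the kernel, it does
   not apply the periodic wrap or round-off corrections to the position. */
static int example_direction_code(float dx, float dy, float edgelx,
                                  float edgerx, float edgely,
                                  float edgery) {
   int ist = 0;
   if (dx >= edgerx)        /* leaving on the +x side of the tile */
      ist = 2;
   else if (dx < edgelx)    /* leaving on the -x side of the tile */
      ist = 1;
   if (dy >= edgery)        /* leaving on the +y side of the tile */
      ist += 6;
   else if (dy < edgely)    /* leaving on the -y side of the tile */
      ist += 3;
   return ist;              /* 0 means the particle stays in its tile */
}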
/*--------------------------------------------------------------------*/
__global__ void gpuppmov2l(float ppart[], float ppbuff[], int ncl[],
int ihole[], int idimp, int nppmx, int mx1,
int my1, int npbmx, int ntmax, int *irc) {
/* this subroutine performs second step of a particle sort by x,y grid
in tiles of mx, my, where prefix scan of ncl is performed and
departing particles are buffered in ppbuff in direction order.
linear interpolation, with periodic boundary conditions
tiles are assumed to be arranged in 2D linear memory
input: all except ppbuff, irc
output: ppbuff, ncl, irc
ppart[k][i][n] = i co-ordinate of particle n in tile k
ppbuff[k][i][n] = i co-ordinate of particle n in tile k
ncl[k][i] = number of particles going to destination i, tile k
ihole[k][:][0] = location of hole in array left by departing particle
ihole[k][:][1] = direction destination of particle leaving hole
all for tile k
ihole[k][0][0] = ih, number of holes left (error, if negative)
idimp = size of phase space = 4
nppmx = maximum number of particles in tile
mx1 = (system length in x direction - 1)/mx + 1
my1 = (system length in y direction - 1)/my + 1
npbmx = size of buffer array ppbuff
ntmax = size of hole array for particles leaving tiles
irc = maximum overflow, returned only if error occurs, when irc > 0
local data */
int mxy1, i, j, k, ii, nh, ist, j1, ierr;
/* The sizes of the shared memory arrays are as follows: */
/* int sncl[8], ip[1]; */
/* blockDim.x should be >= 8 */
int *sncl, *ip;
extern __shared__ int shm[];
sncl = (int *)&shm[0];
ip = (int *)&shm[8];
mxy1 = mx1*my1;
ierr = 0;
/* k = tile number */
k = blockIdx.x + gridDim.x*blockIdx.y;
j = threadIdx.x;
/* buffer particles that are leaving tile: update ppbuff, ncl */
/* loop over tiles */
if (k < mxy1) {
/* find address offset for ordered ppbuff array */
if (j < 8) {
ist = ncl[j+8*k];
sncl[j] = ist;
}
if (threadIdx.x==0)
ip[0] = 0;
/* synchronize threads */
__syncthreads();
/* perform local prefix reduction */
liscan2(sncl,8);
if (j < 8)
sncl[j] -= ist;
/* synchronize threads */
__syncthreads();
nh = ihole[2*(ntmax+1)*k];
/* loop over particles leaving tile */
while (j < nh) {
/* buffer particles that are leaving tile, in direction order */
j1 = ihole[2*(j+1+(ntmax+1)*k)] - 1;
ist = ihole[1+2*(j+1+(ntmax+1)*k)];
ii = atomicAdd(&sncl[ist-1],1);
if (ii < npbmx) {
for (i = 0; i < idimp; i++) {
ppbuff[ii+npbmx*(i+idimp*k)]
= ppart[j1+nppmx*(i+idimp*k)];
}
}
else {
ip[0] = 1;
}
j += blockDim.x;
}
/* synchronize threads */
__syncthreads();
/* write out counters */
j = threadIdx.x;
if (j < 8) {
ncl[j+8*k] = sncl[j];
}
/* set error */
if (threadIdx.x==0) {
if (ip[0] > 0)
ierr = ierr > sncl[7] ? ierr : sncl[7];
}
}
/* ppbuff overflow */
if (ierr > 0)
*irc = ierr;
return;
}
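/*--------------------------------------------------------------------*/
/* Editorial sketch (hedged): a possible launch for gpuppmov2l. The
   shared-memory comment in the kernel lists int sncl[8] and ip[1] and
   notes that blockDim.x should be >= 8. The helper name, the 1D grid of
   one block per tile, and the block size nblock are illustrative. */
static void example_launch_gpuppmov2l(float *ppart, float *ppbuff,
                                      int *ncl, int *ihole, int idimp,
                                      int nppmx, int mx1, int my1,
                                      int npbmx, int ntmax, int *irc,
                                      int nblock) {
   dim3 dimBlock(nblock);                 /* nblock must be >= 8 */
   dim3 dimGrid(mx1*my1);                 /* one block per tile  */
   int ns = 9*sizeof(int);                /* sncl[8] + ip[1]     */
   gpuppmov2l<<<dimGrid,dimBlock,ns>>>(ppart,ppbuff,ncl,ihole,idimp,
                                       nppmx,mx1,my1,npbmx,ntmax,irc);
}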
/*--------------------------------------------------------------------*/
__global__ void gpuppord2l(float ppart[], float ppbuff[], int kpic[],
int ncl[], int ihole[], int idimp, int nppmx,
int mx1, int my1, int npbmx, int ntmax,
int *irc) {
/* this subroutine performs third step of a particle sort by x,y grid
in tiles of mx, my, where incoming particles from other tiles are
copied into ppart.
linear interpolation, with periodic boundary conditions
tiles are assumed to be arranged in 2D linear memory
input: all except irc
output: ppart, kpic, irc
ppart[k][i][n] = i co-ordinate of particle n in tile k
ppbuff[k][i][n] = i co-ordinate of particle n in tile k
kpic[k] = number of particles in tile k
ncl[k][i] = number of particles going to destination i, tile k
ihole[k][:][0] = location of hole in array left by departing particle
ihole[k][:][1] = direction destination of particle leaving hole
all for tile k
ihole[k][0][0] = ih, number of holes left (error, if negative)
idimp = size of phase space = 4
nppmx = maximum number of particles in tile
mx1 = (system length in x direction - 1)/mx + 1
my1 = (system length in y direction - 1)/my + 1
npbmx = size of buffer array ppbuff
ntmax = size of hole array for particles leaving tiles
irc = maximum overflow, returned only if error occurs, when irc > 0
local data */
int mxy1, npp, ncoff, i, j, k, ii, jj, kx, ky, ni, nh;
int nn, mm, ll, ip, j1, j2, kxl, kxr, kk, kl, kr;
int nths;
/* The sizes of the shared memory arrays are as follows: */
/* int ks[8], sip[8], sj[blockDim.x], sj1[1], ist[1]; */
int *ks, *sip, *sj, *sj1, *ist;
extern __shared__ int shm[];
ks = (int *)&shm[0];
sip = (int *)&shm[8];
sj = (int *)&shm[16];
sj1 = (int *)&shm[16+blockDim.x];
ist = (int *)&shm[17+blockDim.x];
mxy1 = mx1*my1;
/* k = tile number */
k = blockIdx.x + gridDim.x*blockIdx.y;
/* copy incoming particles from buffer into ppart: update ppart, kpic */
/* loop over tiles */
if (k < mxy1) {
npp = kpic[k];
ky = k/mx1;
/* loop over tiles in y, assume periodic boundary conditions */
kk = ky*mx1;
/* find tile above */
kl = ky - 1;
if (kl < 0)
kl += my1;
kl = kl*mx1;
/* find tile below */
kr = ky + 1;
if (kr >= my1)
kr -= my1;
kr = kr*mx1;
/* loop over tiles in x, assume periodic boundary conditions */
kx = k - ky*mx1;
kxl = kx - 1;
if (kxl < 0)
kxl += mx1;
kxr = kx + 1;
if (kxr >= mx1)
kxr -= mx1;
/* find tile number for different directions */
if (threadIdx.x==0) {
ks[0] = kxr + kk;
ks[1] = kxl + kk;
ks[2] = kx + kr;
ks[3] = kxr + kr;
ks[4] = kxl + kr;
ks[5] = kx + kl;
ks[6] = kxr + kl;
ks[7] = kxl + kl;
sj1[0] = 0;
ist[0] = 0;
}
/* synchronize threads */
__syncthreads();
/* find number of incoming particles */
kk = 0;
ncoff = 0;
ip = 0;
ii = threadIdx.x;
if (ii < 8) {
kk = ks[ii];
if (ii > 0)
ncoff = ncl[ii-1+8*kk];
ip = ncl[ii+8*kk] - ncoff;
kk = ncoff + idimp*npbmx*kk;
sip[ii] = ip;
}
/* synchronize threads */
__syncthreads();
/* perform local prefix reduction */
liscan2(sip,8);
ni = sip[7];
/* loop over directions */
nh = ihole[2*(ntmax+1)*k];
j1 = 0;
mm = (ni - 1)/(int) blockDim.x + 1;
for (nn = 0; nn < mm; nn++) {
j = threadIdx.x + blockDim.x*nn;
sj[threadIdx.x] = 0;
if (threadIdx.x==0)
sj[0] = sj1[0];
/* synchronize threads */
__syncthreads();
/* calculate offset for reading from particle buffer */
if (ii < 8) {
/* mark next location where direction ii changes */
jj = sip[ii] - blockDim.x*nn;
if ((jj >= 0) && (jj < blockDim.x)) {
if (ip > 0)
sj[jj] -= kk + ip;
}
}
/* synchronize threads */
__syncthreads();
/* calculate offset for reading from particle buffer */
if (ii < 8) {
/* mark location where direction ii starts */
jj -= ip;
if ((jj >= 0) && (jj < blockDim.x)) {
if (ip > 0)
sj[jj] += kk;
}
}
nths = ni - blockDim.x*nn;
if (nths > blockDim.x)
nths = blockDim.x;
/* synchronize threads */
__syncthreads();
/* perform local prefix reduction */
liscan2(sj,nths);
/* save last value for next time */
if (threadIdx.x==0) {
jj = 0;
if (nths > 0)
jj = sj[nths-1];
sj1[0] = jj;
}
if (j < ni) {
/* insert incoming particles into holes */
if (j < nh) {
j1 = ihole[2*(j+1+(ntmax+1)*k)] - 1;
}
/* place overflow at end of array */
else {
j1 = npp + (j - nh);
}
if (j1 < nppmx) {
jj = sj[threadIdx.x];
for (i = 0; i < idimp; i++) {
ppart[j1+nppmx*(i+idimp*k)]
= ppbuff[j+jj+npbmx*i];
}
}
else {
ist[0] = 1;
}
}
/* synchronize threads */
__syncthreads();
}
/* update particle number if all holes have been filled */
jj = ni - nh;
if (jj > 0)
npp += jj;
/* fill up remaining holes in particle array with particles from end */
ip = nh - ni;
if (ip > 0) {
mm = (ip - 1)/(int) blockDim.x + 1;
kk = 0;
ll = 0;
/* loop over holes */
for (nn = 0; nn < mm; nn++) {
j = threadIdx.x + blockDim.x*nn;
/* j1 = locations of particles to fill holes, in decreasing order */
j1 = 0;
if (j < ip) {
j1 = npp - j - 1;
}
/* j2 = locations of holes at the end, in decreasing order */
j2 = 0;
jj = nh - ll - threadIdx.x;
if (jj > 0) {
j2 = ihole[2*(jj+(ntmax+1)*k)] - 1;
}
/* holes with locations greater than npp-ip do not need to be filled */
/* identify such holes */
sj[threadIdx.x] = 1;
/* synchronize threads */
__syncthreads();
/* omit particles at end that are holes */
ii = npp - (j2 + blockDim.x*nn) - 1;
if ((ii >= 0) && (ii < blockDim.x))
sj[ii] = 0;
nths = ip - blockDim.x*nn;
if (nths > blockDim.x)
nths = blockDim.x;
/* synchronize threads */
__syncthreads();
/* perform local prefix reduction */
liscan2(sj,nths);
/* ii = number particles at end to be moved */
ii = 0;
if (nths > 0)
ii = sj[nths-1];
/* identify which particles at end to be moved */
if (ii < nths) {
ncoff = 0;
if (j < ip) {
if (threadIdx.x > 0)
ncoff = sj[threadIdx.x-1];
jj = sj[threadIdx.x];
}
/* synchronize threads */
__syncthreads();
if (j < ip) {
if (jj > ncoff) {
sj[jj-1] = j1;
}
}
/* synchronize threads */
__syncthreads();
}
/* j2 = locations of holes to be filled in increasing order */
j2 = 0;
if (j < ip) {
j1 = npp - j - 1;
jj = threadIdx.x + ni + kk + 1;
if (jj <= nh)
j2 = ihole[2*(jj+(ntmax+1)*k)] - 1;
}
/* move particles from end into remaining holes */
if (j < (ii+blockDim.x*nn)) {
if (ii < nths)
j1 = sj[threadIdx.x];
for (i = 0; i < idimp; i++) {
ppart[j2+nppmx*(i+idimp*k)]
= ppart[j1+nppmx*(i+idimp*k)];
}
}
/* accumulate number of holes filled */
kk += ii;
/* accumulate number of holes skipped over */
ii = nths - ii;
ll += ii;
}
/* update number of particles */
npp -= ip;
}
/* set error and update particle */
if (threadIdx.x==0) {
/* ppart overflow */
if (ist[0] > 0)
*irc = npp;
kpic[k] = npp;
}
}
return;
}
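/*--------------------------------------------------------------------*/
/* Editorial sketch (hedged): a possible launch for gpuppord2l. The
   shared-memory comment in the kernel lists ks[8], sip[8], sj[blockDim.x],
   sj1[1] and ist[1], i.e. (18 + blockDim.x) ints in total. The helper
   name and launch configuration are illustrative. */
static void example_launch_gpuppord2l(float *ppart, float *ppbuff,
                                      int *kpic, int *ncl, int *ihole,
                                      int idimp, int nppmx, int mx1,
                                      int my1, int npbmx, int ntmax,
                                      int *irc, int nblock) {
   dim3 dimBlock(nblock);
   dim3 dimGrid(mx1*my1);                   /* one block per tile */
   int ns = (18+nblock)*sizeof(int);        /* ks+sip+sj+sj1+ist  */
   gpuppord2l<<<dimGrid,dimBlock,ns>>>(ppart,ppbuff,kpic,ncl,ihole,idimp,
                                       nppmx,mx1,my1,npbmx,ntmax,irc);
}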
/*--------------------------------------------------------------------*/
__global__ void gpupois23t(float2 qt[], float2 fxyt[], float2 ffct[],
float *we, int nx, int ny, int nxvh, int nyv,
int nxhd, int nyhd) {
/* this subroutine solves 2d poisson's equation in fourier space for
force/charge (or convolution of electric field over particle shape)
with periodic boundary conditions, without packed data.
Zeros out z component.
vector length is second dimension
input: qt,ffct,nx,ny,nxvh,nyv,nxhd,nyhd, output: fxyt,we
approximate flop count is: 26*nxc*nyc + 12*(nxc + nyc)
where nxc = nx/2 - 1, nyc = ny/2 - 1
equation used is:
fx[kx][ky] = -sqrt(-1)*kx*g[kx][ky]*s[kx][ky]*q[kx][ky],
fy[kx][ky] = -sqrt(-1)*ky*g[kx][ky]*s[kx][ky]*q[kx][ky],
where kx = 2pi*j/nx, ky = 2pi*k/ny, and j,k = fourier mode numbers,
g[kx][ky] = (affp/(kx**2+ky**2))*s[kx][ky],
s[kx][ky] = exp(-((kx*ax)**2+(ky*ay)**2)/2), except for
fx(kx=pi) = fy(kx=pi) = fx(ky=pi) = fy(ky=pi) = 0, and
fx(kx=0,ky=0) = fy(kx=0,ky=0) = 0.
qt[j][k] = complex charge density for fourier mode (k,j)
fxyt[j][0][k] = x component of complex force/charge,
fxyt[j][1][k] = y component of complex force/charge,
fxyt[j][2][k] = z component of complex force/charge,
all for fourier mode (k,j)
cimag(ffct[j][k]) = finite-size particle shape factor s
creal(ffct[j][k]) = potential green's function g
for fourier mode (k,j)
electric field energy is also calculated, using
we = nx*ny*sum((affp/(kx**2+ky**2))*|q[kx][ky]*s[kx][ky]|**2)
nx/ny = system length in x/y direction
nxvh = second dimension of field arrays, must be >= nxh+1
nyv = first dimension of field arrays, must be >= ny
nxhd = second dimension of form factor array, must be >= nxh
nyhd = first dimension of form factor array, must be >= nyh
local data */
int nxh, nyh, nxh1, j, k, k1, jj, jk, jk3;
float dnx, dny, dkx, at1, at2, at3, at4;
float2 zero, zt1, zt2, zt3;
/* The size of the shared memory array is as follows: */
/* float ss[blockDim.x]; */
extern __shared__ float ss[];
double wp;
nxh = nx/2;
nyh = 1 > ny/2 ? 1 : ny/2;
dnx = 6.28318530717959/(float) nx;
dny = 6.28318530717959/(float) ny;
zero.x = 0.0f;
zero.y = 0.0f;
/* calculate force/charge and sum field energy */
wp = 0.0;
/* mode numbers 0 < kx < nx/2 and 0 < ky < ny/2 */
/* for (j = 1; j < nxh; j++) { */
j = blockIdx.x;
if ((j > 0) && (j < nxh)) {
dkx = dnx*(float) j;
jj = nyhd*j;
jk = nyv*j;
jk3 = 3*jk;
/* for (k = 1; k < nyh; k++) { */
k = threadIdx.x;
while (k < nyh) {
if (k > 0) {
k1 = ny - k;
zt1 = ffct[k+jj];
at1 = zt1.x*zt1.y;
at2 = at1*dkx;
at3 = at1*dny*(float) k;
zt1 = qt[k+jk];
at4 = zt1.x;
zt1.x = zt1.y;
zt1.y = -at4;
zt2 = qt[k1+jk];
at4 = zt2.x;
zt2.x = zt2.y;
zt2.y = -at4;
zt3.x = at2*zt1.x;
zt3.y = at2*zt1.y;
fxyt[k+jk3] = zt3;
zt3.x = at3*zt1.x;
zt3.y = at3*zt1.y;
fxyt[k+nyv+jk3] = zt3;
fxyt[k+2*nyv+jk3] = zero;
zt3.x = at2*zt2.x;
zt3.y = at2*zt2.y;
fxyt[k1+jk3] = zt3;
zt3.x = -at3*zt2.x;
zt3.y = -at3*zt2.y;
fxyt[k1+nyv+jk3] = zt3;
fxyt[k1+2*nyv+jk3] = zero;
wp += (double) (at1*(zt1.x*zt1.x + zt1.y*zt1.y
+ zt2.x*zt2.x + zt2.y*zt2.y));
}
k += blockDim.x;
}
}
/* mode numbers ky = 0, ny/2 */
if (blockIdx.x==0) {
k1 = nyh;
/* for (j = 1; j < nxh; j++) { */
j = threadIdx.x;
while (j < nxh) {
if (j > 0) {
jj = nyhd*j;
jk = nyv*j;
jk3 = 3*jk;
zt1 = ffct[jj];
at1 = zt1.x*zt1.y;
at2 = at1*dnx*(float) j;
zt1 = qt[jk];
at4 = zt1.x;
zt3.x = at2*zt1.y;
zt3.y = -at2*at4;
fxyt[jk3] = zt3;
fxyt[nyv+jk3] = zero;
fxyt[2*nyv+jk3] = zero;
fxyt[k1+jk3] = zero;
fxyt[k1+nyv+jk3] = zero;
fxyt[k1+2*nyv+jk3] = zero;
wp += (double) (at1*(zt1.x*zt1.x + zt1.y*zt1.y));
}
j += blockDim.x;
}
/* mode numbers kx = 0, nx/2 */
nxh1 = 3*nyv*nxh;
/* for (k = 1; k < nyh; k++) { */
k = threadIdx.x;
while (k < nyh) {
if (k > 0) {
k1 = ny - k;
zt1 = ffct[k];
at1 = zt1.x*zt1.y;
at3 = at1*dny*(float) k;
zt1 = qt[k];
at4 = zt1.x;
zt3.x = at3*zt1.y;
zt3.y = -at3*at4;
fxyt[k] = zero;
fxyt[k+nyv] = zt3;
fxyt[k+2*nyv] = zero;
fxyt[k1] = zero;
zt3.y = -zt3.y;
fxyt[k1+nyv] = zt3;
fxyt[k1+2*nyv] = zero;
fxyt[k+nxh1] = zero;
fxyt[k+nyv+nxh1] = zero;
fxyt[k+2*nyv+nxh1] = zero;
fxyt[k1+nxh1] = zero;
fxyt[k1+nyv+nxh1] = zero;
fxyt[k1+2*nyv+nxh1] = zero;
wp += (double) (at1*(zt1.x*zt1.x + zt1.y*zt1.y));
}
k += blockDim.x;
}
if (threadIdx.x==0) {
k1 = nyh;
fxyt[0] = zero;
fxyt[nyv] = zero;
fxyt[2*nyv] = zero;
fxyt[k1] = zero;
fxyt[k1+nyv] = zero;
fxyt[k1+2*nyv] = zero;
fxyt[nxh1] = zero;
fxyt[nxh1+nyv] = zero;
fxyt[nxh1+2*nyv] = zero;
fxyt[k1+nxh1] = zero;
fxyt[k1+nyv+nxh1] = zero;
fxyt[k1+2*nyv+nxh1] = zero;
}
}
j = blockIdx.x;
if (j <= nxh) {
/* sum potential energies for each x co-ordinate */
ss[threadIdx.x] = (float) wp;
/* synchronize threads */
__syncthreads();
lsum2(ss,blockDim.x);
/* normalize potential energy for each x co-ordinate */
if (threadIdx.x==0)
we[j] = ss[0]*((float) (nx*ny));
}
return;
}
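/*--------------------------------------------------------------------*/
/* Editorial sketch (hedged): a possible launch for gpupois23t. The kernel
   assigns one block per kx index j (the energy write covers 0 <= j <= nx/2,
   so nx/2+1 blocks) and uses blockDim.x floats of dynamic shared memory
   for the partial-energy array ss. Note that we is then an array of
   nx/2+1 per-j partial sums, presumably reduced to a single energy
   afterwards. Names and block size are illustrative. */
static void example_launch_gpupois23t(float2 *qt, float2 *fxyt,
                                      float2 *ffct, float *we, int nx,
                                      int ny, int nxvh, int nyv, int nxhd,
                                      int nyhd, int nblock) {
   dim3 dimBlock(nblock);
   dim3 dimGrid(nx/2+1);                 /* one block per kx index */
   int ns = nblock*sizeof(float);        /* shared array ss[]      */
   gpupois23t<<<dimGrid,dimBlock,ns>>>(qt,fxyt,ffct,we,nx,ny,nxvh,nyv,
                                       nxhd,nyhd);
}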
/*--------------------------------------------------------------------*/
__global__ void gpucuperp2t(float2 cut[], int nx, int ny, int nxvh,
int nyv) {
/* this subroutine calculates the transverse current in fourier space
without packed data.
input: all, output: cut
approximate flop count is: 36*nxc*nyc
and nxc*nyc divides
where nxc = nx/2 - 1, nyc = ny/2 - 1
the transverse current is calculated using the equation:
cux[ky][kx] = cux[ky][kx]
-kx*(kx*cux[ky][kx]+ky*cuy[ky][kx])/(kx*kx+ky*ky)
cuy[ky][kx] = cuy[ky][kx]
-ky*(kx*cux[ky][kx]+ky*cuy[ky][kx])/(kx*kx+ky*ky)
where kx = 2pi*j/nx, ky = 2pi*k/ny, and j,k = fourier mode numbers,
except for cux(kx=pi) = cuy(kx=pi) = 0, cux(ky=pi) = cuy(ky=pi) = 0,
and cux(kx=0,ky=0) = cuy(kx=0,ky=0) = 0.
cut[j][i][k] = complex current density for fourier mode (k,j)
nx/ny = system length in x/y direction
nxvh = third dimension of current array, must be >= nxh
nyv = first dimension of current array, must be >= ny
local data */
int nxh, nyh, nxh1, j, k, k1, jk3;
float dnx, dny, dkx, dkx2, dky, at1;
float2 zero, zt1, zt2, zt3;
nxh = nx/2;
nyh = 1 > ny/2 ? 1 : ny/2;
dnx = 6.28318530717959/(float) nx;
dny = 6.28318530717959/(float) ny;
zero.x = 0.0f;
zero.y = 0.0f;
/* calculate transverse part of current */
/* mode numbers 0 < kx < nx/2 and 0 < ky < ny/2 */
/* for (j = 1; j < nxh; j++) { */
j = blockIdx.x;
if ((j > 0) && (j < nxh)) {
dkx = dnx*(float) j;
dkx2 = dkx*dkx;
jk3 = 3*nyv*j;
/* for (k = 1; k < nyh; k++) { */
k = threadIdx.x;
while (k < nyh) {
if (k > 0) {
k1 = ny - k;
dky = dny*(float) k;
at1 = 1.0f/(dkx2 + dky*dky);
zt1 = cut[k+jk3];
zt2.x = dkx*zt1.x;
zt2.y = dkx*zt1.y;
zt3 = cut[k+nyv+jk3];
zt2.x = at1*(zt2.x + dky*zt3.x);
zt2.y = at1*(zt2.y + dky*zt3.y);
zt1.x -= dkx*zt2.x;
zt1.y -= dkx*zt2.y;
zt3.x -= dky*zt2.x;
zt3.y -= dky*zt2.y;
cut[k+jk3] = zt1;
cut[k+nyv+jk3] = zt3;
zt1 = cut[k1+jk3];
zt2.x = dkx*zt1.x;
zt2.y = dkx*zt1.y;
zt3 = cut[k1+nyv+jk3];
zt2.x = at1*(zt2.x - dky*zt3.x);
zt2.y = at1*(zt2.y - dky*zt3.y);
zt1.x -= dkx*zt2.x;
zt1.y -= dkx*zt2.y;
zt3.x += dky*zt2.x;
zt3.y += dky*zt2.y;
cut[k1+jk3] = zt1;
cut[k1+nyv+jk3] = zt3;
}
k += blockDim.x;
}
}
/* mode numbers ky = 0, ny/2 */
if (blockIdx.x==0) {
k1 = nyh;
/* for (j = 1; j < nxh; j++) { */
j = threadIdx.x;
while (j < nxh) {
if (j > 0) {
jk3 = 3*nyv*j;
cut[jk3] = zero;
cut[k1+jk3] = zero;
cut[k1+nyv+jk3] = zero;
}
j += blockDim.x;
}
/* mode numbers kx = 0, nx/2 */
nxh1 = 3*nyv*nxh;
/* for (k = 1; k < nyh; k++) { */
k = threadIdx.x;
while (k < nyh) {
if (k > 0) {
k1 = ny - k;
zt1 = cut[k];
cut[k+nyv] = zero;
zt1.y = -zt1.y;
cut[k1] = zt1;
cut[k1+nyv] = zero;
cut[k+nxh1] = zero;
cut[k+nyv+nxh1] = zero;
cut[k1+nxh1] = zero;
cut[k1+nyv+nxh1] = zero;
}
k += blockDim.x;
}
if (threadIdx.x==0) {
k1 = nyh;
cut[0] = zero;
cut[nyv] = zero;
cut[k1] = zero;
cut[k1+nyv] = zero;
cut[nxh1] = zero;
cut[nyv+nxh1] = zero;
cut[k1+nxh1] = zero;
cut[k1+nyv+nxh1] = zero;
}
}
return;
}
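/*--------------------------------------------------------------------*/
/* Editorial sketch (hedged): a single-mode CPU reference for the
   transverse projection performed by gpucuperp2t, matching the equations
   in the comment block above: cu - k*(k.cu)/(kx*kx+ky*ky). The helper
   name is illustrative and it ignores the kx=0, ky=0 and Nyquist modes
   that the kernel zeroes or conjugates explicitly. */
static void example_cuperp_single_mode(float dkx, float dky,
                                       float2 *cux, float2 *cuy) {
   float at1 = 1.0f/(dkx*dkx + dky*dky);
   float2 zt2;
/* zt2 = (k.cu)/|k|^2, for the real and imaginary parts */
   zt2.x = at1*(dkx*cux->x + dky*cuy->x);
   zt2.y = at1*(dkx*cux->y + dky*cuy->y);
/* subtract the longitudinal part from each component */
   cux->x -= dkx*zt2.x;
   cux->y -= dkx*zt2.y;
   cuy->x -= dky*zt2.x;
   cuy->y -= dky*zt2.y;
}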
/*--------------------------------------------------------------------*/
__global__ void gpuibpois23t(float2 cut[], float2 bxyt[], float2 ffct[],
float ci, float *wm, int nx, int ny,
int nxvh, int nyv, int nxhd, int nyhd) {
/* this subroutine solves 2-1/2d poisson's equation in fourier space for
magnetic field, with periodic boundary conditions,
without packed data.
input: cut,ffct,ci,nx,ny,nxv,nyhd, output: bxyt,wm
approximate flop count is: 90*nxc*nyc + 40*(nxc + nyc)
where nxc = nx/2 - 1, nyc = ny/2 - 1
the magnetic field is calculated using the equations:
bx[kx][ky] = ci*ci*sqrt(-1)*g[kx][ky]*ky*cuz[kx][ky],
by[kx][ky] = -ci*ci*sqrt(-1)*g[kx][ky]*kx*cuz[kx][ky],
bz[kx][ky] = ci*ci*sqrt(-1)*g[kx][ky]*(kx*cuy[kx][ky]-ky*cux[kx][ky]),
where kx = 2pi*j/nx, ky = 2pi*k/ny, and j,k = fourier mode numbers,
g[kx][ky] = (affp/(kx**2+ky**2))*s[kx][ky],
s[kx][ky] = exp(-((kx*ax)**2+(ky*ay)**2)/2), except for
bx(kx=pi) = by(kx=pi) = bz(kx=pi) = bx(ky=pi) = by(ky=pi) = bz(ky=pi)
= 0, and bx(kx=0,ky=0) = by(kx=0,ky=0) = bz(kx=0,ky=0) = 0.
cut[j][i][k] = complex current density for fourier mode (k,j)
bxyt[j][i][k] = i component of complex magnetic field
all for fourier mode (k,j)
cimag(ffct[j][k]) = finite-size particle shape factor s
creal(ffct[j][k]) = potential green's function g
for fourier mode (k,j)
ci = reciprocal of velocity of light
magnetic field energy is also calculated, using
wm = nx*ny*sum((affp/(kx**2+ky**2))*ci*ci*
|cu[kx][ky]*s[kx][ky]|**2), where
affp = normalization constant = nx*ny/np, where np=number of particles
this expression is valid only if the current is divergence-free
nx/ny = system length in x/y direction
nxvh = third dimension of field arrays, must be >= nxh
nyv = first dimension of field arrays, must be >= ny
nxhd = second dimension of form factor array, must be >= nxh
nyhd = first dimension of form factor array, must be >= nyh
local data */
int nxh, nyh, nxh1, j, k, k1, jj, jk3;
float dnx, dny, dkx, ci2, at1, at2, at3, at4;
float2 zero, zt1, zt2, zt3;
/* The size of the shared memory array is as follows: */
/* float ss[blockDim.x]; */
extern __shared__ float ss[];
double wp;
nxh = nx/2;
nyh = 1 > ny/2 ? 1 : ny/2;
dnx = 6.28318530717959/(float) nx;
dny = 6.28318530717959/(float) ny;
zero.x = 0.0f;
zero.y = 0.0f;
ci2 = ci*ci;
/* calculate magnetic field and sum field energy */
wp = 0.0;
/* mode numbers 0 < kx < nx/2 and 0 < ky < ny/2 */
/* for (j = 1; j < nxh; j++) { */
j = blockIdx.x;
if ((j > 0) && (j < nxh)) {
dkx = dnx*(float) j;
jj = nyhd*j;
jk3 = 3*nyv*j;
/* for (k = 1; k < nyh; k++) { */
k = threadIdx.x;
while (k < nyh) {
if (k > 0) {
k1 = ny - k;
zt1 = ffct[k+jj];
at1 = ci2*zt1.x;
at2 = dkx*at1;
at3 = at1*dny*(float) k;
at1 = at1*zt1.y;
zt1 = cut[k+2*nyv+jk3];
at4 = zt1.x;
zt1.x = -zt1.y;
zt1.y = at4;
zt2 = cut[k+nyv+jk3];
at4 = zt2.x;
zt2.x = -zt2.y;
zt2.y = at4;
zt3 = cut[k+jk3];
at4 = zt3.x;
zt3.x = -zt3.y;
zt3.y = at4;
wp += (double) (at1*(zt1.x*zt1.x + zt1.y*zt1.y
+ zt2.x*zt2.x + zt2.y*zt2.y + zt3.x*zt3.x + zt3.y*zt3.y));
zt3.x = at2*zt2.x - at3*zt3.x;
zt3.y = at2*zt2.y - at3*zt3.y;
zt2.x = -at2*zt1.x;
zt2.y = -at2*zt1.y;
zt1.x = at3*zt1.x;
zt1.y = at3*zt1.y;
bxyt[k+jk3] = zt1;
bxyt[k+nyv+jk3] = zt2;
bxyt[k+2*nyv+jk3] = zt3;
zt1 = cut[k1+2*nyv+jk3];
at4 = zt1.x;
zt1.x = -zt1.y;
zt1.y = at4;
zt2 = cut[k1+nyv+jk3];
at4 = zt2.x;
zt2.x = -zt2.y;
zt2.y = at4;
zt3 = cut[k1+jk3];
at4 = zt3.x;
zt3.x = -zt3.y;
zt3.y = at4;
wp += (double) (at1*(zt1.x*zt1.x + zt1.y*zt1.y
+ zt2.x*zt2.x + zt2.y*zt2.y + zt3.x*zt3.x + zt3.y*zt3.y));
zt3.x = at2*zt2.x + at3*zt3.x;
zt3.y = at2*zt2.y + at3*zt3.y;
zt2.x = -at2*zt1.x;
zt2.y = -at2*zt1.y;
zt1.x = -at3*zt1.x;
zt1.y = -at3*zt1.y;
bxyt[k1+jk3] = zt1;
bxyt[k1+nyv+jk3] = zt2;
bxyt[k1+2*nyv+jk3] = zt3;
}
k += blockDim.x;
}
}
/* mode numbers ky = 0, ny/2 */
if (blockIdx.x==0) {
k1 = nyh;
/* for (j = 1; j < nxh; j++) { */
j = threadIdx.x;
while (j < nxh) {
if (j > 0) {
jj = nyhd*j;
jk3 = 3*nyv*j;
zt1 = ffct[jj];
at1 = ci2*zt1.x;
at2 = at1*dnx*(float) j;
at1 = at1*zt1.y;
zt1 = cut[2*nyv+jk3];
at4 = zt1.x;
zt1.x = -zt1.y;
zt1.y = at4;
zt2 = cut[nyv+jk3];
at4 = zt2.x;
zt2.x = -zt2.y;
zt2.y = at4;
wp += (double) (at1*(zt1.x*zt1.x + zt1.y*zt1.y
+ zt2.x*zt2.x + zt2.y*zt2.y));
zt3.x = at2*zt2.x;
zt3.y = at2*zt2.y;
zt2.x = -at2*zt1.x;
zt2.y = -at2*zt1.y;
bxyt[jk3] = zero;
bxyt[nyv+jk3] = zt2;
bxyt[2*nyv+jk3] = zt3;
bxyt[k1+jk3] = zero;
bxyt[k1+nyv+jk3] = zero;
bxyt[k1+2*nyv+jk3] = zero;
}
j += blockDim.x;
}
/* mode numbers kx = 0, nx/2 */
nxh1 = 3*nyv*nxh;
/* for (k = 1; k < nyh; k++) { */
k = threadIdx.x;
while (k < nyh) {
if (k > 0) {
k1 = ny - k;
zt1 = ffct[k];
at1 = ci2*zt1.x;
at3 = at1*dny*(float) k;
at1 = at1*zt1.y;
zt1 = cut[k+2*nyv];
at4 = zt1.x;
zt1.x = -zt1.y;
zt1.y = at4;
zt3 = cut[k];
at4 = zt3.x;
zt3.x = -zt3.y;
zt3.y = at4;
wp += (double) (at1*(zt1.x*zt1.x + zt1.y*zt1.y
+ zt3.x*zt3.x + zt3.y*zt3.y));
zt3.x = -at3*zt3.x;
zt3.y = -at3*zt3.y;
zt1.x = at3*zt1.x;
zt1.y = at3*zt1.y;
bxyt[k] = zt1;
bxyt[k+nyv] = zero;
bxyt[k+2*nyv] = zt3;
zt1.y = -zt1.y;
zt3.y = -zt3.y;
bxyt[k1] = zt1;
bxyt[k1+nyv] = zero;
bxyt[k1+2*nyv] = zt3;
bxyt[k+nxh1] = zero;
bxyt[k+nyv+nxh1] = zero;
bxyt[k+2*nyv+nxh1] = zero;
bxyt[k1+nxh1] = zero;
bxyt[k1+nyv+nxh1] = zero;
bxyt[k1+2*nyv+nxh1] = zero;
}
k += blockDim.x;
}
if (threadIdx.x==0) {
k1 = nyh;
bxyt[0] = zero;
bxyt[nyv] = zero;
bxyt[2*nyv] = zero;
bxyt[k1] = zero;
bxyt[k1+nyv] = zero;
bxyt[k1+2*nyv] = zero;
bxyt[nxh1] = zero;
bxyt[nxh1+nyv] = zero;
bxyt[nxh1+2*nyv] = zero;
bxyt[k1+nxh1] = zero;
bxyt[k1+nyv+nxh1] = zero;
bxyt[k1+2*nyv+nxh1] = zero;
}
}
j = blockIdx.x;
if (j <= nxh) {
/* sum magnetic energies for each x co-ordinate */
ss[threadIdx.x] = (float) wp;
/* synchronize threads */
__syncthreads();
lsum2(ss,blockDim.x);
/* normalize magnetic energy for each x co-ordinate */
if (threadIdx.x==0)
wm[j] = ss[0]*((float) (nx*ny));
}
return;
}
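/*--------------------------------------------------------------------*/
/* Editorial sketch (hedged): a single-mode CPU reference for the magnetic
   field formula used by gpuibpois23t, bx = ci*ci*i*g*ky*cuz,
   by = -ci*ci*i*g*kx*cuz, bz = ci*ci*i*g*(kx*cuy - ky*cux), where
   multiplying a complex value (x,y) by i = sqrt(-1) gives (-y,x). The
   helper name is illustrative; g is taken as an argument here, whereas
   the kernel reads it from creal(ffct). */
static void example_ibpois_single_mode(float dkx, float dky, float g,
                                       float ci, float2 cux, float2 cuy,
                                       float2 cuz, float2 *bx, float2 *by,
                                       float2 *bz) {
   float at1 = ci*ci*g;
   float2 icux, icuy, icuz;
/* multiply each current component by sqrt(-1): (x,y) -> (-y,x) */
   icux.x = -cux.y; icux.y = cux.x;
   icuy.x = -cuy.y; icuy.y = cuy.x;
   icuz.x = -cuz.y; icuz.y = cuz.x;
   bx->x = at1*dky*icuz.x;  bx->y = at1*dky*icuz.y;
   by->x = -at1*dkx*icuz.x; by->y = -at1*dkx*icuz.y;
   bz->x = at1*(dkx*icuy.x - dky*icux.x);
   bz->y = at1*(dkx*icuy.y - dky*icux.y);
}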
/*--------------------------------------------------------------------*/
__global__ void gpumaxwel2t(float2 exyt[], float2 bxyt[], float2 cut[],
float2 ffct[], float ci, float dt,
float *wf, float *wm, int nx, int ny,
int nxvh, int nyv, int nxhd, int nyhd) {
/* this subroutine solves 2-1/2d maxwell's equation in fourier space for
transverse electric and magnetic fields with periodic boundary
conditions, without packed data.
input: all, output: wf, wm, exy, bxy
approximate flop count is: 286*nxc*nyc + 84*(nxc + nyc)
where nxc = nx/2 - 1, nyc = ny/2 - 1
the magnetic field is first updated half a step using the equations:
bx[kx][ky] = bx[kx][ky] - .5*dt*sqrt(-1)*ky*ez[kx][ky]
by[kx][ky] = by[kx][ky] + .5*dt*sqrt(-1)*kx*ez[kx][ky]
bz[kx][ky] = bz[kx][ky] - .5*dt*sqrt(-1)*(kx*ey[kx][ky]-ky*ex[kx][ky])
the electric field is then updated a whole step using the equations:
ex[kx][ky] = ex[kx][ky] + c2*dt*sqrt(-1)*ky*bz[kx][ky]
- affp*dt*cux[kx][ky]*s[kx][ky]
ey[kx][ky] = ey[kx][ky] - c2*dt*sqrt(-1)*kx*bz[kx][ky]
- affp*dt*cuy[kx][ky]*s[kx][ky]
ez[kx][ky] = ez[kx][ky] + c2*dt*sqrt(-1)*(kx*by[kx][ky]-ky*bx[kx][ky])
- affp*dt*cuz[kx][ky]*s[kx][ky]
the magnetic field is finally updated the remaining half step with
the new electric field and the previous magnetic field equations.
where kx = 2pi*j/nx, ky = 2pi*k/ny, c2 = 1./(ci*ci)
and s[kx][ky] = exp(-((kx*ax)**2+(ky*ay)**2)/2)
j,k = fourier mode numbers, except for
ex(kx=pi) = ey(kx=pi) = ez(kx=pi) = 0,
ex(ky=pi) = ey(ky=pi) = ez(ky=pi) = 0,
ex(kx=0,ky=0) = ey(kx=0,ky=0) = ez(kx=0,ky=0) = 0.
and similarly for bx, by, bz.
cut[j][i][k] = complex current density
exyt[j][i][k] = complex transverse electric field
bxyt[j][i][k] = complex magnetic field
for component i, all for fourier mode (k,j)
creal(ffct[0][0]) = affp = normalization constant = nx*ny/np,
where np=number of particles
cimag(ffct[j][k]) = finite-size particle shape factor s,
s[kx][ky] = exp(-((kx*ax)**2+(ky*ay)**2)/2)
for fourier mode (k,j)
ci = reciprocal of velocity of light
dt = time interval between successive calculations
transverse electric field energy is also calculated, using
wf = nx*ny*sum((1/affp)*|exy[kx][ky]|**2)
magnetic field energy is also calculated, using
wm = nx*ny*sum((c2/affp)*|bxy[kx][ky]|**2)
nx/ny = system length in x/y direction
nxvh = third dimension of field arrays, must be >= nxh
nyv = first dimension of field arrays, must be >= ny
nxhd = second dimension of form factor array, must be >= nxh
nyhd = first dimension of form factor array, must be >= nyh
local data */
int nxh, nyh, nxh1, j, k, k1, jj, jk3;
float dnx, dny, dth, c2, cdt, affp, anorm, dkx, dky, afdt, adt;
float2 zero, zt1, zt2, zt3, zt4, zt5, zt6, zt7, zt8, zt9;
float2 ct1, ct2, ct3;
/* The size of the shared memory array is as follows: */
/* float ss[blockDim.x]; */
extern __shared__ float ss[];
double wp, ws;
nxh = nx/2;
nyh = 1 > ny/2 ? 1 : ny/2;
dnx = 6.28318530717959/(float) nx;
dny = 6.28318530717959/(float) ny;
dth = 0.5f*dt;
c2 = 1.0f/(ci*ci);
cdt = c2*dt;
zt1 = ffct[0];
affp = zt1.x;
adt = affp*dt;
zero.x = 0.0f;
zero.y = 0.0f;
anorm = 1.0f/affp;
/* update electromagnetic field and sum field energies */
ws = 0.0;
wp = 0.0;
/* calculate the electromagnetic fields */
/* mode numbers 0 < kx < nx/2 and 0 < ky < ny/2 */
/* for (j = 1; j < nxh; j++) { */
j = blockIdx.x;
if ((j > 0) && (j < nxh)) {
dkx = dnx*(float) j;
jj = nyhd*j;
jk3 = 3*nyv*j;
/* for (k = 1; k < nyh; k++) { */
k = threadIdx.x;
while (k < nyh) {
if (k > 0) {
k1 = ny - k;
dky = dny*(float) k;
zt1 = ffct[k+jj];
afdt = adt*zt1.y;
/* update magnetic field half time step, ky > 0 */
ct3 = exyt[k+2*nyv+jk3];
zt1.x = -ct3.y;
zt1.y = ct3.x;
ct2 = exyt[k+nyv+jk3];
zt2.x = -ct2.y;
zt2.y = ct2.x;
ct1 = exyt[k+jk3];
zt3.x = -ct1.y;
zt3.y = ct1.x;
zt4 = bxyt[k+jk3];
zt5 = bxyt[k+nyv+jk3];
zt6 = bxyt[k+2*nyv+jk3];
zt4.x -= dth*(dky*zt1.x);
zt4.y -= dth*(dky*zt1.y);
zt5.x += dth*(dkx*zt1.x);
zt5.y += dth*(dkx*zt1.y);
zt6.x -= dth*(dkx*zt2.x - dky*zt3.x);
zt6.y -= dth*(dkx*zt2.y - dky*zt3.y);
/* update electric field whole time step */
zt1.x = -zt6.y;
zt1.y = zt6.x;
zt2.x = -zt5.y;
zt2.y = zt5.x;
zt3.x = -zt4.y;
zt3.y = zt4.x;
zt7 = cut[k+jk3];
zt8 = cut[k+nyv+jk3];
zt9 = cut[k+2*nyv+jk3];
zt7.x = ct1.x + cdt*(dky*zt1.x) - afdt*zt7.x;
zt7.y = ct1.y + cdt*(dky*zt1.y) - afdt*zt7.y;
zt8.x = ct2.x - cdt*(dkx*zt1.x) - afdt*zt8.x;
zt8.y = ct2.y - cdt*(dkx*zt1.y) - afdt*zt8.y;
zt9.x = ct3.x + cdt*(dkx*zt2.x - dky*zt3.x) - afdt*zt9.x;
zt9.y = ct3.y + cdt*(dkx*zt2.y - dky*zt3.y) - afdt*zt9.y;
/* update magnetic field half time step and store electric field */
zt1.x = -zt9.y;
zt1.y = zt9.x;
zt2.x = -zt8.y;
zt2.y = zt8.x;
zt3.x = -zt7.y;
zt3.y = zt7.x;
exyt[k+jk3] = zt7;
exyt[k+nyv+jk3] = zt8;
exyt[k+2*nyv+jk3] = zt9;
ws += (double) (anorm*(zt7.x*zt7.x + zt7.y*zt7.y
+ zt8.x*zt8.x + zt8.y*zt8.y + zt9.x*zt9.x + zt9.y*zt9.y));
zt4.x -= dth*(dky*zt1.x);
zt4.y -= dth*(dky*zt1.y);
zt5.x += dth*(dkx*zt1.x);
zt5.y += dth*(dkx*zt1.y);
zt6.x -= dth*(dkx*zt2.x - dky*zt3.x);
zt6.y -= dth*(dkx*zt2.y - dky*zt3.y);
bxyt[k+jk3] = zt4;
bxyt[k+nyv+jk3] = zt5;
bxyt[k+2*nyv+jk3] = zt6;
wp += (double) (anorm*(zt4.x*zt4.x + zt4.y*zt4.y
+ zt5.x*zt5.x + zt5.y*zt5.y + zt6.x*zt6.x + zt6.y*zt6.y));
/* update magnetic field half time step, ky < 0 */
ct3 = exyt[k1+2*nyv+jk3];
zt1.x = -ct3.y;
zt1.y = ct3.x;
ct2 = exyt[k1+nyv+jk3];
zt2.x = -ct2.y;
zt2.y = ct2.x;
ct1 = exyt[k1+jk3];
zt3.x = -ct1.y;
zt3.y = ct1.x;
zt4 = bxyt[k1+jk3];
zt5 = bxyt[k1+nyv+jk3];
zt6 = bxyt[k1+2*nyv+jk3];
zt4.x += dth*(dky*zt1.x);
zt4.y += dth*(dky*zt1.y);
zt5.x += dth*(dkx*zt1.x);
zt5.y += dth*(dkx*zt1.y);
zt6.x -= dth*(dkx*zt2.x + dky*zt3.x);
zt6.y -= dth*(dkx*zt2.y + dky*zt3.y);
/* update electric field whole time step */
zt1.x = -zt6.y;
zt1.y = zt6.x;
zt2.x = -zt5.y;
zt2.y = zt5.x;
zt3.x = -zt4.y;
zt3.y = zt4.x;
zt7 = cut[k1+jk3];
zt8 = cut[k1+nyv+jk3];
zt9 = cut[k1+2*nyv+jk3];
zt7.x = ct1.x - cdt*(dky*zt1.x) - afdt*zt7.x;
zt7.y = ct1.y - cdt*(dky*zt1.y) - afdt*zt7.y;
zt8.x = ct2.x - cdt*(dkx*zt1.x) - afdt*zt8.x;
zt8.y = ct2.y - cdt*(dkx*zt1.y) - afdt*zt8.y;
zt9.x = ct3.x + cdt*(dkx*zt2.x + dky*zt3.x) - afdt*zt9.x;
zt9.y = ct3.y + cdt*(dkx*zt2.y + dky*zt3.y) - afdt*zt9.y;
/* update magnetic field half time step and store electric field */
zt1.x = -zt9.y;
zt1.y = zt9.x;
zt2.x = -zt8.y;
zt2.y = zt8.x;
zt3.x = -zt7.y;
zt3.y = zt7.x;
exyt[k1+jk3] = zt7;
exyt[k1+nyv+jk3] = zt8;
exyt[k1+2*nyv+jk3] = zt9;
ws += (double) (anorm*(zt7.x*zt7.x + zt7.y*zt7.y
+ zt8.x*zt8.x + zt8.y*zt8.y + zt9.x*zt9.x + zt9.y*zt9.y));
zt4.x += dth*(dky*zt1.x);
zt4.y += dth*(dky*zt1.y);
zt5.x += dth*(dkx*zt1.x);
zt5.y += dth*(dkx*zt1.y);
zt6.x -= dth*(dkx*zt2.x + dky*zt3.x);
zt6.y -= dth*(dkx*zt2.y + dky*zt3.y);
bxyt[k1+jk3] = zt4;
bxyt[k1+nyv+jk3] = zt5;
bxyt[k1+2*nyv+jk3] = zt6;
wp += (double) (anorm*(zt4.x*zt4.x + zt4.y*zt4.y
+ zt5.x*zt5.x + zt5.y*zt5.y + zt6.x*zt6.x + zt6.y*zt6.y));
}
k += blockDim.x;
}
}
/* mode numbers ky = 0, ny/2 */
if (blockIdx.x==0) {
k1 = nyh;
/* for (j = 1; j < nxh; j++) { */
j = threadIdx.x;
while (j < nxh) {
if (j > 0) {
jj = nyhd*j;
jk3 = 3*nyv*j;
zt1 = ffct[jj];
dkx = dnx*(float) j;
afdt = adt*zt1.y;
/* update magnetic field half time step */
ct3 = exyt[2*nyv+jk3];
zt1.x = -ct3.y;
zt1.y = ct3.x;
ct2 = exyt[nyv+jk3];
zt2.x = -ct2.y;
zt2.y = ct2.x;
zt5 = bxyt[nyv+jk3];
zt6 = bxyt[2*nyv+jk3];
zt5.x += dth*(dkx*zt1.x);
zt5.y += dth*(dkx*zt1.y);
zt6.x -= dth*(dkx*zt2.x);
zt6.y -= dth*(dkx*zt2.y);
/* update electric field whole time step */
zt1.x = -zt6.y;
zt1.y = zt6.x;
zt2.x = -zt5.y;
zt2.y = zt5.x;
zt8 = cut[nyv+jk3];
zt9 = cut[2*nyv+jk3];
zt8.x = ct2.x - cdt*(dkx*zt1.x) - afdt*zt8.x;
zt8.y = ct2.y - cdt*(dkx*zt1.y) - afdt*zt8.y;
zt9.x = ct3.x + cdt*(dkx*zt2.x) - afdt*zt9.x;
zt9.y = ct3.y + cdt*(dkx*zt2.y) - afdt*zt9.y;
/* update magnetic field half time step and store electric field */
zt1.x = -zt9.y;
zt1.y = zt9.x;
zt2.x = -zt8.y;
zt2.y = zt8.x;
exyt[jk3] = zero;
exyt[nyv+jk3] = zt8;
exyt[2*nyv+jk3] = zt9;
ws += (double) (anorm*(zt8.x*zt8.x + zt8.y*zt8.y
+ zt9.x*zt9.x + zt9.y*zt9.y));
zt5.x += dth*(dkx*zt1.x);
zt5.y += dth*(dkx*zt1.y);
zt6.x -= dth*(dkx*zt2.x);
zt6.y -= dth*(dkx*zt2.y);
bxyt[jk3] = zero;
bxyt[nyv+jk3] = zt5;
bxyt[2*nyv+jk3] = zt6;
wp += (double) (anorm*(zt5.x*zt5.x + zt5.y*zt5.y
+ zt6.x*zt6.x + zt6.y*zt6.y));
bxyt[k1+jk3] = zero;
bxyt[k1+nyv+jk3] = zero;
bxyt[k1+2*nyv+jk3] = zero;
exyt[k1+jk3] = zero;
exyt[k1+nyv+jk3] = zero;
exyt[k1+2*nyv+jk3] = zero;
}
j += blockDim.x;
}
/* mode numbers kx = 0, nx/2 */
nxh1 = 3*nyv*nxh;
/* for (k = 1; k < nyh; k++) { */
k = threadIdx.x;
while (k < nyh) {
if (k > 0) {
k1 = ny - k;
dky = dny*(float) k;
zt1 = ffct[k];
afdt = adt*zt1.y;
/* update magnetic field half time step */
ct3 = exyt[k+2*nyv];
zt1.x = -ct3.y;
zt1.y = ct3.x;
ct1 = exyt[k];
zt3.x = -ct1.y;
zt3.y = ct1.x;
zt4 = bxyt[k];
zt6 = bxyt[k+2*nyv];
zt4.x -= dth*(dky*zt1.x);
zt4.y -= dth*(dky*zt1.y);
zt6.x += dth*(dky*zt3.x);
zt6.y += dth*(dky*zt3.y);
/* update electric field whole time step */
zt1.x = -zt6.y;
zt1.y = zt6.x;
zt3.x = -zt4.y;
zt3.y = zt4.x;
zt7 = cut[k];
zt9 = cut[k+2*nyv];
zt7.x = ct1.x + cdt*(dky*zt1.x) - afdt*zt7.x;
zt7.y = ct1.y + cdt*(dky*zt1.y) - afdt*zt7.y;
zt9.x = ct3.x - cdt*(dky*zt3.x) - afdt*zt9.x;
zt9.y = ct3.y - cdt*(dky*zt3.y) - afdt*zt9.y;
/* update magnetic field half time step and store electric field */
zt1.x = -zt9.y;
zt1.y = zt9.x;
zt3.x = -zt7.y;
zt3.y = zt7.x;
exyt[k] = zt7;
exyt[k+nyv] = zero;
exyt[k+2*nyv] = zt9;
ws += (double) (anorm*(zt7.x*zt7.x + zt7.y*zt7.y
+ zt9.x*zt9.x + zt9.y*zt9.y));
zt4.x -= dth*(dky*zt1.x);
zt4.y -= dth*(dky*zt1.y);
zt6.x += dth*(dky*zt3.x);
zt6.y += dth*(dky*zt3.y);
bxyt[k] = zt4;
bxyt[k+nyv] = zero;
bxyt[k+2*nyv] = zt6;
wp += (double) (anorm*(zt4.x*zt4.x + zt4.y*zt4.y
+ zt6.x*zt6.x + zt6.y*zt6.y));
zt4.y = -zt4.y;
zt6.y = -zt6.y;
zt7.y = -zt7.y;
zt9.y = -zt9.y;
bxyt[k1] = zt4;
bxyt[k1+nyv] = zero;
bxyt[k1+2*nyv] = zt6;
exyt[k1] = zt7;
exyt[k1+nyv] = zero;
exyt[k1+2*nyv] = zt9;
bxyt[k+nxh1] = zero;
bxyt[k+nyv+nxh1] = zero;
bxyt[k+2*nyv+nxh1] = zero;
exyt[k+nxh1] = zero;
exyt[k+nyv+nxh1] = zero;
exyt[k+2*nyv+nxh1] = zero;
bxyt[k1+nxh1] = zero;
bxyt[k1+nyv+nxh1] = zero;
bxyt[k1+2*nyv+nxh1] = zero;
exyt[k1+nxh1] = zero;
exyt[k1+nyv+nxh1] = zero;
exyt[k1+2*nyv+nxh1] = zero;
}
k += blockDim.x;
}
if (threadIdx.x==0) {
k1 = nyh;
bxyt[0] = zero;
bxyt[nyv] = zero;
bxyt[2*nyv] = zero;
exyt[0] = zero;
exyt[nyv] = zero;
exyt[2*nyv] = zero;
bxyt[k1] = zero;
bxyt[k1+nyv] = zero;
bxyt[k1+2*nyv] = zero;
exyt[k1] = zero;
exyt[k1+nyv] = zero;
exyt[k1+2*nyv] = zero;
bxyt[nxh1] = zero;
bxyt[nyv+nxh1] = zero;
bxyt[2*nyv+nxh1] = zero;
exyt[nxh1] = zero;
exyt[nyv+nxh1] = zero;
exyt[2*nyv+nxh1] = zero;
bxyt[k1+nxh1] = zero;
bxyt[k1+nyv+nxh1] = zero;
bxyt[k1+2*nyv+nxh1] = zero;
exyt[k1+nxh1] = zero;
exyt[k1+nyv+nxh1] = zero;
exyt[k1+2*nyv+nxh1] = zero;
}
}
j = blockIdx.x;
if (j <= nxh) {
/* sum transverse electric field energies for each x co-ordinate */
ss[threadIdx.x] = (float) ws;
/* synchronize threads */
__syncthreads();
lsum2(ss,blockDim.x);
/* normalize transverse electric field energy for each x co-ordinate */
if (threadIdx.x==0)
wf[j] = ss[0]*((float) (nx*ny));
/* sum magnetic energies for each x co-ordinate */
ss[threadIdx.x] = (float) wp;
/* synchronize threads */
__syncthreads();
lsum2(ss,blockDim.x);
/* normalize magnetic energy for each x co-ordinate */
if (threadIdx.x==0)
wm[j] = c2*ss[0]*((float) (nx*ny));
}
return;
}
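/*--------------------------------------------------------------------*/
/* Editorial sketch (hedged): a possible launch for gpumaxwel2t, following
   the same per-kx-block pattern as gpupois23t above: nx/2+1 blocks,
   blockDim.x floats of dynamic shared memory for the energy array ss,
   and per-j partial sums written to wf and wm. Names and block size are
   illustrative. */
static void example_launch_gpumaxwel2t(float2 *exyt, float2 *bxyt,
                                       float2 *cut, float2 *ffct,
                                       float ci, float dt, float *wf,
                                       float *wm, int nx, int ny,
                                       int nxvh, int nyv, int nxhd,
                                       int nyhd, int nblock) {
   dim3 dimBlock(nblock);
   dim3 dimGrid(nx/2+1);               /* one block per kx index */
   int ns = nblock*sizeof(float);      /* shared array ss[]      */
   gpumaxwel2t<<<dimGrid,dimBlock,ns>>>(exyt,bxyt,cut,ffct,ci,dt,wf,wm,
                                        nx,ny,nxvh,nyv,nxhd,nyhd);
}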
/*--------------------------------------------------------------------*/
__global__ void gpuemfield2t(float2 fxyt[], float2 exyt[],
float2 ffct[], int isign, int nx, int ny,
int nxvh, int nyv, int nxhd, int nyhd) {
/* this subroutine either adds complex vector fields if isign > 0
or copies complex vector fields if isign < 0
includes additional smoothing
local data */
int i, j, k, nxh, nyh, nxh1, k1, jj, jk3;
float at1;
float2 zero, zt1, zt2;
nxh = nx/2;
nyh = 1 > ny/2 ? 1 : ny/2;
nxh1 = 3*nyv*nxh;
zero.x = 0.0f;
zero.y = 0.0f;
/* add the fields */
if (isign > 0) {
/* for (j = 0; j < nxh; j++) { */
j = blockIdx.x;
if (j < nxh) {
jj = nyhd*j;
jk3 = 3*nyv*j;
/* for (k = 1; k < nyh; k++) { */
k = threadIdx.x;
while (k < nyh) {
if (k > 0) {
k1 = ny - k;
zt1 = ffct[k+jj];
at1 = zt1.y;
for (i = 0; i < 3; i++) {
zt1 = exyt[k+nyv*i+jk3];
zt2 = fxyt[k+nyv*i+jk3];
zt2.x += at1*zt1.x;
zt2.y += at1*zt1.y;
fxyt[k+nyv*i+jk3] = zt2;
zt1 = exyt[k1+nyv*i+jk3];
zt2 = fxyt[k1+nyv*i+jk3];
zt2.x += at1*zt1.x;
zt2.y += at1*zt1.y;
fxyt[k1+nyv*i+jk3] = zt2;
}
}
k += blockDim.x;
}
}
if (blockIdx.x==0) {
k1 = nyh;
/* for (j = 0; j < nxh; j++) { */
j = threadIdx.x;
while (j < nxh) {
jj = nyhd*j;
jk3 = 3*nyv*j;
zt1 = ffct[jj];
at1 = zt1.y;
for (i = 0; i < 3; i++) {
zt1 = exyt[nyv*i+jk3];
zt2 = fxyt[nyv*i+jk3];
zt2.x += at1*zt1.x;
zt2.y += at1*zt1.y;
fxyt[nyv*i+jk3] = zt2;
zt1 = exyt[k1+nyv*i+jk3];
zt2 = fxyt[k1+nyv*i+jk3];
zt2.x += at1*zt1.x;
zt2.y += at1*zt1.y;
fxyt[k1+nyv*i+jk3] = zt2;
}
j += blockDim.x;
}
/* for (k = 1; k < nyh; k++) { */
k = threadIdx.x;
while (k < nyh) {
if (k > 0) {
k1 = ny - k;
for (i = 0; i < 3; i++) {
fxyt[k+nyv*i+nxh1] = zero;
fxyt[k1+nyv*i+nxh1] = zero;
}
}
k += blockDim.x;
}
if (threadIdx.x==0) {
k1 = nyh;
for (i = 0; i < 3; i++) {
fxyt[nyv*i+nxh1] = zero;
fxyt[k1+nyv*i+nxh1] = zero;
}
}
}
}
/* copy the fields */
else if (isign < 0) {
/* for (j = 0; j < nxh; j++) { */
j = blockIdx.x;
if (j < nxh) {
jj = nyhd*j;
jk3 = 3*nyv*j;
/* for (k = 1; k < nyh; k++) { */
k = threadIdx.x;
while (k < nyh) {
if (k > 0) {
k1 = ny - k;
zt1 = ffct[k+jj];
at1 = zt1.y;
for (i = 0; i < 3; i++) {
zt1 = exyt[k+nyv*i+jk3];
zt1.x = at1*zt1.x;
zt1.y = at1*zt1.y;
fxyt[k+nyv*i+jk3] = zt1;
zt1 = exyt[k1+nyv*i+jk3];
zt1.x = at1*zt1.x;
zt1.y = at1*zt1.y;
fxyt[k1+nyv*i+jk3] = zt1;
}
}
k += blockDim.x;
}
}
if (blockIdx.x==0) {
k1 = nyh;
/* for (j = 0; j < nxh; j++) { */
j = threadIdx.x;
while (j < nxh) {
jj = nyhd*j;
jk3 = 3*nyv*j;
zt1 = ffct[jj];
at1 = zt1.y;
for (i = 0; i < 3; i++) {
zt1 = exyt[nyv*i+jk3];
zt1.x = at1*zt1.x;
zt1.y = at1*zt1.y;
fxyt[nyv*i+jk3] = zt1;
zt1 = exyt[k1+nyv*i+jk3];
zt1.x = at1*zt1.x;
zt1.y = at1*zt1.y;
fxyt[k1+nyv*i+jk3] = zt1;
}
j += blockDim.x;
}
/* for (k = 1; k < nyh; k++) { */
k = threadIdx.x;
while (k < nyh) {
if (k > 0) {
k1 = ny - k;
for (i = 0; i < 3; i++) {
fxyt[k+nyv*i+nxh1] = zero;
fxyt[k1+nyv*i+nxh1] = zero;
}
}
k += blockDim.x;
}
if (threadIdx.x==0) {
k1 = nyh;
for (i = 0; i < 3; i++) {
fxyt[nyv*i+nxh1] = zero;
fxyt[k1+nyv*i+nxh1] = zero;
}
}
}
}
return;
}
/*--------------------------------------------------------------------*/
__global__ void gpuctpose4(float2 f[], float2 g[], int nx, int ny,
int nxv, int nyv) {
/* complex transpose using blocking algorithm with gaps */
/* local data */
int j, k, js, ks, joff, koff, mx, mxv;
/* The size of the shared memory array is as follows: */
/* float2 shm2[(mx + 1)*mx]; */
extern __shared__ float2 shm2[];
mx = blockDim.x;
mxv = mx + 1;
joff = mx*blockIdx.x;
koff = mx*blockIdx.y;
js = threadIdx.x;
ks = threadIdx.y;
/* copy into block */
j = js + joff;
k = ks + koff;
if ((j < nx) && (k < ny)) {
shm2[js+mxv*ks] = f[j+nxv*k];
}
__syncthreads();
/* copy out from block */
j = ks + joff;
k = js + koff;
if ((j < nx) && (k < ny)) {
g[k+nyv*j] = shm2[ks+mxv*js];
}
return;
}
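/*--------------------------------------------------------------------*/
/* Editorial sketch (hedged): a possible launch for gpuctpose4. The kernel
   uses a square 2D thread block (mx = blockDim.x, with threadIdx.y also
   striding over the tile), a 2D grid of mx-sized tiles covering nx by ny,
   and (mx+1)*mx float2 elements of dynamic shared memory for the padded
   blocking buffer. Names and the tile size mx are illustrative. */
static void example_launch_gpuctpose4(float2 *f, float2 *g, int nx,
                                      int ny, int nxv, int nyv, int mx) {
   dim3 dimBlock(mx,mx);                        /* square tile of threads */
   dim3 dimGrid((nx-1)/mx+1,(ny-1)/mx+1);       /* tiles covering nx x ny */
   int ns = (mx+1)*mx*sizeof(float2);           /* padded shared buffer   */
   gpuctpose4<<<dimGrid,dimBlock,ns>>>(f,g,nx,ny,nxv,nyv);
}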
/*--------------------------------------------------------------------*/
__global__ void gpuctpose4n(float2 fn[], float2 gn[], int nx, int ny,
int ndim, int nxv, int nyv) {
/* complex vector transpose using blocking algorithm with gaps */
/* ndim = vector dimension */
/* local data */
int i, j, k, js, ks, joff, koff, mx, mxv, nmxv, nnxv, nnyv, jj, kk;
/* The size of the shared memory array is as follows: */
/* float2 shmn2[ndim*(mx + 1)*mx]; */
extern __shared__ float2 shmn2[];
mx = blockDim.x;
mxv = mx + 1;
joff = mx*blockIdx.x;
koff = mx*blockIdx.y;
js = threadIdx.x;
ks = threadIdx.y;
nmxv = ndim*mxv;
nnxv = ndim*nxv;
nnyv = ndim*nyv;
/* copy into block */
j = js + joff;
k = ks + koff;
if ((j < nx) && (k < ny)) {
jj = j + nnxv*k;
kk = js + nmxv*ks;
for (i = 0; i < ndim; i++) {
shmn2[kk+mxv*i] = fn[jj+nxv*i];
}
}
__syncthreads();
/* copy out from block */
j = ks + joff;
k = js + koff;
if ((j < nx) && (k < ny)) {
kk = k + nnyv*j;
jj = ks + nmxv*js;
for (i = 0; i < ndim; i++) {
gn[kk+nyv*i] = shmn2[jj+mxv*i];
}
}
return;
}
/*--------------------------------------------------------------------*/
__global__ void gpufft2rcxs(float2 f[], int isign, int mixup[],
float2 sct[], int indx, int indy, int nyi,
int nyp, int nxhd, int nyd, int nxhyd,
int nxyhd, int nsize) {
/* this subroutine performs the x part of a two dimensional real to
complex fast fourier transform and its inverse, for a subset of y,
using complex arithmetic.
for isign = (-1,1), input: all, output: f
for isign = -1, approximate flop count: N*(5*log2(N) + 19/2)
for isign = 1, approximate flop count: N*(5*log2(N) + 15/2)
where N = (nx/2)*ny
indx/indy = exponent which determines length in x/y direction,
where nx=2**indx, ny=2**indy
if isign = -1, an inverse fourier transform in x is performed
f[m][n] = (1/nx*ny)*sum(f[k][j]*exp(-sqrt(-1)*2pi*n*j/nx))
if isign = 1, a forward fourier transform in x is performed
f[k][j] = sum(f[m][n]*exp(sqrt(-1)*2pi*n*j/nx))
mixup = array of bit reversed addresses
sct = sine/cosine table
nyi = initial y index used
nyp = number of y indices used
nxhd = first dimension of f >= nx/2+1
nyd = second dimension of f >= ny
nxhyd = maximum of (nx/2,ny)
nxyhd = maximum of (nx,ny)/2
nsize = amount of scratch complex memory used
fourier coefficients are stored as follows:
f[k][j].x, f[k][j].y = real, imaginary part of mode j,k, where
0 <= j < nx/2+1 and 0 <= k < ny
written by viktor k. decyk, ucla
local data */
int indx1, indx1y, nx, nxh, nxhh, ny, nxy, nxhy, nyt;
int nrx, i, j, k, l, j1, j2, k1, k2, ns, ns2, km, kmr, jj, kk;
int n, nn, in, nt, nh;
float ani, at1, at2;
float2 t1, t2, t3;
/* The size of the shared memory array is as follows: */
/* float2 s[nsize]; */
extern __shared__ float2 s[];
indx1 = indx - 1;
indx1y = indx1 > indy ? indx1 : indy;
nx = 1L<<indx;
nxh = nx/2;
nxhh = nx/4;
ny = 1L<<indy;
nxy = nx > ny ? nx : ny;
nxhy = 1L<<indx1y;
nyt = nyi + nyp - 1;
/* calculate extent of shared memory usage: */
/* nn = size of shared memory in x */
nn = nxh;
in = 0;
while (nn > nsize) {
nn = nn/2;
in += 1;
}
/* nt = number of iterations in x */
nt = 1L<<in;
in = indx1 - in;
nh = nn/2;
/* inverse fourier transform */
if (isign < 0) {
/* bit-reverse array elements in x */
nrx = nxhy/nxh;
/* for (k = nyi-1; k < nyt; k++) { */
k = blockIdx.x + nyi - 1;
if (k < nyt) {
jj = nxhd*k;
/* for (j = 0; j < nxh; j++) { */
j = threadIdx.x;
while (j < nxh) {
j1 = (mixup[j] - 1)/nrx;
if (j < j1) {
t1 = f[j1+jj];
f[j1+jj] = f[j+jj];
f[j+jj] = t1;
}
j += blockDim.x;
}
/* synchronize threads */
__syncthreads();
}
/* copy data to local memory */
nrx = nxy/nxh;
/* for (i = nyi-1; i < nyt; i++) { */
i = blockIdx.x + nyi - 1;
if (i < nyt) {
jj = nxhd*i;
for (n = 0; n < nt; n++) {
/* for (kk = 0; kk < nn; kk++) { */
kk = threadIdx.x;
while (kk < nn) {
s[kk] = f[kk+nn*n+jj];
kk += blockDim.x;
}
/* synchronize threads */
__syncthreads();
/* transform using local data in x */
ns = 1;
for (l = 0; l < in; l++) {
ns2 = ns + ns;
km = nxhh/ns;
kmr = km*nrx;
/* for (kk = 0; kk < nh; kk++) { */
kk = threadIdx.x;
while (kk < nh) {
k = kk/ns;
j = kk - ns*k;
k1 = ns2*k;
k2 = k1 + ns;
j1 = j + k1;
j2 = j + k2;
t1 = sct[kmr*j];
t2 = s[j2];
at1 = t1.x*t2.x - t1.y*t2.y;
at2 = t1.x*t2.y + t1.y*t2.x;
t2 = s[j1];
t3.x = t2.x - at1;
t3.y = t2.y - at2;
s[j2] = t3;
t3.x = t2.x + at1;
t3.y = t2.y + at2;
s[j1] = t3;
kk += blockDim.x;
}
ns = ns2;
/* synchronize threads */
__syncthreads();
}
/* copy data to global memory */
/* for (kk = 0; kk < nn; kk++) { */
kk = threadIdx.x;
while (kk < nn) {
f[kk+nn*n+jj] = s[kk];
kk += blockDim.x;
}
/* synchronize threads */
__syncthreads();
}
/* transform using global data in x */
ns = 1L<<in;
for (l = in; l < indx1; l++) {
ns2 = ns + ns;
km = nxhh/ns;
kmr = km*nrx;
/* for (kk = 0; kk < nxhh; kk++) { */
kk = threadIdx.x;
while (kk < nxhh) {
k = kk/ns;
j = kk - ns*k;
k1 = ns2*k;
k2 = k1 + ns;
j1 = j + k1;
j2 = j + k2;
t1 = sct[kmr*j];
t2 = f[j2+jj];
at1 = t1.x*t2.x - t1.y*t2.y;
at2 = t1.x*t2.y + t1.y*t2.x;
t2 = f[j1+jj];
t3.x = t2.x - at1;
t3.y = t2.y - at2;
f[j2+jj] = t3;
t3.x = t2.x + at1;
t3.y = t2.y + at2;
f[j1+jj] = t3;
kk += blockDim.x;
}
ns = ns2;
/* synchronize threads */
__syncthreads();
}
}
/* unscramble coefficients and normalize */
kmr = nxy/nx;
ani = 0.5f/(((float) nx)*((float) ny));
/* for (k = nyi-1; k < nyt; k++) */
k = blockIdx.x + nyi - 1;
if (k < nyt) {
jj = nxhd*k;
/* for (j = 1; j < nxhh; j++) { */
j = threadIdx.x;
while (j < nxhh) {
if (j > 0) {
t3 = sct[kmr*j];
at1 = t3.y;
at2 = -t3.x;
t2 = f[nxh-j+jj];
t2.y = -t2.y;
t3 = f[j+jj];
t1.x = t3.x + t2.x;
t1.y = t3.y + t2.y;
t3.x -= t2.x;
t3.y -= t2.y;
t2.x = t3.x*at1 - t3.y*at2;
t2.y = t3.x*at2 + t3.y*at1;
t3.x = ani*(t1.x + t2.x);
t3.y = ani*(t1.y + t2.y);
f[j+jj] = t3;
t3.x = ani*(t1.x - t2.x);
t3.y = ani*(t2.y - t1.y);
f[nxh-j+jj] = t3;
}
j += blockDim.x;
}
if (threadIdx.x==0) {
ani = 2.0f*ani;
t3 = f[nxhh+jj];
t3.x = ani*t3.x;
t3.y = -ani*t3.y;
f[nxhh+jj] = t3;
t3 = f[jj];
at1 = t3.x;
at2 = t3.y;
t3.x = ani*(at1 - at2);
t3.y = 0.0f;
f[nxh+jj] = t3;
t3.x = ani*(at1 + at2);
f[jj] = t3;
}
/* synchronize threads */
__syncthreads();
}
}
/* forward fourier transform */
if (isign > 0) {
/* scramble coefficients */
kmr = nxy/nx;
/* for (k = nyi-1; k < nyt; k++) { */
k = blockIdx.x + nyi - 1;
if (k < nyt) {
jj = nxhd*k;
/* for (j = 1; j < nxhh; j++) { */
j = threadIdx.x;
while (j < nxhh) {
if (j > 0) {
t3 = sct[kmr*j];
at1 = t3.y;
at2 = t3.x;
t2 = f[nxh-j+jj];
t2.y = -t2.y;
t3 = f[j+jj];
t1.x = t3.x + t2.x;
t1.y = t3.y + t2.y;
t3.x -= t2.x;
t3.y -= t2.y;
t2.x = t3.x*at1 - t3.y*at2;
t2.y = t3.x*at2 + t3.y*at1;
t3.x = t1.x + t2.x;
t3.y = t1.y + t2.y;
f[j+jj] = t3;
t3.x = t1.x - t2.x;
t3.y = t2.y - t1.y;
f[nxh-j+jj] = t3;
}
j += blockDim.x;
}
if (threadIdx.x==0) {
t3 = f[nxhh+jj];
t3.x = 2.0f*t3.x;
t3.y = -2.0f*t3.y;
f[nxhh+jj] = t3;
t3 = f[jj];
at1 = t3.x;
t3 = f[nxh+jj];
at2 = t3.x;
t3.x = at1 + at2;
t3.y = at1 - at2;
f[jj] = t3;
}
/* synchronize threads */
__syncthreads();
}
/* bit-reverse array elements in x */
nrx = nxhy/nxh;
/* for (k = nyi-1; k < nyt; k++) { */
k = blockIdx.x + nyi - 1;
if (k < nyt) {
jj = nxhd*k;
/* for (j = 0; j < nxh; j++) { */
j = threadIdx.x;
while (j < nxh) {
j1 = (mixup[j] - 1)/nrx;
if (j < j1) {
t1 = f[j1+jj];
f[j1+jj] = f[j+jj];
f[j+jj] = t1;
}
j += blockDim.x;
}
/* synchronize threads */
__syncthreads();
}
/* copy data to local memory */
nrx = nxy/nxh;
/* for (i = nyi-1; i < nyt; i++) { */
i = blockIdx.x + nyi - 1;
if (i < nyt) {
jj = nxhd*i;
for (n = 0; n < nt; n++) {
/* for (kk = 0; kk < nn; kk++) { */
kk = threadIdx.x;
while (kk < nn) {
s[kk] = f[kk+nn*n+jj];
kk += blockDim.x;
}
/* synchronize threads */
__syncthreads();
/* transform using local data in x */
ns = 1;
for (l = 0; l < in; l++) {
ns2 = ns + ns;
km = nxhh/ns;
kmr = km*nrx;
/* for (kk = 0; kk < nh; kk++) { */
kk = threadIdx.x;
while (kk < nh) {
k = kk/ns;
j = kk - ns*k;
k1 = ns2*k;
k2 = k1 + ns;
j1 = j + k1;
j2 = j + k2;
t1 = sct[kmr*j];
t1.y = -t1.y;
t2 = s[j2];
at1 = t1.x*t2.x - t1.y*t2.y;
at2 = t1.x*t2.y + t1.y*t2.x;
t2 = s[j1];
t3.x = t2.x - at1;
t3.y = t2.y - at2;
s[j2] = t3;
t3.x = t2.x + at1;
t3.y = t2.y + at2;
s[j1] = t3;
kk += blockDim.x;
}
ns = ns2;
/* synchronize threads */
__syncthreads();
}
/* copy data to global memory */
/* for (kk = 0; kk < nn; kk++) { */
kk = threadIdx.x;
while (kk < nn) {
f[kk+nn*n+jj] = s[kk];
kk += blockDim.x;
}
/* synchronize threads */
__syncthreads();
}
/* transform using global data in x */
ns = 1L<<in;
for (l = in; l < indx1; l++) {
ns2 = ns + ns;
km = nxhh/ns;
kmr = km*nrx;
/* for (kk = 0; kk < nxhh; kk++) { */
kk = threadIdx.x;
while (kk < nxhh) {
k = kk/ns;
j = kk - ns*k;
k1 = ns2*k;
k2 = k1 + ns;
j1 = j + k1;
j2 = j + k2;
t1 = sct[kmr*j];
t1.y = -t1.y;
t2 = f[j2+jj];
at1 = t1.x*t2.x - t1.y*t2.y;
at2 = t1.x*t2.y + t1.y*t2.x;
t2 = f[j1+jj];
t3.x = t2.x - at1;
t3.y = t2.y - at2;
f[j2+jj] = t3;
t3.x = t2.x + at1;
t3.y = t2.y + at2;
f[j1+jj] = t3;
kk += blockDim.x;
}
ns = ns2;
/* synchronize threads */
__syncthreads();
}
}
}
return;
}
/*--------------------------------------------------------------------*/
__global__ void gpufft2rcys(float2 g[], int isign, int mixup[],
float2 sct[], int indx, int indy, int nxi,
int nxp, int nxhd, int nyd, int nxhyd,
int nxyhd, int nsize) {
/* this subroutine performs the y part of a two dimensional real to
complex fast fourier transform and its inverse, for a subset of x,
using complex arithmetic, with data not packed
for isign = (-1,1), input: all, output: g
for isign = -1, approximate flop count: N*(5*log2(N) + 19/2)
for isign = 1, approximate flop count: N*(5*log2(N) + 15/2)
where N = (nx/2)*ny
indx/indy = exponent which determines length in x/y direction,
where nx=2**indx, ny=2**indy
if isign = -1, an inverse fourier transform in y is performed
g[n][m] = sum(g[j][k]*exp(-sqrt(-1)*2pi*m*k/ny))
if isign = 1, a forward fourier transform in y is performed
g[j][k] = sum(g[n][m]*exp(sqrt(-1)*2pi*m*k/ny))
mixup = array of bit reversed addresses
sct = sine/cosine table
nxi = initial x index used
nxp = number of x indices used
nxhd = second dimension of g >= nx/2+1
nyd = first dimension of g >= ny
nxhyd = maximum of (nx/2,ny)
nxyhd = maximum of (nx,ny)/2
nsize = amount of scratch complex memory used
fourier coefficients are stored as follows:
g[j][k].x, g[j][k].y = real, imaginary part of mode j,k, where
0 <= j < nx/2+1 and 0 <= k < ny
written by viktor k. decyk, ucla
local data */
int indx1, indx1y, nx, ny, nyh, nxy, nxhy, nxt;
int nry, i, j, k, l, j1, j2, k1, k2, ns, ns2, km, kmr, koff, kk;
int n, nn, in, nt, nh;
float at1, at2;
float2 t1, t2, t3;
/* The size of the shared memory array is as follows: */
/* float2 s[nsize]; */
extern __shared__ float2 s[];
indx1 = indx - 1;
indx1y = indx1 > indy ? indx1 : indy;
nx = 1L<<indx;
ny = 1L<<indy;
nyh = ny/2;
nxy = nx > ny ? nx : ny;
nxhy = 1L<<indx1y;
nxt = nxi + nxp - 1;
/* calculate extent of shared memory usage: */
/* nn = size of shared memory in y */
nn = ny;
in = 0;
while (nn > nsize) {
nn = nn/2;
in += 1;
}
/* nt = number of iterations in y */
nt = 1L<<in;
in = indy - in;
nh = nn/2;
/* bit-reverse array elements in y */
nry = nxhy/ny;
/* for (j = nxi-1; j < nxt; j++) { */
j = blockIdx.x + nxi - 1;
if (j < nxt) {
kk = nyd*j;
/* for (k = 0; k < ny; k++) { */
k = threadIdx.x;
while (k < ny) {
k1 = (mixup[k] - 1)/nry;
if (k < k1) {
t1 = g[k1+kk];
g[k1+kk] = g[k+kk];
g[k+kk] = t1;
}
k += blockDim.x;
}
/* synchronize threads */
__syncthreads();
}
nry = nxy/ny;
/* inverse fourier transform in y */
if (isign < 0) {
/* copy data to local memory */
/* for (i = nxi-1; i < nxt; i++) { */
i = blockIdx.x + nxi - 1;
if (i < nxt) {
koff = nyd*i;
for (n = 0; n < nt; n++) {
/* for (kk = 0; kk < nn; kk++) { */
kk = threadIdx.x;
while (kk < nn) {
s[kk] = g[kk+nn*n+koff];
kk += blockDim.x;
}
/* synchronize threads */
__syncthreads();
/* transform using local data in y */
ns = 1;
for (l = 0; l < in; l++) {
ns2 = ns + ns;
km = nyh/ns;
kmr = km*nry;
/* for (kk = 0; kk < nh; kk++) { */
kk = threadIdx.x;
while (kk < nh) {
k = kk/ns;
j = kk - ns*k;
k1 = ns2*k;
k2 = k1 + ns;
j1 = j + k1;
j2 = j + k2;
t1 = sct[kmr*j];
t2 = s[j2];
at1 = t1.x*t2.x - t1.y*t2.y;
at2 = t1.x*t2.y + t1.y*t2.x;
t2 = s[j1];
t3.x = t2.x - at1;
t3.y = t2.y - at2;
s[j2] = t3;
t3.x = t2.x + at1;
t3.y = t2.y + at2;
s[j1] = t3;
kk += blockDim.x;
}
ns = ns2;
/* synchronize threads */
__syncthreads();
}
/* copy data to global memory */
/* for (kk = 0; kk < nn; kk++) { */
kk = threadIdx.x;
while (kk < nn) {
g[kk+nn*n+koff] = s[kk];
kk += blockDim.x;
}
/* synchronize threads */
__syncthreads();
}
/* transform using global data in y */
ns = 1L<<in;
for (l = in; l < indy; l++) {
ns2 = ns + ns;
km = nyh/ns;
kmr = km*nry;
/* for (kk = 0; kk < nyh; kk++) { */
kk = threadIdx.x;
while (kk < nyh) {
k = kk/ns;
j = kk - ns*k;
k1 = ns2*k;
k2 = k1 + ns;
j1 = j + k1;
j2 = j + k2;
t1 = sct[kmr*j];
t2 = g[j2+koff];
at1 = t1.x*t2.x - t1.y*t2.y;
at2 = t1.x*t2.y + t1.y*t2.x;
t2 = g[j1+koff];
t3.x = t2.x - at1;
t3.y = t2.y - at2;
g[j2+koff] = t3;
t3.x = t2.x + at1;
t3.y = t2.y + at2;
g[j1+koff] = t3;
kk += blockDim.x;
}
ns = ns2;
/* synchronize threads */
__syncthreads();
}
}
}
/* forward fourier transform in y */
if (isign > 0) {
/* copy data to local memory */
/* for (i = nxi-1; i < nxt; i++) { */
i = blockIdx.x + nxi - 1;
if (i < nxt) {
koff = nyd*i;
for (n = 0; n < nt; n++) {
/* for (kk = 0; kk < nn; kk++) { */
kk = threadIdx.x;
while (kk < nn) {
s[kk] = g[kk+nn*n+koff];
kk += blockDim.x;
}
/* synchronize threads */
__syncthreads();
/* transform using local data in y */
ns = 1;
for (l = 0; l < in; l++) {
ns2 = ns + ns;
km = nyh/ns;
kmr = km*nry;
/* for (kk = 0; kk < nh; kk++) { */
kk = threadIdx.x;
while (kk < nh) {
k = kk/ns;
j = kk - ns*k;
k1 = ns2*k;
k2 = k1 + ns;
j1 = j + k1;
j2 = j + k2;
t1 = sct[kmr*j];
t1.y = -t1.y;
t2 = s[j2];
at1 = t1.x*t2.x - t1.y*t2.y;
at2 = t1.x*t2.y + t1.y*t2.x;
t2 = s[j1];
t3.x = t2.x - at1;
t3.y = t2.y - at2;
s[j2] = t3;
t3.x = t2.x + at1;
t3.y = t2.y + at2;
s[j1] = t3;
kk += blockDim.x;
}
ns = ns2;
/* synchronize threads */
__syncthreads();
}
/* copy data to global memory */
/* for (kk = 0; kk < nn; kk++) { */
kk = threadIdx.x;
while (kk < nn) {
g[kk+nn*n+koff] = s[kk];
kk += blockDim.x;
}
/* synchronize threads */
__syncthreads();
}
/* transform using global data in y */
ns = 1L<<in;
for (l = in; l < indy; l++) {
ns2 = ns + ns;
km = nyh/ns;
kmr = km*nry;
/* for (kk = 0; kk < nyh; kk++) { */
kk = threadIdx.x;
while (kk < nyh) {
k = kk/ns;
j = kk - ns*k;
k1 = ns2*k;
k2 = k1 + ns;
j1 = j + k1;
j2 = j + k2;
t1 = sct[kmr*j];
t1.y = -t1.y;
t2 = g[j2+koff];
at1 = t1.x*t2.x - t1.y*t2.y;
at2 = t1.x*t2.y + t1.y*t2.x;
t2 = g[j1+koff];
t3.x = t2.x - at1;
t3.y = t2.y - at2;
g[j2+koff] = t3;
t3.x = t2.x + at1;
t3.y = t2.y + at2;
g[j1+koff] = t3;
kk += blockDim.x;
}
ns = ns2;
/* synchronize threads */
__syncthreads();
}
}
}
return;
}
/*--------------------------------------------------------------------*/
__global__ void gpusum1(float a[], float *sa, int nx) {
/* 1d serial sum reduction */
/* nx = length of data */
/* sa = sum(a) */
/* local data */
int j, js, jb, mx, joff, mxm;
float t;
/* The size of the shared memory array is as follows: */
/* ss[blockDim.x]; */
extern __shared__ float ss[];
mx = blockDim.x;
js = threadIdx.x;
jb = blockIdx.x;
joff = mx*jb;
j = js + joff;
/* copy global data to shared memory */
if (j < nx) ss[js] = a[j];
/* synchronize to make sure each thread in block has the data */
__syncthreads();
if (js==0) {
mxm = nx - joff;
if (mxm > mx) mxm = mx;
/* perform serial local sum reduction: result in t */
t = 0.0f;
for (j = 0; j < mxm; j++) {
t += ss[j];
}
/* accumulate results to global memory for each block */
/* for devices with compute capability 2.x */
atomicAdd(&sa[0],t);
}
return;
}
/*--------------------------------------------------------------------*/
__global__ void gpusum2(float a[], float d[], int nx) {
/* segmented 1d sum reductions, each of length mx = blockDim.x */
/* nx = length of data */
/* forall (j = 1:nbx); d(j) = sum(a(1+mx*(j-1):min(nx,mx*j))) */
/* local data */
int j, js, jb, mx, joff, mxm;
/* The size of the shared memory array is as follows: */
/* ss[blockDim.x]; */
extern __shared__ float ss[];
mx = blockDim.x;
js = threadIdx.x;
jb = blockIdx.x;
joff = mx*jb;
j = js + joff;
/* copy global data to shared memory */
if (j < nx) ss[js] = a[j];
/* synchronize to make sure each thread in block has the data */
__syncthreads();
mxm = nx - joff;
if (mxm > mx) mxm = mx;
/* perform parallel local sum reduction: result in ss[0] */
lsum2(ss,mxm);
/* write out result to global memory for each block */
if (js==0) d[jb] = ss[0];
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpubppush23l(float *ppart, float *fxy, float *bxy,
int *kpic, float qbm, float dt, float dtc,
float *ek, int idimp, int nppmx, int nx,
int ny, int mx, int my, int nxv, int nyv,
int mx1, int mxy1, int ipbc) {
/* Push Interface for C */
int n, m, ns;
dim3 dimBlock(nblock_size);
n = mxy1;
m = (n - 1)/maxgsx + 1;
n = n < maxgsx ? n : maxgsx;
dim3 dimGrid(n,m);
ns = 6*(mx + 1)*(my + 1)*sizeof(float);
n = nblock_size*sizeof(float);
ns = ns > n ? ns : n;
crc = hipGetLastError();
hipLaunchKernelGGL(( gpubppush23l), dim3(dimGrid),dim3(dimBlock),ns, 0, ppart,fxy,bxy,kpic,qbm,dt,dtc,
ek,idimp,nppmx,nx,ny,mx,my,nxv,
nyv,mx1,mxy1,ipbc);
hipDeviceSynchronize();
crc = hipGetLastError();
if (crc) {
printf("gpubppush23l error=%d:%s\n",crc,hipGetErrorString(crc));
exit(1);
}
return;
}
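/* note on the launch geometry used here and in the other particle */
/* wrappers: the grid x dimension is limited to maxgsx, so the mxy1 */
/* tiles are folded into an n x m grid with n = min(mxy1,maxgsx) and */
/* m = (mxy1-1)/maxgsx + 1; the kernel recovers the tile number as */
/* k = blockIdx.x + gridDim.x*blockIdx.y and ignores blocks with */
/* k >= mxy1. for example (illustrative numbers only): mxy1 = 70000 */
/* with maxgsx = 65535 gives n = 65535 and m = 2 */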
/*--------------------------------------------------------------------*/
extern "C" void cgpubppushf23l(float *ppart, float *fxy, float *bxy,
int *kpic, int *ncl, int *ihole,
float qbm, float dt, float dtc,
float *ek, int idimp, int nppmx, int nx,
int ny, int mx, int my, int nxv, int nyv,
int mx1, int mxy1, int ntmax, int *irc) {
/* Push Interface for C */
int n, m, ns;
dim3 dimBlock(nblock_size);
n = mxy1;
m = (n - 1)/maxgsx + 1;
n = n < maxgsx ? n : maxgsx;
dim3 dimGrid(n,m);
ns = 6*(mx + 1)*(my + 1)*sizeof(float) + nblock_size*sizeof(int);
n = nblock_size*sizeof(float);
ns = ns > n ? ns : n;
ns += 9*sizeof(int);
crc = hipGetLastError();
hipLaunchKernelGGL(( gpubppushf23l), dim3(dimGrid),dim3(dimBlock),ns, 0, ppart,fxy,bxy,kpic,ncl,ihole,
qbm,dt,dtc,ek,idimp,nppmx,nx,
ny,mx,my,nxv,nyv,mx1,mxy1,
ntmax,irc);
hipDeviceSynchronize();
crc = hipGetLastError();
if (crc) {
printf("gpubppushf23l error=%d:%s\n",crc,hipGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpurbppush23l(float ppart[], float fxy[], float bxy[],
int kpic[], float qbm, float dt,
float dtc, float ci, float *ek,
int idimp, int nppmx, int nx, int ny,
int mx, int my, int nxv, int nyv,
int mx1, int mxy1, int ipbc) {
/* Push Interface for C */
int n, m, ns;
dim3 dimBlock(nblock_size);
n = mxy1;
m = (n - 1)/maxgsx + 1;
n = n < maxgsx ? n : maxgsx;
dim3 dimGrid(n,m);
ns = 6*(mx + 1)*(my + 1)*sizeof(float);
n = nblock_size*sizeof(float);
ns = ns > n ? ns : n;
crc = hipGetLastError();
hipLaunchKernelGGL(( gpurbppush23l), dim3(dimGrid),dim3(dimBlock),ns, 0, ppart,fxy,bxy,kpic,qbm,dt,dtc,
ci,ek,idimp,nppmx,nx,ny,mx,my,
nxv,nyv,mx1,mxy1,ipbc);
hipDeviceSynchronize();
crc = hipGetLastError();
if (crc) {
printf("gpurbppush23l error=%d:%s\n",crc,hipGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpurbppushf23l(float *ppart, float *fxy, float *bxy,
int *kpic, int *ncl, int *ihole,
float qbm, float dt, float dtc,
float ci, float *ek, int idimp,
int nppmx, int nx, int ny, int mx,
int my, int nxv, int nyv, int mx1,
int mxy1, int ntmax, int *irc) {
/* Push Interface for C */
int n, m, ns;
dim3 dimBlock(nblock_size);
n = mxy1;
m = (n - 1)/maxgsx + 1;
n = n < maxgsx ? n : maxgsx;
dim3 dimGrid(n,m);
ns = 6*(mx + 1)*(my + 1)*sizeof(float) + nblock_size*sizeof(int);
n = nblock_size*sizeof(float);
ns = ns > n ? ns : n;
ns += 9*sizeof(int);
crc = hipGetLastError();
hipLaunchKernelGGL(( gpurbppushf23l), dim3(dimGrid),dim3(dimBlock),ns, 0, ppart,fxy,bxy,kpic,ncl,ihole,
qbm,dt,dtc,ci,ek,idimp,nppmx,
nx,ny,mx,my,nxv,nyv,mx1,mxy1,
ntmax,irc);
hipDeviceSynchronize();
crc = hipGetLastError();
if (crc) {
printf("gpurbppushf23l error=%d:%s\n",crc,
hipGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpu2ppost2l(float *ppart, float *q, int *kpic,
float qm, int nppmx, int idimp, int mx,
int my, int nxv, int nyv, int mx1,
int mxy1) {
/* Deposit Interface for C */
int n, m, ns;
dim3 dimBlock(nblock_size);
n = mxy1;
m = (n - 1)/maxgsx + 1;
n = n < maxgsx ? n : maxgsx;
dim3 dimGrid(n,m);
ns = (mx + 1)*(my + 1)*sizeof(float);
crc = hipGetLastError();
hipLaunchKernelGGL(( gpu2ppost2l), dim3(dimGrid),dim3(dimBlock),ns, 0, ppart,q,kpic,qm,nppmx,idimp,mx,
my,nxv,nyv,mx1,mxy1);
hipDeviceSynchronize();
crc = hipGetLastError();
if (crc) {
printf("gpu2ppost2l error=%d:%s\n",crc,hipGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpu2jppost2l(float *ppart, float *cu, int *kpic,
float qm, float dt, int nppmx, int idimp,
int nx, int ny, int mx, int my, int nxv,
int nyv, int mx1, int mxy1, int ipbc) {
/* Current Deposit Interface for C */
int n, m, ns;
dim3 dimBlock(nblock_size);
n = mxy1;
m = (n - 1)/maxgsx + 1;
n = n < maxgsx ? n : maxgsx;
dim3 dimGrid(n,m);
ns = 3*(mx + 1)*(my + 1)*sizeof(float);
crc = hipGetLastError();
hipLaunchKernelGGL(( gpu2jppost2l), dim3(dimGrid),dim3(dimBlock),ns, 0, ppart,cu,kpic,qm,dt,nppmx,
idimp,nx,ny,mx,my,nxv,nyv,mx1,
mxy1,ipbc);
hipDeviceSynchronize();
crc = hipGetLastError();
if (crc) {
printf("gpu2jppost2l error=%d:%s\n",crc,hipGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpu2jppostf2l(float *ppart, float *cu, int *kpic,
int *ncl, int *ihole, float qm, float dt,
int nppmx, int idimp, int nx, int ny,
int mx, int my, int nxv, int nyv, int mx1,
int mxy1, int ntmax, int *irc) {
/* Current Deposit Interface for C */
int n, m, ns;
dim3 dimBlock(nblock_size);
n = mxy1;
m = (n - 1)/maxgsx + 1;
n = n < maxgsx ? n : maxgsx;
dim3 dimGrid(n,m);
ns = 3*(mx + 1)*(my + 1)*sizeof(float) + (nblock_size+9)*sizeof(int);
crc = hipGetLastError();
hipLaunchKernelGGL(( gpu2jppostf2l), dim3(dimGrid),dim3(dimBlock),ns, 0, ppart,cu,kpic,ncl,ihole,qm,dt,
nppmx,idimp,nx,ny,mx,my,nxv,
nyv,mx1,mxy1,ntmax,irc);
hipDeviceSynchronize();
crc = hipGetLastError();
if (crc) {
printf("gpu2jppostf2l error=%d:%s\n",crc,hipGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpu2rjppost2l(float *ppart, float *cu, int *kpic,
float qm, float dt, float ci, int nppmx,
int idimp, int nx, int ny, int mx,
int my, int nxv, int nyv, int mx1,
int mxy1, int ipbc) {
/* Current Deposit Interface for C */
int n, m, ns;
dim3 dimBlock(nblock_size);
n = mxy1;
m = (n - 1)/maxgsx + 1;
n = n < maxgsx ? n : maxgsx;
dim3 dimGrid(n,m);
ns = 3*(mx + 1)*(my + 1)*sizeof(float);
crc = hipGetLastError();
hipLaunchKernelGGL(( gpu2rjppost2l), dim3(dimGrid),dim3(dimBlock),ns, 0, ppart,cu,kpic,qm,dt,ci,nppmx,
idimp,nx,ny,mx,my,nxv,nyv,mx1,
mxy1,ipbc);
hipDeviceSynchronize();
crc = hipGetLastError();
if (crc) {
printf("gpu2rjppost2l error=%d:%s\n",crc,hipGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpu2rjppostf2l(float *ppart, float *cu, int *kpic,
int *ncl, int *ihole, float qm,
float dt, float ci, int nppmx,
int idimp, int nx, int ny, int mx,
int my, int nxv, int nyv, int mx1,
int mxy1, int ntmax, int *irc) {
/* Current Deposit Interface for C */
int n, m, ns;
dim3 dimBlock(nblock_size);
n = mxy1;
m = (n - 1)/maxgsx + 1;
n = n < maxgsx ? n : maxgsx;
dim3 dimGrid(n,m);
ns = 3*(mx + 1)*(my + 1)*sizeof(float) + (nblock_size+9)*sizeof(int);
crc = hipGetLastError();
hipLaunchKernelGGL(( gpu2rjppostf2l), dim3(dimGrid),dim3(dimBlock),ns, 0, ppart,cu,kpic,ncl,ihole,qm,
dt,ci,nppmx,idimp,nx,ny,mx,
my,nxv,nyv,mx1,mxy1,ntmax,
irc);
hipDeviceSynchronize();
crc = hipGetLastError();
if (crc) {
printf("gpu2rjppostf2l error=%d:%s\n",crc,
hipGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpucaguard2l(float2 *qc, float *q, int nx, int ny,
int nxe, int nye, int nxvh, int nyv) {
/* Guard Cell Interface for C */
dim3 dimBlock(nblock_size);
dim3 dimGrid(ny);
crc = hipGetLastError();
hipLaunchKernelGGL(( gpucaguard2l), dim3(dimGrid),dim3(dimBlock), 0, 0, qc,q,nx,ny,nxe,nye,nxvh,nyv);
hipDeviceSynchronize();
crc = hipGetLastError();
if (crc) {
printf("gpucaguard2l error=%d:%s\n",crc,hipGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpucacguard2l(float2 *cuc, float *cu, int nx, int ny,
int nxe, int nye, int nxvh, int nyv) {
/* Guard Cell Interface for C */
dim3 dimBlock(nblock_size);
dim3 dimGrid(ny);
crc = hipGetLastError();
hipLaunchKernelGGL(( gpucacguard2l), dim3(dimGrid),dim3(dimBlock), 0, 0, cuc,cu,nx,ny,nxe,nye,nxvh,nyv);
hipDeviceSynchronize();
crc = hipGetLastError();
if (crc) {
printf("gpucacguard2l error=%d:%s\n",crc,hipGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpucbguard2l(float2 *bxyc, float *bxy, int nx, int ny,
int nxe, int nye, int nxvh, int nyv) {
/* Guard Cell Interface for C */
dim3 dimBlock(nblock_size);
dim3 dimGrid(ny);
crc = hipGetLastError();
hipLaunchKernelGGL(( gpucbguard2l), dim3(dimGrid),dim3(dimBlock), 0, 0, bxyc,bxy,nx,ny,nxe,nye,nxvh,nyv);
hipDeviceSynchronize();
crc = hipGetLastError();
if (crc) {
printf("gpucbguard2l error=%d:%s\n",crc,hipGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpuppord2l(float *ppart, float *ppbuff, int *kpic,
int *ncl, int *ihole, int idimp, int nppmx,
int nx, int ny, int mx, int my, int mx1,
int my1, int npbmx, int ntmax, int *irc) {
/* Sort Interface for C */
int mxy1, n, m, ns;
dim3 dimBlock(nblock_size);
mxy1 = mx1*my1;
m = (mxy1 - 1)/maxgsx + 1;
n = mxy1 < maxgsx ? mxy1 : maxgsx;
dim3 dimGrid(n,m);
/* find which particles are leaving tile */
ns = (nblock_size+9)*sizeof(int);
crc = hipGetLastError();
hipLaunchKernelGGL(( gpuppfnd2l), dim3(dimGrid),dim3(dimBlock),ns, 0, ppart,kpic,ncl,ihole,idimp,nppmx,
nx,ny,mx,my,mx1,my1,ntmax,irc);
/* hipDeviceSynchronize(); */
crc = hipGetLastError();
if (crc) {
printf("gpuppfnd2l error=%d:%s\n",crc,hipGetErrorString(crc));
exit(1);
}
/* buffer particles that are leaving tile and sum ncl */
ns = 9*sizeof(int);
crc = hipGetLastError();
hipLaunchKernelGGL(( gpuppmov2l), dim3(dimGrid),dim3(dimBlock),ns, 0, ppart,ppbuff,ncl,ihole,idimp,
nppmx,mx1,my1,npbmx,ntmax,irc);
/* hipDeviceSynchronize(); */
crc = hipGetLastError();
if (crc) {
printf("gpuppmov2l error=%d:%s\n",crc,hipGetErrorString(crc));
exit(1);
}
/* copy incoming particles from ppbuff into ppart, update kpic */
ns = (nblock_size+18)*sizeof(int);
crc = hipGetLastError();
hipLaunchKernelGGL(( gpuppord2l), dim3(dimGrid),dim3(dimBlock),ns, 0, ppart,ppbuff,kpic,ncl,ihole,
idimp,nppmx,mx1,my1,npbmx,ntmax,
irc);
hipDeviceSynchronize();
crc = hipGetLastError();
if (crc) {
printf("gpuppord2l error=%d:%s\n",crc,hipGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpuppordf2l(float *ppart, float *ppbuff, int *kpic,
int *ncl, int *ihole, int idimp, int nppmx,
int mx1, int my1, int npbmx, int ntmax,
int *irc) {
/* Sort Interface for C */
int mxy1, n, m, ns;
dim3 dimBlock(nblock_size);
mxy1 = mx1*my1;
m = (mxy1 - 1)/maxgsx + 1;
n = mxy1 < maxgsx ? mxy1 : maxgsx;
dim3 dimGrid(n,m);
/* buffer particles that are leaving tile and sum ncl */
ns = 9*sizeof(int);
crc = hipGetLastError();
hipLaunchKernelGGL(( gpuppmov2l), dim3(dimGrid),dim3(dimBlock),ns, 0, ppart,ppbuff,ncl,ihole,idimp,
nppmx,mx1,my1,npbmx,ntmax,irc);
/* hipDeviceSynchronize(); */
crc = hipGetLastError();
if (crc) {
printf("gpuppmov2l error=%d:%s\n",crc,hipGetErrorString(crc));
exit(1);
}
/* copy incoming particles from ppbuff into ppart, update kpic */
ns = (nblock_size+18)*sizeof(int);
crc = hipGetLastError();
hipLaunchKernelGGL(( gpuppord2l), dim3(dimGrid),dim3(dimBlock),ns, 0, ppart,ppbuff,kpic,ncl,ihole,
idimp,nppmx,mx1,my1,npbmx,ntmax,
irc);
hipDeviceSynchronize();
crc = hipGetLastError();
if (crc) {
printf("gpuppord2l error=%d:%s\n",crc,hipGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpupois23t(float2 *qt, float2 *fxyt, float2 *ffct,
float *we, int nx, int ny, int nxvh,
int nyv, int nxhd, int nyhd) {
/* Poisson Solver Interface for C */
int nxh1, ns;
dim3 dimBlock(nblock_size);
nxh1 = nx/2 + 1;
dim3 dimGrid(nxh1);
ns = nblock_size*sizeof(float);
crc = hipGetLastError();
hipLaunchKernelGGL(( gpupois23t), dim3(dimGrid),dim3(dimBlock),ns, 0, qt,fxyt,ffct,we,nx,ny,nxvh,nyv,
nxhd,nyhd);
hipDeviceSynchronize();
crc = hipGetLastError();
if (crc) {
printf("gpupois23t error=%d:%s\n",crc,hipGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpucuperp2t(float2 *cut, int nx, int ny, int nxvh,
int nyv) {
/* Poisson Solver Interface for C */
int nxh1;
dim3 dimBlock(nblock_size);
nxh1 = nx/2 + 1;
dim3 dimGrid(nxh1);
crc = hipGetLastError();
hipLaunchKernelGGL(( gpucuperp2t), dim3(dimGrid),dim3(dimBlock), 0, 0, cut,nx,ny,nxvh,nyv);
hipDeviceSynchronize();
crc = hipGetLastError();
if (crc) {
printf("gpucuperp2t error=%d:%s\n",crc,hipGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpuibpois23t(float2 *cut, float2 *bxyt, float2 *ffct,
float ci, float *wm, int nx, int ny,
int nxvh, int nyv, int nxhd, int nyhd) {
/* Poisson Solver Interface for C */
int nxh1, ns;
dim3 dimBlock(nblock_size);
nxh1 = nx/2 + 1;
dim3 dimGrid(nxh1);
ns = nblock_size*sizeof(float);
crc = hipGetLastError();
hipLaunchKernelGGL(( gpuibpois23t), dim3(dimGrid),dim3(dimBlock),ns, 0, cut,bxyt,ffct,ci,wm,nx,ny,nxvh,
nyv,nxhd,nyhd);
hipDeviceSynchronize();
crc = hipGetLastError();
if (crc) {
printf("gpuibpois23t error=%d:%s\n",crc,hipGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpumaxwel2t(float2 *exyt, float2 *bxyt, float2 *cut,
float2 *ffct, float ci, float dt,
float *wf, float *wm, int nx, int ny,
int nxvh, int nyv, int nxhd, int nyhd) {
/* Maxwell Solver Interface for C */
int nxh1, ns;
dim3 dimBlock(nblock_size);
nxh1 = nx/2 + 1;
dim3 dimGrid(nxh1);
ns = nblock_size*sizeof(float);
crc = hipGetLastError();
hipLaunchKernelGGL(( gpumaxwel2t), dim3(dimGrid),dim3(dimBlock),ns, 0, exyt,bxyt,cut,ffct,ci, dt,wf,wm,
nx,ny,nxvh,nyv,nxhd,nyhd);
hipDeviceSynchronize();
crc = hipGetLastError();
if (crc) {
printf("gpumaxwel2t error=%d:%s\n",crc,hipGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpuemfield2t(float2 *fxyt, float2 *exyt, float2 *ffct,
int isign, int nx, int ny, int nxvh,
int nyv, int nxhd, int nyhd) {
/* Maxwell Solver Interface for C */
int nxh1;
dim3 dimBlock(nblock_size);
nxh1 = nx/2 + 1;
dim3 dimGrid(nxh1);
crc = hipGetLastError();
hipLaunchKernelGGL(( gpuemfield2t), dim3(dimGrid),dim3(dimBlock), 0, 0, fxyt,exyt,ffct,isign,nx,ny,nxvh,
nyv,nxhd,nyhd);
hipDeviceSynchronize();
crc = hipGetLastError();
if (crc) {
printf("gpuemfield2t error=%d:%s\n",crc,hipGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpuwfft2rcs(float2 *f, float2 *g, int isign,
int *mixup, float2 *sct, int indx,
int indy, int nxhd, int nyd, int nxhyd,
int nxyhd) {
/* wrapper function for real to complex fft, without packed data */
/* if isign = -1, f = input, g = output */
/* if isign = 1, g = input, f = output */
/* nxhd must be >= nx/2 + 1 */
/* local data */
int nxh, nxh1, ny, nsize, ns;
int nxi = 1, nyi = 1, mx = 16;
dim3 dimBlock(nblock_size);
dim3 dimBlockt(mx,mx);
/* calculate range of indices */
nxh = 1L<<(indx - 1);
nxh1 = nxh + 1;
ny = 1L<<indy;
dim3 dimGridx(nxh1);
dim3 dimGridy(ny);
dim3 dimGridtx((nxh1-1)/mx+1,(ny-1)/mx+1);
dim3 dimGridty((ny-1)/mx+1,(nxh1-1)/mx+1);
/* inverse fourier transform */
if (isign < 0) {
/* perform x fft */
nsize = nxh < 1024 ? nxh : 1024;
ns = nsize*sizeof(float2);
crc = hipGetLastError();
hipLaunchKernelGGL(( gpufft2rcxs), dim3(dimGridy),dim3(dimBlock),ns, 0, f,isign,mixup,sct,indx,indy,
nyi,ny,nxhd,nyd,nxhyd,nxyhd,
nsize);
/* hipDeviceSynchronize(); */
crc = hipGetLastError();
if (crc) {
printf("gpufft2rcxs error=%d:%s\n",
crc,hipGetErrorString(crc));
exit(1);
}
/* transpose f to g */
ns = (mx+1)*mx*sizeof(float2);
crc = hipGetLastError();
hipLaunchKernelGGL(( gpuctpose4), dim3(dimGridtx),dim3(dimBlockt),ns, 0, f,g,nxh1,ny,nxhd,nyd);
/* hipDeviceSynchronize(); */
crc = hipGetLastError();
if (crc) {
printf("gpuctpose4 error=%d:%s\n",crc,hipGetErrorString(crc));
exit(1);
}
/* perform y fft */
nsize = ny < 1024 ? ny : 1024;
ns = nsize*sizeof(float2);
crc = hipGetLastError();
hipLaunchKernelGGL(( gpufft2rcys), dim3(dimGridx),dim3(dimBlock),ns, 0, g,isign,mixup,sct,indx,indy,
nxi,nxh1,nxhd,nyd,nxhyd,
nxyhd,nsize);
hipDeviceSynchronize();
crc = hipGetLastError();
if (crc) {
printf("gpufft2rcys error=%d:%s\n",
crc,hipGetErrorString(crc));
exit(1);
}
/* transpose g to f */
/* ns = (mx+1)*mx*sizeof(float2); */
/* crc = hipGetLastError(); */
/* hipLaunchKernelGGL(( gpuctpose4), dim3(dimGridty),dim3(dimBlockt),ns, 0, g,f,ny,nxh1,nyd,nxhd); */
/* hipDeviceSynchronize(); */
/* crc = hipGetLastError(); */
/* if (crc) { */
/* printf("gpuctpose4 error=%d:%s\n",crc,hipGetErrorString(crc)); */
/* exit(1); */
/* } */
}
/* forward fourier transform */
else if (isign > 0) {
/* transpose f to g */
/* ns = (mx+1)*mx*sizeof(float2); */
/* crc = hipGetLastError(); */
/* hipLaunchKernelGGL(( gpuctpose4), dim3(dimGridtx),dim3(dimBlockt),ns, 0, f,g,nxh1,ny,nxhd,nyd); */
/* hipDeviceSynchronize(); */
/* crc = hipGetLastError(); */
/* if (crc) { */
/* printf("gpuctpose4 error=%d:%s\n",crc,hipGetErrorString(crc)); */
/* exit(1); */
/* } */
/* perform y fft */
nsize = ny < 1024 ? ny : 1024;
ns = nsize*sizeof(float2);
crc = hipGetLastError();
hipLaunchKernelGGL(( gpufft2rcys), dim3(dimGridx),dim3(dimBlock),ns, 0, g,isign,mixup,sct,indx,indy,
nxi,nxh1,nxhd,nyd,nxhyd,
nxyhd,nsize);
/* hipDeviceSynchronize(); */
crc = hipGetLastError();
if (crc) {
printf("gpufft2rcys error=%d:%s\n",
crc,hipGetErrorString(crc));
exit(1);
}
/* transpose g to f */
ns = (mx+1)*mx*sizeof(float2);
crc = hipGetLastError();
hipLaunchKernelGGL(( gpuctpose4), dim3(dimGridty),dim3(dimBlockt),ns, 0, g,f,ny,nxh1,nyd,nxhd);
/* hipDeviceSynchronize(); */
crc = hipGetLastError();
if (crc) {
printf("gpuctpose4 error=%d:%s\n",crc,hipGetErrorString(crc));
exit(1);
}
/* perform x fft */
nsize = nxh < 1024 ? nxh : 1024;
ns = nsize*sizeof(float2);
crc = hipGetLastError();
hipLaunchKernelGGL(( gpufft2rcxs), dim3(dimGridy),dim3(dimBlock),ns, 0, f,isign,mixup,sct,indx,indy,
nyi,ny,nxhd,nyd,nxhyd,nxyhd,
nsize);
hipDeviceSynchronize();
crc = hipGetLastError();
if (crc) {
printf("gpufft2rcxs error=%d:%s\n",
crc,hipGetErrorString(crc));
exit(1);
}
}
return;
}
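/* Illustrative call sequence (hypothetical device arrays f and g and */
/* hypothetical dimension names, error checking omitted): the inverse */
/* transform leaves its result in the transposed array g and the */
/* forward transform reads its input from g, which is why the matching */
/* transposes above are commented out; whatever runs between the two */
/* calls sees transposed data: */
/* cgpuwfft2rcs(f,g,-1,mixup,sct,indx,indy,nxh1,ny,nxhy,nxyh); */
/* ... operate on g, stored with the y index running fastest ... */
/* cgpuwfft2rcs(f,g,1,mixup,sct,indx,indy,nxh1,ny,nxhy,nxyh); */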
/*--------------------------------------------------------------------*/
extern "C" void cgpuwfft2rcsn(float2 *fn, float2 *gn, int isign,
int *mixup, float2 *sct, int indx,
int indy, int ndim, int nxhd, int nyd,
int nxhyd, int nxyhd) {
/* wrapper function for multiple real to complex ffts, */
/* without packed data */
/* if isign = -1, fn = input, gn = output */
/* if isign = 1, gn = input, fn = output */
/* ndim = vector dimension */
/* nxhd must be >= nx/2 + 1 */
/* local data */
int nxh, nxh1, ny, nxp, nyp, nnxd, nnyd, nsize, ns;
int nxi = 1, nyi = 1, mx = 16;
dim3 dimBlock(nblock_size);
dim3 dimBlockt(mx,mx);
/* calculate range of indices */
nxh = 1L<<(indx - 1);
nxh1 = nxh + 1;
ny = 1L<<indy;
nxp = ndim*nxh1;
nyp = ndim*ny;
nnxd = ndim*nxhd;
nnyd = ndim*nyd;
dim3 dimGridx(nxp);
dim3 dimGridy(nyp);
dim3 dimGridtx((nxh1-1)/mx+1,(ny-1)/mx+1);
dim3 dimGridty((ny-1)/mx+1,(nxh1-1)/mx+1);
/* inverse fourier transform */
if (isign < 0) {
/* perform x fft */
nsize = nxh < 1024 ? nxh : 1024;
ns = nsize*sizeof(float2);
crc = hipGetLastError();
hipLaunchKernelGGL(( gpufft2rcxs), dim3(dimGridy),dim3(dimBlock),ns, 0, fn,isign,mixup,sct,indx,
indy,nyi,nyp,nxhd,nnyd,
nxhyd,nxyhd,nsize);
/* hipDeviceSynchronize(); */
crc = hipGetLastError();
if (crc) {
printf("gpufft2rcxs error=%d:%s\n",
crc,hipGetErrorString(crc));
exit(1);
}
/* transpose f to g */
ns = ndim*(mx+1)*mx*sizeof(float2);
crc = hipGetLastError();
hipLaunchKernelGGL(( gpuctpose4n), dim3(dimGridtx),dim3(dimBlockt),ns, 0, fn,gn,nxh1,ny,ndim,nxhd,
nyd);
/* hipDeviceSynchronize(); */
crc = hipGetLastError();
if (crc) {
printf("gpuctpose4n error=%d:%s\n",crc,
hipGetErrorString(crc));
exit(1);
}
/* perform y fft */
nsize = ny < 1024 ? ny : 1024;
ns = nsize*sizeof(float2);
crc = hipGetLastError();
hipLaunchKernelGGL(( gpufft2rcys), dim3(dimGridx),dim3(dimBlock),ns, 0, gn,isign,mixup,sct,indx,
indy,nxi,nxp,nnxd,nyd,nxhyd,
nxyhd,nsize);
hipDeviceSynchronize();
crc = hipGetLastError();
if (crc) {
printf("gpufft2rcys error=%d:%s\n",
crc,hipGetErrorString(crc));
exit(1);
}
/* transpose g to f */
/* ns = (mx+1)*mx*sizeof(float2); */
/* crc = hipGetLastError(); */
/* hipLaunchKernelGGL(( gpuctpose4n), dim3(dimGridty),dim3(dimBlockt),ns, 0, gn,fn,ny,nxh1,ndim,nyd, */
/* nxhd); */
/* hipDeviceSynchronize(); */
/* crc = hipGetLastError(); */
/* if (crc) { */
/* printf("gpuctpose4 error=%d:%s\n",crc, */
/* hipGetErrorString(crc)); */
/* exit(1); */
/* } */
}
/* forward fourier transform */
else if (isign > 0) {
/* transpose f to g */
/* ns = (mx+1)*mx*sizeof(float2); */
/* crc = hipGetLastError(); */
/* hipLaunchKernelGGL(( gpuctpose4n), dim3(dimGridtx),dim3(dimBlockt),ns, 0, fn,gn,nxh1,ny,ndim,nxhd, */
/* nyd); */
/* hipDeviceSynchronize(); */
/* crc = hipGetLastError(); */
/* if (crc) { */
/* printf("gpuctpose4n error=%d:%s\n",crc, */
/* hipGetErrorString(crc)); */
/* exit(1); */
/* } */
/* perform y fft */
nsize = ny < 1024 ? ny : 1024;
ns = nsize*sizeof(float2);
crc = hipGetLastError();
hipLaunchKernelGGL(( gpufft2rcys), dim3(dimGridx),dim3(dimBlock),ns, 0, gn,isign,mixup,sct,indx,
indy,nxi,nxp,nnxd,nyd,nxhyd,
nxyhd,nsize);
/* hipDeviceSynchronize(); */
crc = hipGetLastError();
if (crc) {
printf("gpufft2rcys error=%d:%s\n",
crc,hipGetErrorString(crc));
exit(1);
}
/* transpose g to f */
ns = ndim*(mx+1)*mx*sizeof(float2);
crc = hipGetLastError();
hipLaunchKernelGGL(( gpuctpose4n), dim3(dimGridty),dim3(dimBlockt),ns, 0, gn,fn,ny,nxh1,ndim,nyd,
nxhd);
/* hipDeviceSynchronize(); */
crc = hipGetLastError();
if (crc) {
printf("gpuctpose4n error=%d:%s\n",crc,
hipGetErrorString(crc));
exit(1);
}
/* perform x fft */
nsize = nxh < 1024 ? nxh : 1024;
ns = nsize*sizeof(float2);
crc = hipGetLastError();
hipLaunchKernelGGL(( gpufft2rcxs), dim3(dimGridy),dim3(dimBlock),ns, 0, fn,isign,mixup,sct,indx,
indy,nyi,nyp,nxhd,nnyd,
nxhyd,nxyhd,nsize);
hipDeviceSynchronize();
crc = hipGetLastError();
if (crc) {
printf("gpufft2rcxs error=%d:%s\n",
crc,hipGetErrorString(crc));
exit(1);
}
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpusum2(float *a, float *sa, int nx) {
/* segmented 1d parallel sum reduction of input array a, of length nx */
/* first reduce individual blocks in parallel, writing result to scr */
/* then reduce scr serially, result is written to sa */
/* local data */
int nbx, nbs, ns;
void *gptr;
static int len = 0;
static float *scr = NULL;
nbx = (nx - 1)/nblock_size + 1;
dim3 dimBlock(nblock_size);
dim3 dimGrid(nbx);
nbs = (nbx - 1)/nblock_size + 1;
dim3 dimGrid1(nbs);
/* create scratch array */
if (len < nbx) {
if (len > 0)
crc = hipFree((void *)scr);
crc = hipMalloc(&gptr,sizeof(float)*nbx);
if (crc) {
printf("hipMalloc cgpusum2 float Error=%d:%s,l=%d\n",crc,
hipGetErrorString(crc),nbx);
exit(1);
}
scr = (float *)gptr;
len = nbx;
}
/* reduce individual blocks in parallel */
ns = nblock_size*sizeof(float);
crc = hipGetLastError();
hipLaunchKernelGGL(( gpusum2), dim3(dimGrid),dim3(dimBlock),ns, 0, a,scr,nx);
/* hipDeviceSynchronize(); */
crc = hipGetLastError();
if (crc) {
printf("gpusum2 error=%d:%s\n",crc,hipGetErrorString(crc));
exit(1);
}
/* 1d serial reduction */
crc = hipGetLastError();
hipLaunchKernelGGL(( gpusum1), dim3(dimGrid1),dim3(dimBlock),ns, 0, scr,sa,nbx);
hipDeviceSynchronize();
crc = hipGetLastError();
if (crc) {
printf("gpusum1 error=%d:%s\n",crc,hipGetErrorString(crc));
exit(1);
}
return;
}
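/*--------------------------------------------------------------------*/
/* Illustrative helper (a sketch, not part of the original library and */
/* not called by it): one possible host-side use of cgpusum2 above. */
/* Because gpusum1 accumulates into sa with atomicAdd, the result */
/* location is zeroed first; error checking is omitted and the helper */
/* name is hypothetical. */
static void example_gpusum2(const float h_a[], float *h_sum, int nx) {
/* local data */
   float *d_a = NULL, *d_sa = NULL;
   hipMalloc((void **)&d_a,sizeof(float)*nx);
   hipMalloc((void **)&d_sa,sizeof(float));
/* copy input data to the GPU and clear the accumulator */
   hipMemcpy(d_a,h_a,sizeof(float)*nx,hipMemcpyHostToDevice);
   hipMemset(d_sa,0,sizeof(float));
/* two-stage reduction on the GPU */
   cgpusum2(d_a,d_sa,nx);
/* copy the scalar result back to the host */
   hipMemcpy(h_sum,d_sa,sizeof(float),hipMemcpyDeviceToHost);
   hipFree(d_a);
   hipFree(d_sa);
   return;
}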
/* Interfaces to Fortran */
/*--------------------------------------------------------------------*/
extern "C" void cgpubppush23l_(unsigned long *gp_ppart,
unsigned long *gp_fxy,
unsigned long *gp_bxy,
unsigned long *gp_kpic, float *qbm,
float *dt, float *dtc,
unsigned long *gp_ek, int *idimp,
int *nppmx, int *nx, int *ny, int *mx,
int *my, int *nxv, int *nyv, int *mx1,
int *mxy1, int *ipbc) {
float *ppart, *fxy, *bxy, *ek;
int *kpic;
ppart = (float *)*gp_ppart;
fxy = (float *)*gp_fxy;
bxy = (float *)*gp_bxy;
kpic = (int *)*gp_kpic;
ek = (float *)*gp_ek;
cgpubppush23l(ppart,fxy,bxy,kpic,*qbm,*dt,*dtc,ek,*idimp,*nppmx,*nx,
*ny,*mx,*my,*nxv,*nyv,*mx1,*mxy1,*ipbc);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpubppushf23l_(unsigned long *gp_ppart,
unsigned long *gp_fxy,
unsigned long *gp_bxy,
unsigned long *gp_kpic,
unsigned long *gp_ncl,
unsigned long *gp_ihole, float *qbm,
float *dt, float *dtc,
unsigned long *gp_ek, int *idimp,
int *nppmx, int *nx, int *ny, int *mx,
int *my, int *nxv, int *nyv, int *mx1,
int *mxy1, int *ntmax,
unsigned long *gp_irc) {
float *ppart, *fxy, *bxy, *ek;
int *kpic, *ncl, *ihole, *irc;
ppart = (float *)*gp_ppart;
fxy = (float *)*gp_fxy;
bxy = (float *)*gp_bxy;
kpic = (int *)*gp_kpic;
ncl = (int *)*gp_ncl;
ihole = (int *)*gp_ihole;
ek = (float *)*gp_ek;
irc = (int *)*gp_irc;
cgpubppushf23l(ppart,fxy,bxy,kpic,ncl,ihole,*qbm,*dt,*dtc,ek,*idimp,
*nppmx,*nx,*ny,*mx,*my,*nxv,*nyv,*mx1,*mxy1,*ntmax,
irc);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpurbppush23l_(unsigned long *gp_ppart,
unsigned long *gp_fxy,
unsigned long *gp_bxy,
unsigned long *gp_kpic, float *qbm,
float *dt, float *dtc, float *ci,
unsigned long *gp_ek, int *idimp,
int *nppmx, int *nx, int *ny, int *mx,
int *my, int *nxv, int *nyv, int *mx1,
int *mxy1, int *ipbc) {
float *ppart, *fxy, *bxy, *ek;
int *kpic;
ppart = (float *)*gp_ppart;
fxy = (float *)*gp_fxy;
bxy = (float *)*gp_bxy;
kpic = (int *)*gp_kpic;
ek = (float *)*gp_ek;
cgpurbppush23l(ppart,fxy,bxy,kpic,*qbm,*dt,*dtc,*ci,ek,*idimp,*nppmx,
*nx,*ny,*mx,*my,*nxv,*nyv,*mx1,*mxy1,*ipbc);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpurbppushf23l_(unsigned long *gp_ppart,
unsigned long *gp_fxy,
unsigned long *gp_bxy,
unsigned long *gp_kpic,
unsigned long *gp_ncl,
unsigned long *gp_ihole, float *qbm,
float *dt, float *dtc, float *ci,
unsigned long *gp_ek, int *idimp,
int *nppmx, int *nx, int *ny, int *mx,
int *my, int *nxv, int *nyv, int *mx1,
int *mxy1, int *ntmax,
unsigned long *gp_irc) {
float *ppart, *fxy, *bxy, *ek;
int *kpic, *ncl, *ihole, *irc;
ppart = (float *)*gp_ppart;
fxy = (float *)*gp_fxy;
bxy = (float *)*gp_bxy;
kpic = (int *)*gp_kpic;
ncl = (int *)*gp_ncl;
ihole = (int *)*gp_ihole;
ek = (float *)*gp_ek;
irc = (int *)*gp_irc;
cgpurbppushf23l(ppart,fxy,bxy,kpic,ncl,ihole,*qbm,*dt,*dtc,*ci,ek,
*idimp,*nppmx,*nx,*ny,*mx,*my,*nxv,*nyv,*mx1,*mxy1,
*ntmax,irc);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpu2ppost2l_(unsigned long *gp_ppart,
unsigned long *gp_q,
unsigned long *gp_kpic, float *qm,
int *nppmx, int *idimp, int *mx, int *my,
int *nxv, int *nyv, int *mx1, int *mxy1) {
float *ppart, *q;
int *kpic;
ppart = (float *)*gp_ppart;
q = (float *)*gp_q;
kpic = (int *)*gp_kpic;
cgpu2ppost2l(ppart,q,kpic,*qm,*nppmx,*idimp,*mx,*my,*nxv,*nyv,*mx1,
*mxy1);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpu2jppost2l_(unsigned long *gp_ppart,
unsigned long *gp_cu,
unsigned long *gp_kpic, float *qm,
float *dt, int *nppmx, int *idimp,
int *nx, int *ny, int *mx, int *my,
int *nxv, int *nyv, int *mx1, int *mxy1,
int *ipbc) {
float *ppart, *cu;
int *kpic;
ppart = (float *)*gp_ppart;
cu = (float *)*gp_cu;
kpic = (int *)*gp_kpic;
cgpu2jppost2l(ppart,cu,kpic,*qm,*dt,*nppmx,*idimp,*nx,*ny,*mx,*my,
*nxv,*nyv,*mx1,*mxy1,*ipbc);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpu2jppostf2l_(unsigned long *gp_ppart,
unsigned long *gp_cu,
unsigned long *gp_kpic,
unsigned long *gp_ncl,
unsigned long *gp_ihole, float *qm,
float *dt, int *nppmx, int *idimp,
int *nx, int *ny, int *mx, int *my,
int *nxv, int *nyv, int *mx1, int *mxy1,
int *ntmax, unsigned long *gp_irc) {
float *ppart, *cu;
int *kpic, *ncl, *ihole, *irc;
ppart = (float *)*gp_ppart;
cu = (float *)*gp_cu;
kpic = (int *)*gp_kpic;
ncl = (int *)*gp_ncl;
ihole = (int *)*gp_ihole;
irc = (int *)*gp_irc;
cgpu2jppostf2l(ppart,cu,kpic,ncl,ihole,*qm,*dt,*nppmx,*idimp,*nx,*ny,
*mx,*my,*nxv,*nyv,*mx1,*mxy1,*ntmax,irc);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpu2rjppost2l_(unsigned long *gp_ppart,
unsigned long *gp_cu,
unsigned long *gp_kpic, float *qm,
float *dt, float *ci, int *nppmx,
int *idimp, int *nx, int *ny, int *mx,
int *my, int *nxv, int *nyv, int *mx1,
int *mxy1, int *ipbc) {
float *ppart, *cu;
int *kpic;
ppart = (float *)*gp_ppart;
cu = (float *)*gp_cu;
kpic = (int *)*gp_kpic;
cgpu2rjppost2l(ppart,cu,kpic,*qm,*dt,*ci,*nppmx,*idimp,*nx,*ny,*mx,
*my,*nxv,*nyv,*mx1,*mxy1,*ipbc);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpu2rjppostf2l_(unsigned long *gp_ppart,
unsigned long *gp_cu,
unsigned long *gp_kpic,
unsigned long *gp_ncl,
unsigned long *gp_ihole, float *qm,
float *dt, float *ci, int *nppmx,
int *idimp, int *nx, int *ny, int *mx,
int *my, int *nxv, int *nyv, int*mx1,
int *mxy1, int *ntmax,
unsigned long *gp_irc) {
float *ppart, *cu;
int *kpic, *ncl, *ihole, *irc;
ppart = (float *)*gp_ppart;
cu = (float *)*gp_cu;
kpic = (int *)*gp_kpic;
ncl = (int *)*gp_ncl;
ihole = (int *)*gp_ihole;
irc = (int *)*gp_irc;
cgpu2rjppostf2l(ppart,cu,kpic,ncl,ihole,*qm,*dt,*ci,*nppmx,*idimp,
*nx,*ny,*mx,*my,*nxv,*nyv,*mx1,*mxy1,*ntmax,irc);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpucaguard2l_(unsigned long *gp_qc,
unsigned long *gp_q, int *nx, int *ny,
int *nxe, int *nye, int *nxvh,
int *nyv) {
float2 *qc;
float *q;
qc = (float2 *)*gp_qc;
q = (float *)*gp_q;
cgpucaguard2l(qc,q,*nx,*ny,*nxe,*nye,*nxvh,*nyv);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpucacguard2l_(unsigned long *gp_cuc,
unsigned long *gp_cu, int *nx, int *ny,
int *nxe, int *nye, int *nxvh,
int *nyv) {
float2 *cuc;
float *cu;
cuc = (float2 *)*gp_cuc;
cu = (float *)*gp_cu;
cgpucacguard2l(cuc,cu,*nx,*ny,*nxe,*nye,*nxvh,*nyv);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpucbguard2l_(unsigned long *gp_bxyc,
unsigned long *gp_bxy, int *nx, int *ny,
int *nxe, int *nye, int *nxvh,
int *nyv) {
float2 *bxyc;
float *bxy;
bxyc = (float2 *)*gp_bxyc;
bxy = (float *)*gp_bxy;
cgpucbguard2l(bxyc,bxy,*nx,*ny,*nxe,*nye,*nxvh,*nyv);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpuppord2l_(unsigned long *gp_ppart,
unsigned long *gp_ppbuff,
unsigned long *gp_kpic,
unsigned long *gp_ncl,
unsigned long *gp_ihole, int *idimp,
int *nppmx, int *nx, int *ny, int *mx,
int *my, int *mx1, int *my1, int *npbmx,
int *ntmax, unsigned long *gp_irc) {
float *ppart, *ppbuff;
int *kpic, *ncl, *ihole, *irc;
ppart = (float *)*gp_ppart;
ppbuff = (float *)*gp_ppbuff;
kpic = (int *)*gp_kpic;
ncl = (int *)*gp_ncl;
ihole = (int *)*gp_ihole;
irc = (int *)*gp_irc;
cgpuppord2l(ppart,ppbuff,kpic,ncl,ihole,*idimp,*nppmx,*nx,*ny,*mx,
*my,*mx1,*my1,*npbmx,*ntmax,irc);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpuppordf2l_(unsigned long *gp_ppart,
unsigned long *gp_ppbuff,
unsigned long *gp_kpic,
unsigned long *gp_ncl,
unsigned long *gp_ihole, int *idimp,
int *nppmx, int *mx1, int *my1, int *npbmx,
int *ntmax, unsigned long *gp_irc) {
float *ppart, *ppbuff;
int *kpic, *ncl, *ihole, *irc;
ppart = (float *)*gp_ppart;
ppbuff = (float *)*gp_ppbuff;
kpic = (int *)*gp_kpic;
ncl = (int *)*gp_ncl;
ihole = (int *)*gp_ihole;
irc = (int *)*gp_irc;
cgpuppordf2l(ppart,ppbuff,kpic,ncl,ihole,*idimp,*nppmx,*mx1,*my1,
*npbmx,*ntmax,irc);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpupois23t_(unsigned long *gp_qt,
unsigned long *gp_fxyt,
unsigned long *gp_ffct,
unsigned long *gp_we, int *nx, int *ny,
int *nxvh, int *nyv, int *nxhd,
int *nyhd) {
float2 *qt, *fxyt, *ffct;
float *we;
qt = (float2 *)*gp_qt;
fxyt = (float2 *)*gp_fxyt;
ffct = (float2 *)*gp_ffct;
we = (float *)*gp_we;
cgpupois23t(qt,fxyt,ffct,we,*nx,*ny,*nxvh,*nyv,*nxhd,*nyhd);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpucuperp2t_(unsigned long *gp_cut, int *nx, int *ny,
int *nxvh, int *nyv) {
float2 *cut;
cut = (float2 *)*gp_cut;
cgpucuperp2t(cut,*nx,*ny,*nxvh,*nyv);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpuibpois23t_(unsigned long *gp_cut,
unsigned long *gp_bxyt,
unsigned long *gp_ffct, float *ci,
unsigned long *gp_wm, int *nx, int *ny,
int *nxvh, int *nyv, int *nxhd,
int *nyhd) {
float2 *cut, *bxyt, *ffct;
float *wm;
cut = (float2 *)*gp_cut;
bxyt = (float2 *)*gp_bxyt;
ffct = (float2 *)*gp_ffct;
wm = (float *)*gp_wm;
cgpuibpois23t(cut,bxyt,ffct,*ci,wm,*nx,*ny,*nxvh,*nyv, *nxhd,*nyhd);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpumaxwel2t_(unsigned long *gp_exyt,
unsigned long *gp_bxyt,
unsigned long *gp_cut,
unsigned long *gp_ffct, float *ci,
float *dt, unsigned long *gp_wf,
unsigned long *gp_wm, int *nx, int *ny,
int *nxvh, int *nyv, int *nxhd,
int *nyhd) {
float2 *cut, *exyt, *bxyt, *ffct;
float *wf, *wm;
cut = (float2 *)*gp_cut;
exyt = (float2 *)*gp_exyt;
bxyt = (float2 *)*gp_bxyt;
ffct = (float2 *)*gp_ffct;
wf = (float *)*gp_wf;
wm = (float *)*gp_wm;
cgpumaxwel2t(exyt,bxyt,cut,ffct,*ci,*dt,wf,wm,*nx,*ny,*nxvh,*nyv,
*nxhd,*nyhd);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpuemfield2t_(unsigned long *gp_fxyt,
unsigned long *gp_exyt,
unsigned long *gp_ffct, int *isign,
int *nx, int *ny, int *nxvh, int *nyv,
int *nxhd, int *nyhd) {
float2 *fxyt, *exyt, *ffct;
fxyt = (float2 *)*gp_fxyt;
exyt = (float2 *)*gp_exyt;
ffct = (float2 *)*gp_ffct;
cgpuemfield2t(fxyt,exyt,ffct,*isign,*nx,*ny,*nxvh,*nyv,*nxhd,*nyhd);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpuwfft2rcs_(unsigned long *gp_f, unsigned long *gp_g,
int *isign, unsigned long *gp_mixup,
unsigned long *gp_sct, int *indx,
int *indy, int *nxhd, int *nyd,
int *nxhyd, int *nxyhd) {
float2 *f, *g, *sct;
int *mixup;
f = (float2 *)*gp_f;
g = (float2 *)*gp_g;
mixup = (int *)*gp_mixup;
sct = (float2 *)*gp_sct;
cgpuwfft2rcs(f,g,*isign,mixup,sct,*indx,*indy,*nxhd,*nyd,*nxhyd,
*nxyhd);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpuwfft2rcsn_(unsigned long *gp_fn,
unsigned long *gp_gn, int *isign,
unsigned long *gp_mixup,
unsigned long *gp_sct, int *indx,
int *indy, int *ndim, int *nxhd,
int *nyd, int *nxhyd, int *nxyhd) {
float2 *fn, *gn, *sct;
int *mixup;
fn = (float2 *)*gp_fn;
gn = (float2 *)*gp_gn;
mixup = (int *)*gp_mixup;
sct = (float2 *)*gp_sct;
cgpuwfft2rcsn(fn,gn,*isign,mixup,sct,*indx,*indy,*ndim,*nxhd,*nyd,
*nxhyd,*nxyhd);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpusum2_(unsigned long *gp_a, unsigned long *gp_sa,
int *nx) {
float *a, *sa;
a = (float *)*gp_a;
sa = (float *)*gp_sa;
cgpusum2(a,sa,*nx);
return;
}
/* CUDA Library for Skeleton 2-1/2D Electromagnetic GPU PIC Code */
/* written by Viktor K. Decyk, UCLA */
#include <stdlib.h>
#include <stdio.h>
#include "cuda.h"
extern int nblock_size;
extern int maxgsx;
static cudaError_t crc;
/*--------------------------------------------------------------------*/
__device__ void liscan2(int *isdata, int nths) {
/* performs local prefix reduction of integer data shared by threads */
/* using binary tree method. */
/* local data */
int l, mb, kxs, lb, kb;
l = threadIdx.x;
mb = l;
kxs = 1;
while (kxs < nths) {
lb = kxs*mb;
kb = 2*lb + kxs - 1;
lb += l + kxs;
if (lb < nths) {
isdata[lb] += isdata[kb];
}
__syncthreads();
mb >>= 1;
kxs <<= 1;
}
return;
}
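/* for example, with nths = 4 and isdata = {1,2,3,4} on entry, the */
/* passes above leave isdata = {1,3,6,10}, an inclusive prefix sum */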
/*--------------------------------------------------------------------*/
__device__ void lsum2(float *sdata, int n) {
/* finds local sum of nths data items shared by threads */
/* using binary tree method. input is modified. */
/* local data */
int l, k;
float s;
l = threadIdx.x;
k = blockDim.x >> 1;
s = 0.0f;
if (l < n) s = sdata[l];
while (k > 0) {
if (l < k) {
if ((l+k) < n) {
s += sdata[l+k];
sdata[l] = s;
}
}
__syncthreads();
k >>= 1;
}
return;
}
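/* for example, with blockDim.x = 8, n = 5 and sdata = {1,2,3,4,5}, */
/* the tree reduction above leaves sdata[0] = 15 */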
/*--------------------------------------------------------------------*/
__global__ void gpubppush23l(float ppart[], float fxy[], float bxy[],
int kpic[], float qbm, float dt, float dtc,
float *ek, int idimp, int nppmx, int nx,
int ny, int mx, int my, int nxv, int nyv,
int mx1, int mxy1, int ipbc) {
/* for 2-1/2d code, this subroutine updates particle co-ordinates and
velocities using leap-frog scheme in time and first-order linear
interpolation in space, with magnetic field. Using the Boris Mover.
threaded version using guard cells
data read in tiles
particles stored segmented array
119 flops/particle, 1 divide, 29 loads, 5 stores
input: all, output: ppart, ek
velocity equations used are:
vx(t+dt/2) = rot(1)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(2)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(3)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fx(x(t),y(t))*dt)
vy(t+dt/2) = rot(4)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(5)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(6)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fy(x(t),y(t))*dt)
vz(t+dt/2) = rot(7)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(8)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(9)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fz(x(t),y(t))*dt)
where q/m is charge/mass, and the rotation matrix is given by:
rot[0] = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2)
rot[1] = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
rot[2] = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[3] = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
rot[4] = (1 - (om*dt/2)**2 + 2*(omy*dt/2)**2)/(1 + (om*dt/2)**2)
rot[5] = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[6] = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[7] = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[8] = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2)
and om**2 = omx**2 + omy**2 + omz**2
the rotation matrix is determined by:
omx = (q/m)*bx(x(t),y(t)), omy = (q/m)*by(x(t),y(t)), and
omz = (q/m)*bz(x(t),y(t)).
position equations used are:
x(t+dt)=x(t) + vx(t+dt/2)*dt
y(t+dt)=y(t) + vy(t+dt/2)*dt
fx(x(t),y(t)), fy(x(t),y(t)), and fz(x(t),y(t))
bx(x(t),y(t)), by(x(t),y(t)), and bz(x(t),y(t))
are approximated by interpolation from the nearest grid points:
fx(x,y) = (1-dy)*((1-dx)*fx(n,m)+dx*fx(n+1,m)) + dy*((1-dx)*fx(n,m+1)
+ dx*fx(n+1,m+1))
where n,m = leftmost grid points and dx = x-n, dy = y-m
similarly for fy(x,y), fz(x,y), bx(x,y), by(x,y), bz(x,y)
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = velocity vx of particle n in tile m
ppart[m][3][n] = velocity vy of particle n in tile m
ppart[m][4][n] = velocity vz of particle n in tile m
fxy[k][j][0] = x component of force/charge at grid (j,k)
fxy[k][j][1] = y component of force/charge at grid (j,k)
fxy[k][j][2] = z component of force/charge at grid (j,k)
that is, convolution of electric field over particle shape
bxy[k][j][0] = x component of magnetic field at grid (j,k)
bxy[k][j][1] = y component of magnetic field at grid (j,k)
bxy[k][j][2] = z component of magnetic field at grid (j,k)
that is, the convolution of magnetic field over particle shape
kpic = number of particles per tile
qbm = particle charge/mass ratio
dt = time interval between successive calculations
dtc = time interval between successive co-ordinate calculations
kinetic energy/mass at time t is also calculated, using
ek = .5*sum((vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt)**2 +
(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt)**2 +
(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt)**2)
idimp = size of phase space = 5
nppmx = maximum number of particles in tile
nx/ny = system length in x/y direction
mx/my = number of grids in sorting cell in x/y
nxv = first dimension of field arrays, must be >= nx+1
nyv = second dimension of field arrays, must be >= ny+1
mx1 = (system length in x direction - 1)/mx + 1
mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
ipbc = particle boundary condition = (0,1,2,3) =
(none,2d periodic,2d reflecting,mixed reflecting/periodic)
local data */
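/* as a quick check on the formulas above: if bx = by = bz = 0, the */
/* rotation matrix reduces to the identity (anorm = 2), and the two */
/* half impulses combine to the usual electrostatic leap-frog update */
/* vx(t+dt/2) = vx(t-dt/2) + (q/m)*fx(x(t),y(t))*dt, and similarly */
/* for vy and vz */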
int noff, moff, npoff, npp, mxv;
int i, j, k, ii, nn, mm, nm;
float qtmh, edgelx, edgely, edgerx, edgery, dxp, dyp, amx, amy;
float dx, dy, dz, ox, oy, oz, acx, acy, acz, omxt, omyt, omzt, omt;
float anorm, rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9;
float x, y;
/* The sizes of the shared memory arrays are as follows: */
/* float sfxy[3*(mx+1)*(my+1)], sbxy[3*(mx+1)*(my+1)]; */
/* float sek[blockDim.x]; */
/* to conserve memory, sek overlaps with sfxy and sbxy */
/* and the name sfxy is used instead of sek */
float *sbxy;
extern __shared__ float sfxy[];
sbxy = &sfxy[3*(mx+1)*(my+1)];
double sum1;
qtmh = 0.5f*qbm*dt;
sum1 = 0.0;
/* set boundary values */
edgelx = 0.0f;
edgely = 0.0f;
edgerx = (float) nx;
edgery = (float) ny;
if (ipbc==2) {
edgelx = 1.0f;
edgely = 1.0f;
edgerx = (float) (nx-1);
edgery = (float) (ny-1);
}
else if (ipbc==3) {
edgelx = 1.0f;
edgerx = (float) (nx-1);
}
mxv = mx + 1;
/* k = tile number */
k = blockIdx.x + gridDim.x*blockIdx.y;
/* loop over tiles */
if (k < mxy1) {
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
npp = kpic[k];
npoff = idimp*nppmx*k;
/* load local fields from global array */
nn = (mx < nx-noff ? mx : nx-noff) + 1;
mm = (my < ny-moff ? my : ny-moff) + 1;
ii = threadIdx.x;
while (ii < mxv*(my+1)) {
j = ii/mxv;
i = ii - mxv*j;
if ((i < nn) && (j < mm)) {
sfxy[3*ii] = fxy[3*(i+noff+nxv*(j+moff))];
sfxy[1+3*ii] = fxy[1+3*(i+noff+nxv*(j+moff))];
sfxy[2+3*ii] = fxy[2+3*(i+noff+nxv*(j+moff))];
}
ii += blockDim.x;
}
ii = threadIdx.x;
while (ii < mxv*(my+1)) {
j = ii/mxv;
i = ii - mxv*j;
if ((i < nn) && (j < mm)) {
sbxy[3*ii] = bxy[3*(i+noff+nxv*(j+moff))];
sbxy[1+3*ii] = bxy[1+3*(i+noff+nxv*(j+moff))];
sbxy[2+3*ii] = bxy[2+3*(i+noff+nxv*(j+moff))];
}
ii += blockDim.x;
}
/* synchronize threads */
__syncthreads();
/* loop over particles in tile */
j = threadIdx.x;
while (j < npp) {
/* find interpolation weights */
x = ppart[j+npoff];
y = ppart[j+npoff+nppmx];
nn = x;
mm = y;
dxp = x - (float) nn;
dyp = y - (float) mm;
nm = 3*(nn - noff) + 3*mxv*(mm - moff);
amx = 1.0f - dxp;
amy = 1.0f - dyp;
/* find electric field */
nn = nm;
dx = amx*sfxy[nn];
dy = amx*sfxy[nn+1];
dz = amx*sfxy[nn+2];
mm = nn + 3;
dx = amy*(dxp*sfxy[mm] + dx);
dy = amy*(dxp*sfxy[mm+1] + dy);
dz = amy*(dxp*sfxy[mm+2] + dz);
nn += 3*mxv;
acx = amx*sfxy[nn];
acy = amx*sfxy[nn+1];
acz = amx*sfxy[nn+2];
mm = nn + 3;
dx += dyp*(dxp*sfxy[mm] + acx);
dy += dyp*(dxp*sfxy[mm+1] + acy);
dz += dyp*(dxp*sfxy[mm+2] + acz);
/* find magnetic field */
nn = nm;
ox = amx*sbxy[nn];
oy = amx*sbxy[nn+1];
oz = amx*sbxy[nn+2];
mm = nn + 3;
ox = amy*(dxp*sbxy[mm] + ox);
oy = amy*(dxp*sbxy[mm+1] + oy);
oz = amy*(dxp*sbxy[mm+2] + oz);
nn += 3*mxv;
acx = amx*sbxy[nn];
acy = amx*sbxy[nn+1];
acz = amx*sbxy[nn+2];
mm = nn + 3;
ox += dyp*(dxp*sbxy[mm] + acx);
oy += dyp*(dxp*sbxy[mm+1] + acy);
oz += dyp*(dxp*sbxy[mm+2] + acz);
/* calculate half impulse */
dx *= qtmh;
dy *= qtmh;
dz *= qtmh;
/* half acceleration */
acx = ppart[j+npoff+nppmx*2] + dx;
acy = ppart[j+npoff+nppmx*3] + dy;
acz = ppart[j+npoff+nppmx*4] + dz;
/* time-centered kinetic energy */
sum1 += (acx*acx + acy*acy + acz*acz);
/* calculate cyclotron frequency */
omxt = qtmh*ox;
omyt = qtmh*oy;
omzt = qtmh*oz;
/* calculate rotation matrix */
omt = omxt*omxt + omyt*omyt + omzt*omzt;
anorm = 2.0f/(1.0f + omt);
omt = 0.5f*(1.0f - omt);
rot4 = omxt*omyt;
rot7 = omxt*omzt;
rot8 = omyt*omzt;
rot1 = omt + omxt*omxt;
rot5 = omt + omyt*omyt;
rot9 = omt + omzt*omzt;
rot2 = omzt + rot4;
rot4 -= omzt;
rot3 = -omyt + rot7;
rot7 += omyt;
rot6 = omxt + rot8;
rot8 -= omxt;
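/* note: rot1,...,rot9 are the documented matrix entries rot[0],...,rot[8] */
/* scaled by (1 + (om*dt/2)**2)/2; the common factor anorm applied in the  */
/* velocity update below restores the normalization                        */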
/* new velocity */
dx += (rot1*acx + rot2*acy + rot3*acz)*anorm;
dy += (rot4*acx + rot5*acy + rot6*acz)*anorm;
dz += (rot7*acx + rot8*acy + rot9*acz)*anorm;
ppart[j+npoff+nppmx*2] = dx;
ppart[j+npoff+nppmx*3] = dy;
ppart[j+npoff+nppmx*4] = dz;
/* new position */
dx = x + dx*dtc;
dy = y + dy*dtc;
/* reflecting boundary conditions */
if (ipbc==2) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = ppart[j+npoff];
ppart[j+npoff+nppmx*2] = -ppart[j+npoff+nppmx*2];
}
if ((dy < edgely) || (dy >= edgery)) {
dy = ppart[j+npoff+nppmx];
ppart[j+npoff+nppmx*3] = -ppart[j+npoff+nppmx*3];
}
}
/* mixed reflecting/periodic boundary conditions */
else if (ipbc==3) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = ppart[j+npoff];
ppart[j+npoff+nppmx*2] = -ppart[j+npoff+nppmx*2];
}
}
/* set new position */
ppart[j+npoff] = dx;
ppart[j+npoff+nppmx] = dy;
j += blockDim.x;
}
/* synchronize threads */
__syncthreads();
/* add kinetic energies in tile */
sfxy[threadIdx.x] = (float) sum1;
/* synchronize threads */
__syncthreads();
lsum2(sfxy,blockDim.x);
/* normalize kinetic energy of tile */
if (threadIdx.x==0) {
ek[k] = 0.5f*sfxy[0];
}
}
return;
}
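/* Usage note (a sketch of the caller, which is not part of this file):  */
/* the kernel above leaves one partial kinetic energy per tile in ek[k], */
/* k = 0,...,mxy1-1, so the total is obtained by summing those entries   */
/* afterwards, e.g. on a host copy of ek (ek_h and wke are hypothetical  */
/* names):                                                                */
/*    double wke = 0.0;                                                   */
/*    for (int k = 0; k < mxy1; k++) wke += (double) ek_h[k];             */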
/*--------------------------------------------------------------------*/
__global__ void gpubppushf23l(float ppart[], float fxy[], float bxy[],
int kpic[], int ncl[], int ihole[],
float qbm, float dt, float dtc, float *ek,
int idimp, int nppmx, int nx, int ny,
int mx, int my, int nxv, int nyv, int mx1,
int mxy1, int ntmax, int *irc) {
/* for 2-1/2d code, this subroutine updates particle co-ordinates and
velocities using leap-frog scheme in time and first-order linear
interpolation in space, with magnetic field, using the Boris Mover,
with periodic boundary conditions.
also determines list of particles which are leaving this tile
threaded version using guard cells
data read in tiles
particles stored in a segmented array
119 flops/particle, 1 divide, 29 loads, 5 stores
input: all except ncl, ihole, irc, output: ppart, ncl, ihole, irc, ek
velocity equations used are:
vx(t+dt/2) = rot(1)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(2)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(3)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fx(x(t),y(t))*dt
vy(t+dt/2) = rot(4)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(5)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(6)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fy(x(t),y(t))*dt
vz(t+dt/2) = rot(7)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(8)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(9)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fz(x(t),y(t))*dt
where q/m is charge/mass, and the rotation matrix is given by:
rot[0] = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2)
rot[1] = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
rot[2] = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[3] = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
rot[4] = (1 - (om*dt/2)**2 + 2*(omy*dt/2)**2)/(1 + (om*dt/2)**2)
rot[5] = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[6] = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[7] = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[8] = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2)
and om**2 = omx**2 + omy**2 + omz**2
the rotation matrix is determined by:
omx = (q/m)*bx(x(t),y(t)), omy = (q/m)*by(x(t),y(t)), and
omz = (q/m)*bz(x(t),y(t)).
position equations used are:
x(t+dt)=x(t) + vx(t+dt/2)*dt
y(t+dt)=y(t) + vy(t+dt/2)*dt
fx(x(t),y(t)), fy(x(t),y(t)), and fz(x(t),y(t))
bx(x(t),y(t)), by(x(t),y(t)), and bz(x(t),y(t))
are approximated by interpolation from the nearest grid points:
fx(x,y) = (1-dy)*((1-dx)*fx(n,m)+dx*fx(n+1,m)) + dy*((1-dx)*fx(n,m+1)
+ dx*fx(n+1,m+1))
where n,m = leftmost grid points and dx = x-n, dy = y-m
similarly for fy(x,y), fz(x,y), bx(x,y), by(x,y), bz(x,y)
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = velocity vx of particle n in tile m
ppart[m][3][n] = velocity vy of particle n in tile m
ppart[m][4][n] = velocity vz of particle n in tile m
fxy[k][j][0] = x component of force/charge at grid (j,k)
fxy[k][j][1] = y component of force/charge at grid (j,k)
fxy[k][j][2] = z component of force/charge at grid (j,k)
that is, convolution of electric field over particle shape
bxy[k][j][0] = x component of magnetic field at grid (j,k)
bxy[k][j][1] = y component of magnetic field at grid (j,k)
bxy[k][j][2] = z component of magnetic field at grid (j,k)
that is, the convolution of magnetic field over particle shape
kpic[k] = number of particles in tile k
ncl[k][i] = number of particles going to destination i, tile k
ihole[k][:][0] = location of hole in array left by departing particle
ihole[k][:][1] = destination of particle leaving hole
ihole[k][0][0] = ih, number of holes left (error, if negative)
qbm = particle charge/mass ratio
dt = time interval between successive calculations
dtc = time interval between successive co-ordinate calculations
kinetic energy/mass at time t is also calculated, using
ek = .5*sum((vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt)**2 +
(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt)**2 +
(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt)**2)
idimp = size of phase space = 5
nppmx = maximum number of particles in tile
nx/ny = system length in x/y direction
mx/my = number of grids in sorting cell in x/y
nxv = first dimension of field arrays, must be >= nx+1
nyv = second dimension of field arrays, must be >= ny+1
mx1 = (system length in x direction - 1)/mx + 1
mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
ntmax = size of hole array for particles leaving tiles
irc = maximum overflow, returned only if error occurs, when irc > 0
optimized version
local data */
int noff, moff, npoff, nhoff, mhoff, npp, mxv;
int i, j, k, ii, ih, nn, mm, nm;
float qtmh, dxp, dyp, amx, amy, dx, dy, dz, ox, oy, oz;
float acx, acy, acz, omxt, omyt, omzt, omt, anorm;
float rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9;
float anx, any, edgelx, edgely, edgerx, edgery;
float x, y;
/* The sizes of the shared memory arrays are as follows: */
/* float sfxy[3*(mx+1)*(my+1)], sbxy[3*(mx+1)*(my+1)]; */
/* float sek[blockDim.x]; */
/* int sih[blockDim.x], sncl[8], nh[1]; */
/* to conserve memory, sek overlaps with sfxy, sbxy, and sih */
/* and the name sfxy is used instead of sek */
float *sbxy;
int *sncl, *sih, *nh;
extern __shared__ float sfxy[];
sbxy = &sfxy[3*(mx+1)*(my+1)];
sih = (int *)&sfxy[6*(mx+1)*(my+1)];
sncl = (int *)&sih[blockDim.x];
nh = (int *)&sfxy[blockDim.x];
sncl = sncl > nh ? sncl : nh;
nh = (int *)&sncl[8];
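/* note: sek aliases sfxy[0:blockDim.x] and sih starts right after the   */
/* two field arrays; sncl is placed after whichever of those regions     */
/* ends later, followed by nh.  A sizing sketch for the caller (an       */
/* assumption, since the launch is not shown here): provided blockDim.x  */
/* does not exceed 6*(mx+1)*(my+1), the kernel needs at least            */
/*    6*(mx+1)*(my+1)*sizeof(float) + (blockDim.x+9)*sizeof(int)         */
/* bytes of dynamic shared memory                                        */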
double sum1;
qtmh = 0.5f*qbm*dt;
anx = (float) nx;
any = (float) ny;
sum1 = 0.0;
mxv = mx + 1;
/* k = tile number */
k = blockIdx.x + gridDim.x*blockIdx.y;
/* loop over tiles */
if (k < mxy1) {
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
npp = kpic[k];
npoff = idimp*nppmx*k;
nn = nx - noff;
nn = mx < nn ? mx : nn;
mm = ny - moff;
mm = my < mm ? my : mm;
edgelx = noff;
edgerx = noff + nn;
edgely = moff;
edgery = moff + mm;
nn += 1;
mm += 1;
/* load local fields from global array */
ii = threadIdx.x;
while (ii < mxv*(my+1)) {
j = ii/mxv;
i = ii - mxv*j;
if ((i < nn) && (j < mm)) {
sfxy[3*ii] = fxy[3*(i+noff+nxv*(j+moff))];
sfxy[1+3*ii] = fxy[1+3*(i+noff+nxv*(j+moff))];
sfxy[2+3*ii] = fxy[2+3*(i+noff+nxv*(j+moff))];
}
ii += blockDim.x;
}
ii = threadIdx.x;
while (ii < mxv*(my+1)) {
j = ii/mxv;
i = ii - mxv*j;
if ((i < nn) && (j < mm)) {
sbxy[3*ii] = bxy[3*(i+noff+nxv*(j+moff))];
sbxy[1+3*ii] = bxy[1+3*(i+noff+nxv*(j+moff))];
sbxy[2+3*ii] = bxy[2+3*(i+noff+nxv*(j+moff))];
}
ii += blockDim.x;
}
/* clear counters */
j = threadIdx.x;
while (j < 8) {
sncl[j] = 0;
j += blockDim.x;
}
if (threadIdx.x==0) {
nh[0] = 0;
}
/* synchronize threads */
__syncthreads();
/* loop over particles in tile */
ii = (npp - 1)/(int) blockDim.x + 1;
nhoff = 0;
for (i = 0; i < ii; i++) {
j = threadIdx.x + blockDim.x*i;
sih[threadIdx.x] = 0;
if (j < npp) {
/* find interpolation weights */
x = ppart[j+npoff];
y = ppart[j+nppmx+npoff];
nn = x;
mm = y;
dxp = x - (float) nn;
dyp = y - (float) mm;
nm = 3*(nn - noff) + 3*mxv*(mm - moff);
amx = 1.0f - dxp;
amy = 1.0f - dyp;
/* find electric field */
nn = nm;
dx = amx*sfxy[nn];
dy = amx*sfxy[nn+1];
dz = amx*sfxy[nn+2];
mm = nn + 3;
dx = amy*(dxp*sfxy[mm] + dx);
dy = amy*(dxp*sfxy[mm+1] + dy);
dz = amy*(dxp*sfxy[mm+2] + dz);
nn += 3*mxv;
acx = amx*sfxy[nn];
acy = amx*sfxy[nn+1];
acz = amx*sfxy[nn+2];
mm = nn + 3;
dx += dyp*(dxp*sfxy[mm] + acx);
dy += dyp*(dxp*sfxy[mm+1] + acy);
dz += dyp*(dxp*sfxy[mm+2] + acz);
/* find magnetic field */
nn = nm;
ox = amx*sbxy[nn];
oy = amx*sbxy[nn+1];
oz = amx*sbxy[nn+2];
mm = nn + 3;
ox = amy*(dxp*sbxy[mm] + ox);
oy = amy*(dxp*sbxy[mm+1] + oy);
oz = amy*(dxp*sbxy[mm+2] + oz);
nn += 3*mxv;
acx = amx*sbxy[nn];
acy = amx*sbxy[nn+1];
acz = amx*sbxy[nn+2];
mm = nn + 3;
ox += dyp*(dxp*sbxy[mm] + acx);
oy += dyp*(dxp*sbxy[mm+1] + acy);
oz += dyp*(dxp*sbxy[mm+2] + acz);
/* calculate half impulse */
dx *= qtmh;
dy *= qtmh;
dz *= qtmh;
/* half acceleration */
acx = ppart[j+2*nppmx+npoff] + dx;
acy = ppart[j+3*nppmx+npoff] + dy;
acz = ppart[j+4*nppmx+npoff] + dz;
/* time-centered kinetic energy */
sum1 += (acx*acx + acy*acy + acz*acz);
/* calculate cyclotron frequency */
omxt = qtmh*ox;
omyt = qtmh*oy;
omzt = qtmh*oz;
/* calculate rotation matrix */
omt = omxt*omxt + omyt*omyt + omzt*omzt;
anorm = 2.0f/(1.0f + omt);
omt = 0.5f*(1.0f - omt);
rot4 = omxt*omyt;
rot7 = omxt*omzt;
rot8 = omyt*omzt;
rot1 = omt + omxt*omxt;
rot5 = omt + omyt*omyt;
rot9 = omt + omzt*omzt;
rot2 = omzt + rot4;
rot4 -= omzt;
rot3 = -omyt + rot7;
rot7 += omyt;
rot6 = omxt + rot8;
rot8 -= omxt;
/* new velocity */
dx += (rot1*acx + rot2*acy + rot3*acz)*anorm;
dy += (rot4*acx + rot5*acy + rot6*acz)*anorm;
dz += (rot7*acx + rot8*acy + rot9*acz)*anorm;
ppart[j+2*nppmx+npoff] = dx;
ppart[j+3*nppmx+npoff] = dy;
ppart[j+4*nppmx+npoff] = dz;
/* new position */
dx = x + dx*dtc;
dy = y + dy*dtc;
/* find particles going out of bounds */
mm = 0;
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* mm = direction particle is going */
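/* encoding: mm = 0 means the particle stays in this tile; an exit in x  */
/* sets mm to 1 (toward lower x) or 2 (toward higher x), and an exit in  */
/* y adds 3 (toward lower y) or 6 (toward higher y), so the 8 possible   */
/* destinations are mm = 1,...,8 and are counted in sncl[mm-1]           */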
if (dx >= edgerx) {
if (dx >= anx)
dx -= anx;
mm = 2;
}
else if (dx < edgelx) {
if (dx < 0.0f) {
dx += anx;
if (dx < anx)
mm = 1;
else
dx = 0.0f;
}
else {
mm = 1;
}
}
if (dy >= edgery) {
if (dy >= any)
dy -= any;
mm += 6;
}
else if (dy < edgely) {
if (dy < 0.0f) {
dy += any;
if (dy < any)
mm += 3;
else
dy = 0.0f;
}
else {
mm += 3;
}
}
/* using prefix scan for ih to keep holes ordered */
if (mm > 0) {
atomicAdd(&sncl[mm-1],1);
sih[threadIdx.x] = 1;
}
}
/* synchronize threads */
__syncthreads();
nn = npp - blockDim.x*i;
if (nn > blockDim.x)
nn = blockDim.x;
/* perform local prefix reduction */
liscan2(sih,nn);
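/* assuming liscan2 computes an inclusive prefix sum (as its use below   */
/* implies), sih[t] now holds the number of departing particles among    */
/* threads 0..t in this pass, so sih[t] > sih[t-1] marks a departing     */
/* particle and gives its ordered hole index                             */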
if (j < npp) {
/* set new position */
ppart[j+npoff] = dx;
ppart[j+nppmx+npoff] = dy;
/* write out location and direction of departing particles */
ih = sih[threadIdx.x];
mhoff = 0;
if (threadIdx.x > 0)
mhoff = sih[threadIdx.x-1];
/* this thread has a hole present */
if (ih > mhoff) {
ih += nhoff;
if (ih <= ntmax) {
ihole[2*(ih+(ntmax+1)*k)] = j + 1;
ihole[1+2*(ih+(ntmax+1)*k)] = mm;
}
else {
nh[0] = 1;
}
}
}
/* update number of holes in this iteration */
if (nn > 0)
nhoff += sih[nn-1];
/* synchronize threads */
__syncthreads();
}
/* add kinetic energies in tile */
sfxy[threadIdx.x] = (float) sum1;
/* synchronize threads */
__syncthreads();
lsum2(sfxy,blockDim.x);
/* write out counters */
j = threadIdx.x;
while (j < 8) {
ncl[j+8*k] = sncl[j];
j += blockDim.x;
}
/* write out hole count and set error flag */
if (threadIdx.x==0) {
/* ihole overflow */
ih = nhoff;
if (nh[0] > 0) {
*irc = ih;
ih = -ih;
}
ihole[2*(ntmax+1)*k] = ih;
/* normalize kinetic energy of tile */
ek[k] = 0.5f*sfxy[0];
}
}
return;
}
/*--------------------------------------------------------------------*/
__global__ void gpurbppush23l(float ppart[], float fxy[], float bxy[],
int kpic[], float qbm, float dt,
float dtc, float ci, float *ek, int idimp,
int nppmx, int nx, int ny, int mx, int my,
int nxv, int nyv, int mx1, int mxy1,
int ipbc) {
/* for 2-1/2d code, this subroutine updates particle co-ordinates and
velocities using leap-frog scheme in time and first-order linear
interpolation in space, for relativistic particles with magnetic field,
using the Boris Mover.
threaded version using guard cells
data read in tiles
particles stored in a segmented array
131 flops/particle, 4 divides, 2 sqrts, 25 loads, 5 stores
input: all, output: ppart, ek
momentum equations used are:
px(t+dt/2) = rot(1)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(2)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(3)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fx(x(t),y(t))*dt
py(t+dt/2) = rot(4)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(5)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(6)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fy(x(t),y(t))*dt
pz(t+dt/2) = rot(7)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(8)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(9)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fz(x(t),y(t))*dt
where q/m is charge/mass, and the rotation matrix is given by:
rot[0] = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2)
rot[1] = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
rot[2] = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[3] = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
rot[4] = (1 - (om*dt/2)**2 + 2*(omy*dt/2)**2)/(1 + (om*dt/2)**2)
rot[5] = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[6] = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[7] = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[8] = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2)
and om**2 = omx**2 + omy**2 + omz**2
the rotation matrix is determined by:
omx = (q/m)*bx(x(t),y(t))*gami, omy = (q/m)*by(x(t),y(t))*gami, and
omz = (q/m)*bz(x(t),y(t))*gami,
where gami = 1./sqrt(1.+(px(t)*px(t)+py(t)*py(t)+pz(t)*pz(t))*ci*ci)
position equations used are:
x(t+dt) = x(t) + px(t+dt/2)*dtg
y(t+dt) = y(t) + py(t+dt/2)*dtg
where dtg = dtc/sqrt(1.+(px(t+dt/2)*px(t+dt/2)+py(t+dt/2)*py(t+dt/2)+
pz(t+dt/2)*pz(t+dt/2))*ci*ci)
fx(x(t),y(t)), fy(x(t),y(t)), and fz(x(t),y(t))
bx(x(t),y(t)), by(x(t),y(t)), and bz(x(t),y(t))
are approximated by interpolation from the nearest grid points:
fx(x,y) = (1-dy)*((1-dx)*fx(n,m)+dx*fx(n+1,m)) + dy*((1-dx)*fx(n,m+1)
+ dx*fx(n+1,m+1))
where n,m = leftmost grid points and dx = x-n, dy = y-m
similarly for fy(x,y), fz(x,y), bx(x,y), by(x,y), bz(x,y)
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = x momentum of particle n in tile m
ppart[m][3][n] = y momentum of particle n in tile m
ppart[m][4][n] = z momentum of particle n in tile m
fxy[k][j][0] = x component of force/charge at grid (j,k)
fxy[k][j][1] = y component of force/charge at grid (j,k)
fxy[k][j][2] = z component of force/charge at grid (j,k)
that is, convolution of electric field over particle shape
bxy[k][j][0] = x component of magnetic field at grid (j,k)
bxy[k][j][1] = y component of magnetic field at grid (j,k)
bxy[k][j][2] = z component of magnetic field at grid (j,k)
that is, the convolution of magnetic field over particle shape
kpic = number of particles per tile
qbm = particle charge/mass ratio
dt = time interval between successive calculations
dtc = time interval between successive co-ordinate calculations
ci = reciprocal of velocity of light
kinetic energy/mass at time t is also calculated, using
ek = gami*sum((px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt)**2 +
(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt)**2 +
(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt)**2)/(1. + gami)
idimp = size of phase space = 5
nppmx = maximum number of particles in tile
nx/ny = system length in x/y direction
mx/my = number of grids in sorting cell in x/y
nxv = first dimension of field arrays, must be >= nx+1
nyv = second dimension of field arrays, must be >= ny+1
mx1 = (system length in x direction - 1)/mx + 1
mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
ipbc = particle boundary condition = (0,1,2,3) =
(none,2d periodic,2d reflecting,mixed reflecting/periodic)
local data */
int noff, moff, npoff, npp, mxv;
int i, j, k, ii, nn, mm, nm;
float qtmh, ci2, edgelx, edgely, edgerx, edgery, dxp, dyp, amx, amy;
float dx, dy, dz, ox, oy, oz, acx, acy, acz, p2, gami, qtmg, dtg;
float omxt, omyt, omzt, omt, anorm;
float rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9;
float x, y;
/* The sizes of the shared memory arrays are as follows: */
/* float sfxy[3*(mx+1)*(my+1)], sbxy[3*(mx+1)*(my+1)]; */
/* float sek[blockDim.x]; */
/* to conserve memory, sek overlaps with sfxy and sbxy */
/* and the name sfxy is used instead of sek */
float *sbxy;
extern __shared__ float sfxy[];
sbxy = &sfxy[3*(mx+1)*(my+1)];
double sum1;
qtmh = 0.5f*qbm*dt;
ci2 = ci*ci;
sum1 = 0.0;
/* set boundary values */
edgelx = 0.0f;
edgely = 0.0f;
edgerx = (float) nx;
edgery = (float) ny;
if (ipbc==2) {
edgelx = 1.0f;
edgely = 1.0f;
edgerx = (float) (nx-1);
edgery = (float) (ny-1);
}
else if (ipbc==3) {
edgelx = 1.0f;
edgerx = (float) (nx-1);
}
mxv = mx + 1;
/* k = tile number */
k = blockIdx.x + gridDim.x*blockIdx.y;
/* loop over tiles */
if (k < mxy1) {
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
npp = kpic[k];
npoff = idimp*nppmx*k;
/* load local fields from global array */
nn = (mx < nx-noff ? mx : nx-noff) + 1;
mm = (my < ny-moff ? my : ny-moff) + 1;
ii = threadIdx.x;
while (ii < mxv*(my+1)) {
j = ii/mxv;
i = ii - mxv*j;
if ((i < nn) && (j < mm)) {
sfxy[3*ii] = fxy[3*(i+noff+nxv*(j+moff))];
sfxy[1+3*ii] = fxy[1+3*(i+noff+nxv*(j+moff))];
sfxy[2+3*ii] = fxy[2+3*(i+noff+nxv*(j+moff))];
}
ii += blockDim.x;
}
ii = threadIdx.x;
while (ii < mxv*(my+1)) {
j = ii/mxv;
i = ii - mxv*j;
if ((i < nn) && (j < mm)) {
sbxy[3*ii] = bxy[3*(i+noff+nxv*(j+moff))];
sbxy[1+3*ii] = bxy[1+3*(i+noff+nxv*(j+moff))];
sbxy[2+3*ii] = bxy[2+3*(i+noff+nxv*(j+moff))];
}
ii += blockDim.x;
}
/* synchronize threads */
__syncthreads();
/* loop over particles in tile */
j = threadIdx.x;
while (j < npp) {
/* find interpolation weights */
x = ppart[j+npoff];
y = ppart[j+npoff+nppmx];
nn = x;
mm = y;
dxp = x - (float) nn;
dyp = y - (float) mm;
nm = 3*(nn - noff) + 3*mxv*(mm - moff);
amx = 1.0f - dxp;
amy = 1.0f - dyp;
/* find electric field */
nn = nm;
dx = amx*sfxy[nn];
dy = amx*sfxy[nn+1];
dz = amx*sfxy[nn+2];
mm = nn + 3;
dx = amy*(dxp*sfxy[mm] + dx);
dy = amy*(dxp*sfxy[mm+1] + dy);
dz = amy*(dxp*sfxy[mm+2] + dz);
nn += 3*mxv;
acx = amx*sfxy[nn];
acy = amx*sfxy[nn+1];
acz = amx*sfxy[nn+2];
mm = nn + 3;
dx += dyp*(dxp*sfxy[mm] + acx);
dy += dyp*(dxp*sfxy[mm+1] + acy);
dz += dyp*(dxp*sfxy[mm+2] + acz);
/* find magnetic field */
nn = nm;
ox = amx*sbxy[nn];
oy = amx*sbxy[nn+1];
oz = amx*sbxy[nn+2];
mm = nn + 3;
ox = amy*(dxp*sbxy[mm] + ox);
oy = amy*(dxp*sbxy[mm+1] + oy);
oz = amy*(dxp*sbxy[mm+2] + oz);
nn += 3*mxv;
acx = amx*sbxy[nn];
acy = amx*sbxy[nn+1];
acz = amx*sbxy[nn+2];
mm = nn + 3;
ox += dyp*(dxp*sbxy[mm] + acx);
oy += dyp*(dxp*sbxy[mm+1] + acy);
oz += dyp*(dxp*sbxy[mm+2] + acz);
/* calculate half impulse */
dx *= qtmh;
dy *= qtmh;
dz *= qtmh;
/* half acceleration */
acx = ppart[j+npoff+nppmx*2] + dx;
acy = ppart[j+npoff+nppmx*3] + dy;
acz = ppart[j+npoff+nppmx*4] + dz;
/* find inverse gamma */
p2 = acx*acx + acy*acy + acz*acz;
gami = 1.0f/sqrtf(1.0f + p2*ci2);
/* renormalize magnetic field */
qtmg = qtmh*gami;
/* time-centered kinetic energy */
sum1 += gami*p2/(1.0f + gami);
/* calculate cyclotron frequency */
omxt = qtmg*ox;
omyt = qtmg*oy;
omzt = qtmg*oz;
/* calculate rotation matrix */
omt = omxt*omxt + omyt*omyt + omzt*omzt;
anorm = 2.0f/(1.0f + omt);
omt = 0.5f*(1.0f - omt);
rot4 = omxt*omyt;
rot7 = omxt*omzt;
rot8 = omyt*omzt;
rot1 = omt + omxt*omxt;
rot5 = omt + omyt*omyt;
rot9 = omt + omzt*omzt;
rot2 = omzt + rot4;
rot4 -= omzt;
rot3 = -omyt + rot7;
rot7 += omyt;
rot6 = omxt + rot8;
rot8 -= omxt;
/* new momentum */
dx += (rot1*acx + rot2*acy + rot3*acz)*anorm;
dy += (rot4*acx + rot5*acy + rot6*acz)*anorm;
dz += (rot7*acx + rot8*acy + rot9*acz)*anorm;
ppart[j+npoff+nppmx*2] = dx;
ppart[j+npoff+nppmx*3] = dy;
ppart[j+npoff+nppmx*4] = dz;
/* update inverse gamma */
p2 = dx*dx + dy*dy + dz*dz;
dtg = dtc/sqrtf(1.0f + p2*ci2);
/* new position */
dx = x + dx*dtg;
dy = y + dy*dtg;
/* reflecting boundary conditions */
if (ipbc==2) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = ppart[j+npoff];
ppart[j+npoff+nppmx*2] = -ppart[j+npoff+nppmx*2];
}
if ((dy < edgely) || (dy >= edgery)) {
dy = ppart[j+npoff+nppmx];
ppart[j+npoff+nppmx*3] = -ppart[j+npoff+nppmx*3];
}
}
/* mixed reflecting/periodic boundary conditions */
else if (ipbc==3) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = ppart[j+npoff];
ppart[j+npoff+nppmx*2] = -ppart[j+npoff+nppmx*2];
}
}
/* set new position */
ppart[j+npoff] = dx;
ppart[j+npoff+nppmx] = dy;
j += blockDim.x;
}
/* synchronize threads */
__syncthreads();
/* add kinetic energies in tile */
sfxy[threadIdx.x] = (float) sum1;
/* synchronize threads */
__syncthreads();
lsum2(sfxy,blockDim.x);
/* normalize kinetic energy of tile */
if (threadIdx.x==0) {
ek[k] = sfxy[0];
}
}
return;
}
/*--------------------------------------------------------------------*/
__global__ void gpurbppushf23l(float ppart[], float fxy[], float bxy[],
int kpic[], int ncl[], int ihole[],
float qbm, float dt, float dtc, float ci,
float *ek, int idimp, int nppmx, int nx,
int ny, int mx, int my, int nxv, int nyv,
int mx1, int mxy1, int ntmax, int *irc) {
/* for 2-1/2d code, this subroutine updates particle co-ordinates and
velocities using leap-frog scheme in time and first-order linear
interpolation in space, for relativistic particles with magnetic field
and periodic boundary conditions, using the Boris Mover.
also determines list of particles which are leaving this tile
threaded version using guard cells
data read in tiles
particles stored in a segmented array
131 flops/particle, 4 divides, 2 sqrts, 25 loads, 5 stores
input: all except ncl, ihole, irc, output: ppart, ncl, ihole, irc, ek
momentum equations used are:
px(t+dt/2) = rot(1)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(2)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(3)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fx(x(t),y(t))*dt
py(t+dt/2) = rot(4)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(5)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(6)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fy(x(t),y(t))*dt
pz(t+dt/2) = rot(7)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(8)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(9)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fz(x(t),y(t))*dt
where q/m is charge/mass, and the rotation matrix is given by:
rot[0] = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2)
rot[1] = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
rot[2] = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[3] = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
rot[4] = (1 - (om*dt/2)**2 + 2*(omy*dt/2)**2)/(1 + (om*dt/2)**2)
rot[5] = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[6] = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[7] = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[8] = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2)
and om**2 = omx**2 + omy**2 + omz**2
the rotation matrix is determined by:
omx = (q/m)*bx(x(t),y(t))*gami, omy = (q/m)*by(x(t),y(t))*gami, and
omz = (q/m)*bz(x(t),y(t))*gami,
where gami = 1./sqrt(1.+(px(t)*px(t)+py(t)*py(t)+pz(t)*pz(t))*ci*ci)
position equations used are:
x(t+dt) = x(t) + px(t+dt/2)*dtg
y(t+dt) = y(t) + py(t+dt/2)*dtg
where dtg = dtc/sqrt(1.+(px(t+dt/2)*px(t+dt/2)+py(t+dt/2)*py(t+dt/2)+
pz(t+dt/2)*pz(t+dt/2))*ci*ci)
fx(x(t),y(t)), fy(x(t),y(t)), and fz(x(t),y(t))
bx(x(t),y(t)), by(x(t),y(t)), and bz(x(t),y(t))
are approximated by interpolation from the nearest grid points:
fx(x,y) = (1-dy)*((1-dx)*fx(n,m)+dx*fx(n+1,m)) + dy*((1-dx)*fx(n,m+1)
+ dx*fx(n+1,m+1))
where n,m = leftmost grid points and dx = x-n, dy = y-m
similarly for fy(x,y), fz(x,y), bx(x,y), by(x,y), bz(x,y)
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = x momentum of particle n in tile m
ppart[m][3][n] = y momentum of particle n in tile m
ppart[m][4][n] = z momentum of particle n in tile m
fxy[k][j][0] = x component of force/charge at grid (j,k)
fxy[k][j][1] = y component of force/charge at grid (j,k)
fxy[k][j][2] = z component of force/charge at grid (j,k)
that is, convolution of electric field over particle shape
bxy[k][j][0] = x component of magnetic field at grid (j,k)
bxy[k][j][1] = y component of magnetic field at grid (j,k)
bxy[k][j][2] = z component of magnetic field at grid (j,k)
that is, the convolution of magnetic field over particle shape
kpic[k] = number of particles in tile k
ncl[k][i] = number of particles going to destination i, tile k
ihole[k][:][0] = location of hole in array left by departing particle
ihole[k][:][1] = destination of particle leaving hole
ihole[k][0][0] = ih, number of holes left (error, if negative)
qbm = particle charge/mass ratio
dt = time interval between successive calculations
dtc = time interval between successive co-ordinate calculations
ci = reciprocal of velocity of light
kinetic energy/mass at time t is also calculated, using
ek = gami*sum((px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt)**2 +
(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt)**2 +
(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt)**2)/(1. + gami)
idimp = size of phase space = 5
nppmx = maximum number of particles in tile
nx/ny = system length in x/y direction
mx/my = number of grids in sorting cell in x/y
nxv = first dimension of field arrays, must be >= nx+1
nyv = second dimension of field arrays, must be >= ny+1
mx1 = (system length in x direction - 1)/mx + 1
mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
ntmax = size of hole array for particles leaving tiles
irc = maximum overflow, returned only if error occurs, when irc > 0
optimized version
local data */
int noff, moff, npoff, nhoff, mhoff, npp, mxv;
int i, j, k, ii, ih, nn, mm, nm;
float qtmh, ci2, dxp, dyp, amx, amy, dx, dy, dz, ox, oy, oz;
float acx, acy, acz, p2, gami, qtmg, dtg, omxt, omyt, omzt, omt;
float anorm, rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9;
float anx, any, edgelx, edgely, edgerx, edgery;
float x, y;
/* The sizes of the shared memory arrays are as follows: */
/* float sfxy[3*(mx+1)*(my+1)], sbxy[3*(mx+1)*(my+1)]; */
/* float sek[blockDim.x]; */
/* int sih[blockDim.x], sncl[8], nh[1]; */
/* to conserve memory, sek overlaps with sfxy, sbxy, and sih */
/* and the name sfxy is used instead of sek */
float *sbxy;
int *sncl, *sih, *nh;
extern __shared__ float sfxy[];
sbxy = &sfxy[3*(mx+1)*(my+1)];
sih = (int *)&sfxy[6*(mx+1)*(my+1)];
sncl = (int *)&sih[blockDim.x];
nh = (int *)&sfxy[blockDim.x];
sncl = sncl > nh ? sncl : nh;
nh = (int *)&sncl[8];
double sum1;
qtmh = 0.5f*qbm*dt;
ci2 = ci*ci;
anx = (float) nx;
any = (float) ny;
sum1 = 0.0;
mxv = mx + 1;
/* k = tile number */
k = blockIdx.x + gridDim.x*blockIdx.y;
/* loop over tiles */
if (k < mxy1) {
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
npp = kpic[k];
npoff = idimp*nppmx*k;
nn = nx - noff;
nn = mx < nn ? mx : nn;
mm = ny - moff;
mm = my < mm ? my : mm;
edgelx = noff;
edgerx = noff + nn;
edgely = moff;
edgery = moff + mm;
nn += 1;
mm += 1;
/* load local fields from global array */
ii = threadIdx.x;
while (ii < mxv*(my+1)) {
j = ii/mxv;
i = ii - mxv*j;
if ((i < nn) && (j < mm)) {
sfxy[3*ii] = fxy[3*(i+noff+nxv*(j+moff))];
sfxy[1+3*ii] = fxy[1+3*(i+noff+nxv*(j+moff))];
sfxy[2+3*ii] = fxy[2+3*(i+noff+nxv*(j+moff))];
}
ii += blockDim.x;
}
ii = threadIdx.x;
while (ii < mxv*(my+1)) {
j = ii/mxv;
i = ii - mxv*j;
if ((i < nn) && (j < mm)) {
sbxy[3*ii] = bxy[3*(i+noff+nxv*(j+moff))];
sbxy[1+3*ii] = bxy[1+3*(i+noff+nxv*(j+moff))];
sbxy[2+3*ii] = bxy[2+3*(i+noff+nxv*(j+moff))];
}
ii += blockDim.x;
}
/* clear counters */
j = threadIdx.x;
while (j < 8) {
sncl[j] = 0;
j += blockDim.x;
}
if (threadIdx.x==0) {
nh[0] = 0;
}
/* synchronize threads */
__syncthreads();
/* loop over particles in tile */
ii = (npp - 1)/(int) blockDim.x + 1;
nhoff = 0;
for (i = 0; i < ii; i++) {
j = threadIdx.x + blockDim.x*i;
sih[threadIdx.x] = 0;
if (j < npp) {
/* find interpolation weights */
x = ppart[j+npoff];
y = ppart[j+nppmx+npoff];
nn = x;
mm = y;
dxp = x - (float) nn;
dyp = y - (float) mm;
nm = 3*(nn - noff) + 3*mxv*(mm - moff);
amx = 1.0f - dxp;
amy = 1.0f - dyp;
/* find electric field */
nn = nm;
dx = amx*sfxy[nn];
dy = amx*sfxy[nn+1];
dz = amx*sfxy[nn+2];
mm = nn + 3;
dx = amy*(dxp*sfxy[mm] + dx);
dy = amy*(dxp*sfxy[mm+1] + dy);
dz = amy*(dxp*sfxy[mm+2] + dz);
nn += 3*mxv;
acx = amx*sfxy[nn];
acy = amx*sfxy[nn+1];
acz = amx*sfxy[nn+2];
mm = nn + 3;
dx += dyp*(dxp*sfxy[mm] + acx);
dy += dyp*(dxp*sfxy[mm+1] + acy);
dz += dyp*(dxp*sfxy[mm+2] + acz);
/* find magnetic field */
nn = nm;
ox = amx*sbxy[nn];
oy = amx*sbxy[nn+1];
oz = amx*sbxy[nn+2];
mm = nn + 3;
ox = amy*(dxp*sbxy[mm] + ox);
oy = amy*(dxp*sbxy[mm+1] + oy);
oz = amy*(dxp*sbxy[mm+2] + oz);
nn += 3*mxv;
acx = amx*sbxy[nn];
acy = amx*sbxy[nn+1];
acz = amx*sbxy[nn+2];
mm = nn + 3;
ox += dyp*(dxp*sbxy[mm] + acx);
oy += dyp*(dxp*sbxy[mm+1] + acy);
oz += dyp*(dxp*sbxy[mm+2] + acz);
/* calculate half impulse */
dx *= qtmh;
dy *= qtmh;
dz *= qtmh;
/* half acceleration */
acx = ppart[j+2*nppmx+npoff] + dx;
acy = ppart[j+3*nppmx+npoff] + dy;
acz = ppart[j+4*nppmx+npoff] + dz;
/* find inverse gamma */
p2 = acx*acx + acy*acy + acz*acz;
gami = 1.0f/sqrtf(1.0f + p2*ci2);
/* renormalize magnetic field */
qtmg = qtmh*gami;
/* time-centered kinetic energy */
sum1 += gami*p2/(1.0f + gami);
/* calculate cyclotron frequency */
omxt = qtmg*ox;
omyt = qtmg*oy;
omzt = qtmg*oz;
/* calculate rotation matrix */
omt = omxt*omxt + omyt*omyt + omzt*omzt;
anorm = 2.0f/(1.0f + omt);
omt = 0.5f*(1.0f - omt);
rot4 = omxt*omyt;
rot7 = omxt*omzt;
rot8 = omyt*omzt;
rot1 = omt + omxt*omxt;
rot5 = omt + omyt*omyt;
rot9 = omt + omzt*omzt;
rot2 = omzt + rot4;
rot4 -= omzt;
rot3 = -omyt + rot7;
rot7 += omyt;
rot6 = omxt + rot8;
rot8 -= omxt;
/* new momentum */
dx += (rot1*acx + rot2*acy + rot3*acz)*anorm;
dy += (rot4*acx + rot5*acy + rot6*acz)*anorm;
dz += (rot7*acx + rot8*acy + rot9*acz)*anorm;
ppart[j+2*nppmx+npoff] = dx;
ppart[j+3*nppmx+npoff] = dy;
ppart[j+4*nppmx+npoff] = dz;
/* update inverse gamma */
p2 = dx*dx + dy*dy + dz*dz;
dtg = dtc/sqrtf(1.0f + p2*ci2);
/* new position */
dx = x + dx*dtg;
dy = y + dy*dtg;
/* find particles going out of bounds */
mm = 0;
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* mm = direction particle is going */
if (dx >= edgerx) {
if (dx >= anx)
dx -= anx;
mm = 2;
}
else if (dx < edgelx) {
if (dx < 0.0f) {
dx += anx;
if (dx < anx)
mm = 1;
else
dx = 0.0f;
}
else {
mm = 1;
}
}
if (dy >= edgery) {
if (dy >= any)
dy -= any;
mm += 6;
}
else if (dy < edgely) {
if (dy < 0.0f) {
dy += any;
if (dy < any)
mm += 3;
else
dy = 0.0f;
}
else {
mm += 3;
}
}
/* using prefix scan for ih to keep holes ordered */
if (mm > 0) {
atomicAdd(&sncl[mm-1],1);
sih[threadIdx.x] = 1;
}
}
/* synchronize threads */
__syncthreads();
nn = npp - blockDim.x*i;
if (nn > blockDim.x)
nn = blockDim.x;
/* perform local prefix reduction */
liscan2(sih,nn);
if (j < npp) {
/* set new position */
ppart[j+npoff] = dx;
ppart[j+nppmx+npoff] = dy;
/* write out location and direction of departing particles */
ih = sih[threadIdx.x];
mhoff = 0;
if (threadIdx.x > 0)
mhoff = sih[threadIdx.x-1];
/* this thread has a hole present */
if (ih > mhoff) {
ih += nhoff;
if (ih <= ntmax) {
ihole[2*(ih+(ntmax+1)*k)] = j + 1;
ihole[1+2*(ih+(ntmax+1)*k)] = mm;
}
else {
nh[0] = 1;
}
}
}
/* update number of holes in this iteration */
if (nn > 0)
nhoff += sih[nn-1];
/* synchronize threads */
__syncthreads();
}
/* add kinetic energies in tile */
sfxy[threadIdx.x] = (float) sum1;
/* synchronize threads */
__syncthreads();
lsum2(sfxy,blockDim.x);
/* write out counters */
j = threadIdx.x;
while (j < 8) {
ncl[j+8*k] = sncl[j];
j += blockDim.x;
}
/* write out hole count and set error flag */
if (threadIdx.x==0) {
/* ihole overflow */
ih = nhoff;
if (nh[0] > 0) {
*irc = ih;
ih = -ih;
}
ihole[2*(ntmax+1)*k] = ih;
/* normalize kinetic energy of tile */
ek[k] = sfxy[0];
}
}
return;
}
/*--------------------------------------------------------------------*/
__global__ void gpu2ppost2l(float ppart[], float q[], int kpic[],
float qm, int nppmx, int idimp, int mx,
int my, int nxv, int nyv, int mx1,
int mxy1) {
/* for 2d code, this subroutine calculates particle charge density
using first-order linear interpolation, periodic boundaries
threaded version using guard cells
data deposited in tiles
particles stored in a segmented array
17 flops/particle, 6 loads, 4 stores
input: all, output: q
charge density is approximated by values at the nearest grid points
q(n,m)=qm*(1.-dx)*(1.-dy)
q(n+1,m)=qm*dx*(1.-dy)
q(n,m+1)=qm*(1.-dx)*dy
q(n+1,m+1)=qm*dx*dy
where n,m = leftmost grid points and dx = x-n, dy = y-m
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
q[k][j] = charge density at grid point j,k
kpic = number of particles per tile
qm = charge on particle, in units of e
nppmx = maximum number of particles in tile
idimp = size of phase space = 4
mx/my = number of grids in sorting cell in x/y
nxv = first dimension of charge array, must be >= nx+1
nyv = second dimension of charge array, must be >= ny+1
mx1 = (system length in x direction - 1)/mx + 1
mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
local data */
int noff, moff, npoff, npp, mxv;
int i, j, k, ii, nn, mm, np, mp;
float dxp, dyp, amx, amy;
/* The size of the shared memory array is as follows: */
/* float sq[(mx+1)*(my+1)] */
extern __shared__ float sq[];
mxv = mx + 1;
/* k = tile number */
k = blockIdx.x + gridDim.x*blockIdx.y;
/* loop over tiles */
if (k < mxy1) {
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
npp = kpic[k];
npoff = idimp*nppmx*k;
/* zero out local accumulator */
i = threadIdx.x;
while (i < mxv*(my+1)) {
sq[i] = 0.0f;
i += blockDim.x;
}
/* synchronize threads */
__syncthreads();
/* loop over particles in tile */
j = threadIdx.x;
while (j < npp) {
/* find interpolation weights */
dxp = ppart[j+npoff];
nn = dxp;
dyp = ppart[j+npoff+nppmx];
mm = dyp;
dxp = qm*(dxp - (float) nn);
dyp = dyp - (float) mm;
nn = nn - noff;
mm = mxv*(mm - moff);
amx = qm - dxp;
mp = mm + mxv;
amy = 1.0f - dyp;
np = nn + 1;
/* deposit charge within tile to local accumulator */
/* original deposit charge, has data hazard on GPU */
/* sq[np+mp] += dxp*dyp; */
/* sq[nn+mp] += amx*dyp; */
/* sq[np+mm] += dxp*amy; */
/* sq[nn+mm] += amx*amy; */
/* for devices with compute capability 2.x */
atomicAdd(&sq[np+mp],dxp*dyp);
atomicAdd(&sq[nn+mp],amx*dyp);
atomicAdd(&sq[np+mm],dxp*amy);
atomicAdd(&sq[nn+mm],amx*amy);
j += blockDim.x;
}
/* synchronize threads */
__syncthreads();
/* deposit charge to global array */
nn = mxv < nxv-noff ? mxv : nxv-noff;
mm = my+1 < nyv-moff ? my+1 : nyv-moff;
ii = threadIdx.x;
while (ii < mxv*(my+1)) {
j = ii/mxv;
i = ii - mxv*j;
if ((i < nn) && (j < mm)) {
/* original deposit charge, has data hazard on GPU */
/* q[i+noff+nxv*(j+moff)] += sq[ii]; */
/* for devices with compute capability 2.x */
atomicAdd(&q[i+noff+nxv*(j+moff)],sq[ii]);
}
ii += blockDim.x;
}
}
return;
}
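/* Launch sketch (an assumption about the caller, which is not shown in  */
/* this file): one thread block per tile, with the dynamic shared memory */
/* sized to cover the accumulator sq, e.g.                                */
/*    dim3 dimBlock(nblock), dimGrid(mx1,my1);                            */
/*    int ns = (mx+1)*(my+1)*sizeof(float);                               */
/*    gpu2ppost2l<<<dimGrid,dimBlock,ns>>>(ppart,q,kpic,qm,nppmx,idimp,   */
/*                                         mx,my,nxv,nyv,mx1,mxy1);       */
/* where nblock is the chosen number of threads per block (hypothetical   */
/* name)                                                                  */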
/*--------------------------------------------------------------------*/
__global__ void gpu2jppost2l(float ppart[], float cu[], int kpic[],
float qm, float dt, int nppmx, int idimp,
int nx, int ny, int mx, int my, int nxv,
int nyv, int mx1, int mxy1, int ipbc) {
/* for 2-1/2d code, this subroutine calculates particle current density
using first-order linear interpolation
in addition, particle positions are advanced a half time-step
threaded version using guard cells
data deposited in tiles
particles stored in a segmented array
41 flops/particle, 17 loads, 14 stores
input: all, output: ppart, cu
current density is approximated by values at the nearest grid points
cu(i,n,m)=qci*(1.-dx)*(1.-dy)
cu(i,n+1,m)=qci*dx*(1.-dy)
cu(i,n,m+1)=qci*(1.-dx)*dy
cu(i,n+1,m+1)=qci*dx*dy
where n,m = leftmost grid points and dx = x-n, dy = y-m
and qci = qm*vi, where i = x,y,z
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = velocity vx of particle n in tile m
ppart[m][3][n] = velocity vy of particle n in tile m
ppart[m][4][n] = velocity vz of particle n in tile m
cu[k][j][i] = ith component of current density at grid point j,k
kpic = number of particles per tile
qm = charge on particle, in units of e
dt = time interval between successive calculations
nppmx = maximum number of particles in tile
idimp = size of phase space = 5
nx/ny = system length in x/y direction
mx/my = number of grids in sorting cell in x/y
nxv = first dimension of current array, must be >= nx+1
nyv = second dimension of current array, must be >= ny+1
mx1 = (system length in x direction - 1)/mx + 1
mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
ipbc = particle boundary condition = (0,1,2,3) =
(none,2d periodic,2d reflecting,mixed reflecting/periodic)
local data */
int noff, moff, npoff, npp, mxv;
int i, j, k, ii, nn, mm;
float edgelx, edgely, edgerx, edgery, dxp, dyp, amx, amy;
float x, y, dx, dy, vx, vy, vz;
/* The size of the shared memory array is as follows: */
/* float scu[3*(mx+1)*(my+1)] */
extern __shared__ float scu[];
/* set boundary values */
edgelx = 0.0f;
edgely = 0.0f;
edgerx = (float) nx;
edgery = (float) ny;
if (ipbc==2) {
edgelx = 1.0f;
edgely = 1.0f;
edgerx = (float) (nx-1);
edgery = (float) (ny-1);
}
else if (ipbc==3) {
edgelx = 1.0f;
edgerx = (float) (nx-1);
}
mxv = mx + 1;
/* k = tile number */
k = blockIdx.x + gridDim.x*blockIdx.y;
/* loop over tiles */
if (k < mxy1) {
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
npp = kpic[k];
npoff = idimp*nppmx*k;
/* zero out local accumulator */
i = threadIdx.x;
while (i < 3*mxv*(my+1)) {
scu[i] = 0.0f;
i += blockDim.x;
}
/* synchronize threads */
__syncthreads();
/* loop over particles in tile */
j = threadIdx.x;
while (j < npp) {
/* find interpolation weights */
x = ppart[j+npoff];
nn = x;
y = ppart[j+npoff+nppmx];
mm = y;
dxp = qm*(x - (float) nn);
dyp = y - (float) mm;
nn = 3*(nn - noff) + 3*mxv*(mm - moff);
amx = qm - dxp;
amy = 1.0f - dyp;
/* deposit current */
dx = amx*amy;
dy = dxp*amy;
vx = ppart[j+npoff+nppmx*2];
vy = ppart[j+npoff+nppmx*3];
vz = ppart[j+npoff+nppmx*4];
/* original current deposit, has data hazard on GPU */
/* scu[nn] += vx*dx; */
/* scu[nn+1] += vy*dx; */
/* scu[nn+2] += vz*dx; */
/* for devices with compute capability 2.x */
atomicAdd(&scu[nn],vx*dx);
atomicAdd(&scu[nn+1],vy*dx);
atomicAdd(&scu[nn+2],vz*dx);
dx = amx*dyp;
mm = nn + 3;
/* original current deposit, has data hazard on GPU */
/* scu[mm] += vx*dy; */
/* scu[mm+1] += vy*dy; */
/* scu[mm+2] += vz*dy; */
/* for devices with compute capability 2.x */
atomicAdd(&scu[mm],vx*dy);
atomicAdd(&scu[mm+1],vy*dy);
atomicAdd(&scu[mm+2],vz*dy);
dy = dxp*dyp;
nn += 3*mxv;
/* original current deposit, has data hazard on GPU */
/* scu[nn] += vx*dx; */
/* scu[nn+1] += vy*dx; */
/* scu[nn+2] += vz*dx; */
/* for devices with compute capability 2.x */
atomicAdd(&scu[nn],vx*dx);
atomicAdd(&scu[nn+1],vy*dx);
atomicAdd(&scu[nn+2],vz*dx);
mm = nn + 3;
/* original current deposit, has data hazard on GPU */
/* scu[mm] += vx*dy; */
/* scu[mm+1] += vy*dy; */
/* scu[mm+2] += vz*dy; */
/* for devices with compute capability 2.x */
atomicAdd(&scu[mm],vx*dy);
atomicAdd(&scu[mm+1],vy*dy);
atomicAdd(&scu[mm+2],vz*dy);
/* advance position half a time-step */
dx = x + vx*dt;
dy = y + vy*dt;
/* reflecting boundary conditions */
if (ipbc==2) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = ppart[j+npoff];
ppart[j+npoff+nppmx*2] = -ppart[j+npoff+nppmx*2];
}
if ((dy < edgely) || (dy >= edgery)) {
dy = ppart[j+npoff+nppmx];
ppart[j+npoff+nppmx*3] = -ppart[j+npoff+nppmx*3];
}
}
/* mixed reflecting/periodic boundary conditions */
else if (ipbc==3) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = ppart[j+npoff];
ppart[j+npoff+nppmx*2] = -ppart[j+npoff+nppmx*2];
}
}
/* set new position */
ppart[j+npoff] = dx;
ppart[j+npoff+nppmx] = dy;
j += blockDim.x;
}
/* synchronize threads */
__syncthreads();
/* deposit current to global array */
nn = nxv - noff;
mm = nyv - moff;
nn = mx+1 < nn ? mx+1 : nn;
mm = my+1 < mm ? my+1 : mm;
ii = threadIdx.x;
while (ii < mxv*(my+1)) {
j = ii/mxv;
i = ii - mxv*j;
if ((i < nn) && (j < mm)) {
/* original current deposit, has data hazard on GPU */
/* cu[3*(i+noff+nxv*(j+moff))] += scu[3*ii]; */
/* cu[1+3*(i+noff+nxv*(j+moff))] += scu[1+3*ii]; */
/* cu[2+3*(i+noff+nxv*(j+moff))] += scu[2+3*ii]; */
/* for devices with compute capability 2.x */
atomicAdd(&cu[3*(i+noff+nxv*(j+moff))],scu[3*ii]);
atomicAdd(&cu[1+3*(i+noff+nxv*(j+moff))],scu[1+3*ii]);
atomicAdd(&cu[2+3*(i+noff+nxv*(j+moff))],scu[2+3*ii]);
}
ii += blockDim.x;
}
}
return;
}
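/* Launch sizing note (an assumption about the caller): the kernel above */
/* needs 3*(mx+1)*(my+1)*sizeof(float) bytes of dynamic shared memory    */
/* for the current accumulator scu                                       */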
/*--------------------------------------------------------------------*/
__global__ void gpu2jppostf2l(float ppart[], float cu[], int kpic[],
int ncl[], int ihole[], float qm,
float dt, int nppmx, int idimp, int nx,
int ny, int mx, int my, int nxv, int nyv,
int mx1, int mxy1, int ntmax, int *irc) {
/* for 2-1/2d code, this subroutine calculates particle current density
using first-order linear interpolation
in addition, particle positions are advanced a half time-step
with periodic boundary conditions.
also determines list of particles which are leaving this tile
threaded version using guard cells
data deposited in tiles
particles stored in a segmented array
41 flops/particle, 17 loads, 14 stores
input: all except ncl, ihole, irc,
output: ppart, cu, ncl, ihole, irc
current density is approximated by values at the nearest grid points
cu(i,n,m)=qci*(1.-dx)*(1.-dy)
cu(i,n+1,m)=qci*dx*(1.-dy)
cu(i,n,m+1)=qci*(1.-dx)*dy
cu(i,n+1,m+1)=qci*dx*dy
where n,m = leftmost grid points and dx = x-n, dy = y-m
and qci = qm*vi, where i = x,y,z
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = velocity vx of particle n in tile m
ppart[m][3][n] = velocity vy of particle n in tile m
ppart[m][4][n] = velocity vz of particle n in tile m
cu[k][j][i] = ith component of current density at grid point j,k
kpic[k] = number of particles in tile k
ncl[k][i] = number of particles going to destination i, tile k
ihole[k][:][0] = location of hole in array left by departing particle
ihole[k][:][1] = destination of particle leaving hole
ihole[k][0][0] = ih, number of holes left (error, if negative)
qm = charge on particle, in units of e
dt = time interval between successive calculations
nppmx = maximum number of particles in tile
idimp = size of phase space = 5
nx/ny = system length in x/y direction
mx/my = number of grids in sorting cell in x/y
nxv = first dimension of current array, must be >= nx+1
nyv = second dimension of current array, must be >= ny+1
mx1 = (system length in x direction - 1)/mx + 1
mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
ntmax = size of hole array for particles leaving tiles
irc = maximum overflow, returned only if error occurs, when irc > 0
optimized version
local data */
int noff, moff, npoff, nhoff, mhoff, npp, mxv;
int i, j, k, ii, ih, nn, mm;
float dxp, dyp, amx, amy;
float x, y, dx, dy, vx, vy, vz;
float anx, any, edgelx, edgely, edgerx, edgery;
/* The sizes of the shared memory arrays are as follows: */
/* float scu[3*(mx+1)*(my+1)]; */
/* int sncl[8], sih[blockDim.x], nh[1]; */
int *sncl, *sih, *nh;
extern __shared__ float scu[];
sncl = (int *)&scu[3*(mx+1)*(my+1)];
sih = (int *)&sncl[8];
nh = (int *)&sih[blockDim.x];
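/* sizing sketch for the caller (an assumption, since the launch is not  */
/* shown here): the layout above requires at least                       */
/*    3*(mx+1)*(my+1)*sizeof(float) + (blockDim.x+9)*sizeof(int)         */
/* bytes of dynamic shared memory                                        */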
anx = (float) nx;
any = (float) ny;
mxv = mx + 1;
/* k = tile number */
k = blockIdx.x + gridDim.x*blockIdx.y;
/* loop over tiles */
if (k < mxy1) {
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
npp = kpic[k];
npoff = idimp*nppmx*k;
nn = nx - noff;
nn = mx < nn ? mx : nn;
mm = ny - moff;
mm = my < mm ? my : mm;
edgelx = noff;
edgerx = noff + nn;
edgely = moff;
edgery = moff + mm;
/* zero out local accumulator */
i = threadIdx.x;
while (i < 3*mxv*(my+1)) {
scu[i] = 0.0f;
i += blockDim.x;
}
/* clear counters */
j = threadIdx.x;
while (j < 8) {
sncl[j] = 0;
j += blockDim.x;
}
if (threadIdx.x==0) {
nh[0] = 0;
}
/* synchronize threads */
__syncthreads();
/* loop over particles in tile */
ii = (npp - 1)/(int) blockDim.x + 1;
nhoff = 0;
for (i = 0; i < ii; i++) {
j = threadIdx.x + blockDim.x*i;
sih[threadIdx.x] = 0;
if (j < npp) {
/* find interpolation weights */
x = ppart[j+npoff];
nn = x;
y = ppart[j+npoff+nppmx];
mm = y;
dxp = qm*(x - (float) nn);
dyp = y - (float) mm;
nn = 3*(nn - noff) + 3*mxv*(mm - moff);
amx = qm - dxp;
amy = 1.0f - dyp;
/* deposit current */
dx = amx*amy;
dy = dxp*amy;
vx = ppart[j+npoff+nppmx*2];
vy = ppart[j+npoff+nppmx*3];
vz = ppart[j+npoff+nppmx*4];
/* original current deposit, has data hazard on GPU */
/* scu[nn] += vx*dx; */
/* scu[nn+1] += vy*dx; */
/* scu[nn+2] += vz*dx; */
/* for devices with compute capability 2.x */
atomicAdd(&scu[nn],vx*dx);
atomicAdd(&scu[nn+1],vy*dx);
atomicAdd(&scu[nn+2],vz*dx);
dx = amx*dyp;
mm = nn + 3;
/* original current deposit, has data hazard on GPU */
/* scu[mm] += vx*dy; */
/* scu[mm+1] += vy*dy; */
/* scu[mm+2] += vz*dy; */
/* for devices with compute capability 2.x */
atomicAdd(&scu[mm],vx*dy);
atomicAdd(&scu[mm+1],vy*dy);
atomicAdd(&scu[mm+2],vz*dy);
dy = dxp*dyp;
nn += 3*mxv;
/* original current deposit, has data hazard on GPU */
/* scu[nn] += vx*dx; */
/* scu[nn+1] += vy*dx; */
/* scu[nn+2] += vz*dx; */
/* for devices with compute capability 2.x */
atomicAdd(&scu[nn],vx*dx);
atomicAdd(&scu[nn+1],vy*dx);
atomicAdd(&scu[nn+2],vz*dx);
mm = nn + 3;
/* original current deposit, has data hazard on GPU */
/* scu[mm] += vx*dy; */
/* scu[mm+1] += vy*dy; */
/* scu[mm+2] += vz*dy; */
/* for devices with compute capability 2.x */
atomicAdd(&scu[mm],vx*dy);
atomicAdd(&scu[mm+1],vy*dy);
atomicAdd(&scu[mm+2],vz*dy);
/* advance position half a time-step */
dx = x + vx*dt;
dy = y + vy*dt;
/* find particles going out of bounds */
mm = 0;
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* mm = direction particle is going */
if (dx >= edgerx) {
if (dx >= anx)
dx -= anx;
mm = 2;
}
else if (dx < edgelx) {
if (dx < 0.0f) {
dx += anx;
if (dx < anx)
mm = 1;
else
dx = 0.0f;
}
else {
mm = 1;
}
}
if (dy >= edgery) {
if (dy >= any)
dy -= any;
mm += 6;
}
else if (dy < edgely) {
if (dy < 0.0f) {
dy += any;
if (dy < any)
mm += 3;
else
dy = 0.0f;
}
else {
mm += 3;
}
}
/* using prefix scan for ih to keep holes ordered */
if (mm > 0) {
atomicAdd(&sncl[mm-1],1);
sih[threadIdx.x] = 1;
}
}
/* synchronize threads */
__syncthreads();
nn = npp - blockDim.x*i;
if (nn > blockDim.x)
nn = blockDim.x;
/* perform local prefix reduction */
liscan2(sih,nn);
if (j < npp) {
/* set new position */
ppart[j+npoff] = dx;
ppart[j+nppmx+npoff] = dy;
/* write out location and direction of departing particles */
ih = sih[threadIdx.x];
mhoff = 0;
if (threadIdx.x > 0)
mhoff = sih[threadIdx.x-1];
/* this thread has a hole present */
if (ih > mhoff) {
ih += nhoff;
if (ih <= ntmax) {
ihole[2*(ih+(ntmax+1)*k)] = j + 1;
ihole[1+2*(ih+(ntmax+1)*k)] = mm;
}
else {
nh[0] = 1;
}
}
}
/* update number of holes in this iteration */
if (nn > 0)
nhoff += sih[nn-1];
/* synchronize threads */
__syncthreads();
}
/* deposit current to global array */
nn = nxv - noff;
mm = nyv - moff;
nn = mx+1 < nn ? mx+1 : nn;
mm = my+1 < mm ? my+1 : mm;
ii = threadIdx.x;
while (ii < mxv*(my+1)) {
j = ii/mxv;
i = ii - mxv*j;
if ((i < nn) && (j < mm)) {
/* original current deposit, has data hazard on GPU */
/* cu[3*(i+noff+nxv*(j+moff))] += scu[3*ii]; */
/* cu[1+3*(i+noff+nxv*(j+moff))] += scu[1+3*ii]; */
/* cu[2+3*(i+noff+nxv*(j+moff))] += scu[2+3*ii]; */
/* for devices with compute capability 2.x */
atomicAdd(&cu[3*(i+noff+nxv*(j+moff))],scu[3*ii]);
atomicAdd(&cu[1+3*(i+noff+nxv*(j+moff))],scu[1+3*ii]);
atomicAdd(&cu[2+3*(i+noff+nxv*(j+moff))],scu[2+3*ii]);
}
ii += blockDim.x;
}
/* write out counters */
j = threadIdx.x;
while (j < 8) {
ncl[j+8*k] = sncl[j];
j += blockDim.x;
}
/* write out hole count and set error flag */
if (threadIdx.x==0) {
/* ihole overflow */
ih = nhoff;
if (nh[0] > 0) {
*irc = ih;
ih = -ih;
}
ihole[2*(ntmax+1)*k] = ih;
}
}
return;
}
/*--------------------------------------------------------------------*/
__global__ void gpu2rjppost2l(float ppart[], float cu[], int kpic[],
float qm, float dt, float ci, int nppmx,
int idimp, int nx, int ny, int mx, int my,
int nxv, int nyv, int mx1, int mxy1,
int ipbc) {
/* for 2-1/2d code, this subroutine calculates particle current density
using first-order linear interpolation for relativistic particles
in addition, particle positions are advanced a half time-step
threaded version using guard cells
data deposited in tiles
particles stored in a segmented array
47 flops/particle, 1 divide, 1 sqrt, 17 loads, 14 stores
input: all, output: ppart, cu
current density is approximated by values at the nearest grid points
cu(i,n,m)=qci*(1.-dx)*(1.-dy)
cu(i,n+1,m)=qci*dx*(1.-dy)
cu(i,n,m+1)=qci*(1.-dx)*dy
cu(i,n+1,m+1)=qci*dx*dy
where n,m = leftmost grid points and dx = x-n, dy = y-m
and qci = qm*pi*gami, where i = x,y,z
where gami = 1./sqrt(1.+sum(pi**2)*ci*ci)
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = x momentum of particle n in tile m
ppart[m][3][n] = y momentum of particle n in tile m
ppart[m][4][n] = z momentum of particle n in tile m
cu[k][j][i] = ith component of current density at grid point j,k
kpic = number of particles per tile
qm = charge on particle, in units of e
dt = time interval between successive calculations
ci = reciprocal of velocity of light
nppmx = maximum number of particles in tile
idimp = size of phase space = 5
nx/ny = system length in x/y direction
mx/my = number of grids in sorting cell in x/y
nxv = first dimension of current array, must be >= nx+1
nyv = second dimension of current array, must be >= ny+1
mx1 = (system length in x direction - 1)/mx + 1
mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
ipbc = particle boundary condition = (0,1,2,3) =
(none,2d periodic,2d reflecting,mixed reflecting/periodic)
local data */
int noff, moff, npoff, npp, mxv;
int i, j, k, ii, nn, mm;
float ci2, edgelx, edgely, edgerx, edgery, dxp, dyp, amx, amy;
float x, y, dx, dy, vx, vy, vz, p2, gami;
/* The size of the shared memory array is as follows: */
/* float scu[3*(mx+1)*(my+1)] */
extern __shared__ float scu[];
ci2 = ci*ci;
/* set boundary values */
edgelx = 0.0f;
edgely = 0.0f;
edgerx = (float) nx;
edgery = (float) ny;
if (ipbc==2) {
edgelx = 1.0f;
edgely = 1.0f;
edgerx = (float) (nx-1);
edgery = (float) (ny-1);
}
else if (ipbc==3) {
edgelx = 1.0f;
edgerx = (float) (nx-1);
}
mxv = mx + 1;
/* k = tile number */
k = blockIdx.x + gridDim.x*blockIdx.y;
/* loop over tiles */
if (k < mxy1) {
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
npp = kpic[k];
npoff = idimp*nppmx*k;
/* zero out local accumulator */
i = threadIdx.x;
while (i < 3*mxv*(my+1)) {
scu[i] = 0.0f;
i += blockDim.x;
}
/* synchronize threads */
__syncthreads();
/* loop over particles in tile */
j = threadIdx.x;
while (j < npp) {
/* find interpolation weights */
x = ppart[j+npoff];
nn = x;
y = ppart[j+nppmx+npoff];
mm = y;
dxp = qm*(x - (float) nn);
dyp = y - (float) mm;
/* find inverse gamma */
vx = ppart[j+npoff+nppmx*2];
vy = ppart[j+npoff+nppmx*3];
vz = ppart[j+npoff+nppmx*4];
p2 = vx*vx + vy*vy + vz*vz;
gami = 1.0f/sqrtf(1.0f + p2*ci2);
/* calculate weights */
nn = 3*(nn - noff) + 3*mxv*(mm - moff);
amx = qm - dxp;
amy = 1.0f - dyp;
/* deposit current */
dx = amx*amy;
dy = dxp*amy;
vx *= gami;
vy *= gami;
vz *= gami;
/* original current deposit, has data hazard on GPU */
/* scu[nn] += vx*dx; */
/* scu[nn+1] += vy*dx; */
/* scu[nn+2] += vz*dx; */
/* for devices with compute capability 2.x */
atomicAdd(&scu[nn],vx*dx);
atomicAdd(&scu[nn+1],vy*dx);
atomicAdd(&scu[nn+2],vz*dx);
dx = amx*dyp;
mm = nn + 3;
/* original current deposit, has data hazard on GPU */
/* scu[mm] += vx*dy; */
/* scu[mm+1] += vy*dy; */
/* scu[mm+2] += vz*dy; */
/* for devices with compute capability 2.x */
atomicAdd(&scu[mm],vx*dy);
atomicAdd(&scu[mm+1],vy*dy);
atomicAdd(&scu[mm+2],vz*dy);
dy = dxp*dyp;
nn += 3*mxv;
/* original current deposit, has data hazard on GPU */
/* scu[nn] += vx*dx; */
/* scu[nn+1] += vy*dx; */
/* scu[nn+2] += vz*dx; */
/* for devices with compute capability 2.x */
atomicAdd(&scu[nn],vx*dx);
atomicAdd(&scu[nn+1],vy*dx);
atomicAdd(&scu[nn+2],vz*dx);
mm = nn + 3;
/* original current deposit, has data hazard on GPU */
/* scu[mm] += vx*dy; */
/* scu[mm+1] += vy*dy; */
/* scu[mm+2] += vz*dy; */
/* for devices with compute capability 2.x */
atomicAdd(&scu[mm],vx*dy);
atomicAdd(&scu[mm+1],vy*dy);
atomicAdd(&scu[mm+2],vz*dy);
/* advance position half a time-step */
dx = x + vx*dt;
dy = y + vy*dt;
/* reflecting boundary conditions */
if (ipbc==2) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = ppart[j+npoff];
ppart[j+npoff+nppmx*2] = -ppart[j+npoff+nppmx*2];
}
if ((dy < edgely) || (dy >= edgery)) {
dy = ppart[j+npoff+nppmx];
ppart[j+npoff+nppmx*3] = -ppart[j+npoff+nppmx*3];
}
}
/* mixed reflecting/periodic boundary conditions */
else if (ipbc==3) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = ppart[j+npoff];
ppart[j+npoff+nppmx*2] = -ppart[j+npoff+nppmx*2];
}
}
/* set new position */
ppart[j+npoff] = dx;
ppart[j+npoff+nppmx] = dy;
j += blockDim.x;
}
/* synchronize threads */
__syncthreads();
/* deposit current to global array */
nn = nxv - noff;
mm = nyv - moff;
nn = mx+1 < nn ? mx+1 : nn;
mm = my+1 < mm ? my+1 : mm;
ii = threadIdx.x;
while (ii < mxv*(my+1)) {
j = ii/mxv;
i = ii - mxv*j;
if ((i < nn) && (j < mm)) {
/* original current deposit, has data hazard on GPU */
/* cu[3*(i+noff+nxv*(j+moff))] += scu[3*ii]; */
/* cu[1+3*(i+noff+nxv*(j+moff))] += scu[1+3*ii]; */
/* cu[2+3*(i+noff+nxv*(j+moff))] += scu[2+3*ii]; */
/* for devices with compute capability 2.x */
atomicAdd(&cu[3*(i+noff+nxv*(j+moff))],scu[3*ii]);
atomicAdd(&cu[1+3*(i+noff+nxv*(j+moff))],scu[1+3*ii]);
atomicAdd(&cu[2+3*(i+noff+nxv*(j+moff))],scu[2+3*ii]);
}
ii += blockDim.x;
}
}
return;
}
/*--------------------------------------------------------------------*/
__global__ void gpu2rjppostf2l(float ppart[], float cu[], int kpic[],
int ncl[], int ihole[], float qm,
float dt, float ci, int nppmx, int idimp,
int nx, int ny, int mx, int my, int nxv,
int nyv, int mx1, int mxy1, int ntmax,
int *irc) {
/* for 2-1/2d code, this subroutine calculates particle current density
using first-order linear interpolation for relativistic particles
in addition, particle positions are advanced a half time-step
with periodic boundary conditions.
also determines list of particles which are leaving this tile
threaded version using guard cells
data deposited in tiles
particles stored in a segmented array
47 flops/particle, 1 divide, 1 sqrt, 17 loads, 14 stores
input: all except ncl, ihole, irc,
output: ppart, cu, ncl, ihole, irc
current density is approximated by values at the nearest grid points
cu(i,n,m)=qci*(1.-dx)*(1.-dy)
cu(i,n+1,m)=qci*dx*(1.-dy)
cu(i,n,m+1)=qci*(1.-dx)*dy
cu(i,n+1,m+1)=qci*dx*dy
where n,m = leftmost grid points and dx = x-n, dy = y-m
and qci = qm*pi*gami, where i = x,y,z
where gami = 1./sqrt(1.+sum(pi**2)*ci*ci)
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = x momentum of particle n in tile m
ppart[m][3][n] = y momentum of particle n in tile m
ppart[m][4][n] = z momentum of particle n in tile m
cu[k][j][i] = ith component of current density at grid point j,k
kpic[k] = number of particles in tile k
ncl[k][i] = number of particles going to destination i, tile k
ihole[k][:][0] = location of hole in array left by departing particle
ihole[k][:][1] = destination of particle leaving hole
ihole[k][0][0] = ih, number of holes left (error, if negative)
qm = charge on particle, in units of e
dt = time interval between successive calculations
   ci = reciprocal of velocity of light
nppmx = maximum number of particles in tile
idimp = size of phase space = 5
nx/ny = system length in x/y direction
mx/my = number of grids in sorting cell in x/y
nxv = first dimension of current array, must be >= nx+1
nyv = second dimension of current array, must be >= ny+1
mx1 = (system length in x direction - 1)/mx + 1
mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
ntmax = size of hole array for particles leaving tiles
irc = maximum overflow, returned only if error occurs, when irc > 0
optimized version
local data */
int noff, moff, npoff, nhoff, mhoff, npp, mxv;
int i, j, k, ii, ih, nn, mm;
float ci2, dxp, dyp, amx, amy;
float x, y, dx, dy, vx, vy, vz, p2, gami;
float anx, any, edgelx, edgely, edgerx, edgery;
/* The sizes of the shared memory arrays are as follows: */
/* float scu[3*(mx+1)*(my+1)]; */
/* int sncl[8], sih[blockDim.x], nh[1]; */
int *sncl, *sih, *nh;
extern __shared__ float scu[];
sncl = (int *)&scu[3*(mx+1)*(my+1)];
sih = (int *)&sncl[8];
nh = (int *)&sih[blockDim.x];
anx = (float) nx;
any = (float) ny;
ci2 = ci*ci;
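/* mxv = mx + 1 = x extent of the shared current accumulator scu; */
/* linear interpolation deposits onto two adjacent grid points in x */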
mxv = mx + 1;
/* k = tile number */
k = blockIdx.x + gridDim.x*blockIdx.y;
/* loop over tiles */
if (k < mxy1) {
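/* noff, moff = lowermost global grid point in x, y of tile k */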
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
npp = kpic[k];
npoff = idimp*nppmx*k;
nn = nx - noff;
nn = mx < nn ? mx : nn;
mm = ny - moff;
mm = my < mm ? my : mm;
edgelx = noff;
edgerx = noff + nn;
edgely = moff;
edgery = moff + mm;
/* zero out local accumulator */
i = threadIdx.x;
while (i < 3*mxv*(my+1)) {
scu[i] = 0.0f;
i += blockDim.x;
}
/* clear counters */
j = threadIdx.x;
while (j < 8) {
sncl[j] = 0;
j += blockDim.x;
}
if (threadIdx.x==0) {
nh[0] = 0;
}
/* synchronize threads */
__syncthreads();
/* loop over particles in tile */
ii = (npp - 1)/(int) blockDim.x + 1;
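/* nhoff = running count of holes found in earlier passes over this tile */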
nhoff = 0;
for (i = 0; i < ii; i++) {
j = threadIdx.x + blockDim.x*i;
sih[threadIdx.x] = 0;
if (j < npp) {
/* find interpolation weights */
x = ppart[j+npoff];
nn = x;
y = ppart[j+npoff+nppmx];
mm = y;
dxp = qm*(x - (float) nn);
dyp = y - (float) mm;
/* find inverse gamma */
vx = ppart[j+npoff+nppmx*2];
vy = ppart[j+npoff+nppmx*3];
vz = ppart[j+npoff+nppmx*4];
p2 = vx*vx + vy*vy + vz*vz;
gami = 1.0f/sqrtf(1.0f + p2*ci2);
/* calculate weights */
nn = 3*(nn - noff) + 3*mxv*(mm - moff);
amx = qm - dxp;
amy = 1.0f - dyp;
/* deposit current */
dx = amx*amy;
dy = dxp*amy;
vx *= gami;
vy *= gami;
vz *= gami;
/* original current deposit, has data hazard on GPU */
/* scu[nn] += vx*dx; */
/* scu[nn+1] += vy*dx; */
/* scu[nn+2] += vz*dx; */
/* for devices with compute capability 2.x */
atomicAdd(&scu[nn],vx*dx);
atomicAdd(&scu[nn+1],vy*dx);
atomicAdd(&scu[nn+2],vz*dx);
dx = amx*dyp;
mm = nn + 3;
/* original current deposit, has data hazard on GPU */
/* scu[mm] += vx*dy; */
/* scu[mm+1] += vy*dy; */
/* scu[mm+2] += vz*dy; */
/* for devices with compute capability 2.x */
atomicAdd(&scu[mm],vx*dy);
atomicAdd(&scu[mm+1],vy*dy);
atomicAdd(&scu[mm+2],vz*dy);
dy = dxp*dyp;
nn += 3*mxv;
/* original current deposit, has data hazard on GPU */
/* scu[nn] += vx*dx; */
/* scu[nn+1] += vy*dx; */
/* scu[nn+2] += vz*dx; */
/* for devices with compute capability 2.x */
atomicAdd(&scu[nn],vx*dx);
atomicAdd(&scu[nn+1],vy*dx);
atomicAdd(&scu[nn+2],vz*dx);
mm = nn + 3;
/* original current deposit, has data hazard on GPU */
/* scu[mm] += vx*dy; */
/* scu[mm+1] += vy*dy; */
/* scu[mm+2] += vz*dy; */
/* for devices with compute capability 2.x */
atomicAdd(&scu[mm],vx*dy);
atomicAdd(&scu[mm+1],vy*dy);
atomicAdd(&scu[mm+2],vz*dy);
/* advance position half a time-step */
dx = x + vx*dt;
dy = y + vy*dt;
/* find particles going out of bounds */
mm = 0;
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* mm = direction particle is going */
if (dx >= edgerx) {
if (dx >= anx)
dx -= anx;
mm = 2;
}
else if (dx < edgelx) {
if (dx < 0.0f) {
dx += anx;
if (dx < anx)
mm = 1;
else
dx = 0.0f;
}
else {
mm = 1;
}
}
if (dy >= edgery) {
if (dy >= any)
dy -= any;
mm += 6;
}
else if (dy < edgely) {
if (dy < 0.0f) {
dy += any;
if (dy < any)
mm += 3;
else
dy = 0.0f;
}
else {
mm += 3;
}
}
/* using prefix scan for ih to keep holes ordered */
if (mm > 0) {
atomicAdd(&sncl[mm-1],1);
sih[threadIdx.x] = 1;
}
}
/* synchronize threads */
__syncthreads();
nn = npp - blockDim.x*i;
if (nn > blockDim.x)
nn = blockDim.x;
/* perform local prefix reduction */
liscan2(sih,nn);
if (j < npp) {
/* set new position */
ppart[j+npoff] = dx;
ppart[j+nppmx+npoff] = dy;
/* write out location and direction of departing particles */
ih = sih[threadIdx.x];
mhoff = 0;
if (threadIdx.x > 0)
mhoff = sih[threadIdx.x-1];
/* this thread has a hole present */
if (ih > mhoff) {
ih += nhoff;
if (ih <= ntmax) {
ihole[2*(ih+(ntmax+1)*k)] = j + 1;
ihole[1+2*(ih+(ntmax+1)*k)] = mm;
}
else {
nh[0] = 1;
}
}
}
/* update number of holes in this iteration */
if (nn > 0)
nhoff += sih[nn-1];
/* synchronize threads */
__syncthreads();
}
/* deposit current to global array */
nn = nxv - noff;
mm = nyv - moff;
nn = mx+1 < nn ? mx+1 : nn;
mm = my+1 < mm ? my+1 : mm;
ii = threadIdx.x;
while (ii < mxv*(my+1)) {
j = ii/mxv;
i = ii - mxv*j;
if ((i < nn) && (j < mm)) {
/* original current deposit, has data hazard on GPU */
/* cu[3*(i+noff+nxv*(j+moff))] += scu[3*ii]; */
/* cu[1+3*(i+noff+nxv*(j+moff))] += scu[1+3*ii]; */
/* cu[2+3*(i+noff+nxv*(j+moff))] += scu[2+3*ii]; */
/* for devices with compute capability 2.x */
atomicAdd(&cu[3*(i+noff+nxv*(j+moff))],scu[3*ii]);
atomicAdd(&cu[1+3*(i+noff+nxv*(j+moff))],scu[1+3*ii]);
atomicAdd(&cu[2+3*(i+noff+nxv*(j+moff))],scu[2+3*ii]);
}
ii += blockDim.x;
}
/* write out counters */
j = threadIdx.x;
while (j < 8) {
ncl[j+8*k] = sncl[j];
j += blockDim.x;
}
/* set error and end of file flag */
if (threadIdx.x==0) {
/* ihole overflow */
ih = nhoff;
if (nh[0] > 0) {
*irc = ih;
ih = -ih;
}
ihole[2*(ntmax+1)*k] = ih;
}
}
return;
}
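/*--------------------------------------------------------------------*/
/* The following host-side launch sketch for gpu2rjppostf2l is added   */
/* for illustration only and is not part of the original interface.    */
/* It shows how the dynamic shared memory size follows the layout      */
/* documented in the kernel:                                            */
/*    float scu[3*(mx+1)*(my+1)]; int sncl[8], sih[blockDim.x], nh[1]; */
/* The wrapper name, the nblock argument, and the 2D grid decomposition */
/* (one block per tile) are assumptions; all pointer arguments are      */
/* assumed to already be device pointers.                               */
static void launch_gpu2rjppostf2l(float ppart[], float cu[], int kpic[],
                                  int ncl[], int ihole[], float qm,
                                  float dt, float ci, int nppmx, int idimp,
                                  int nx, int ny, int mx, int my, int nxv,
                                  int nyv, int mx1, int mxy1, int ntmax,
                                  int *irc, int nblock) {
   dim3 dimBlock(nblock);
/* 2D grid so that blockIdx.x + gridDim.x*blockIdx.y covers all mxy1 tiles */
   dim3 dimGrid(mx1, (mxy1 - 1)/mx1 + 1);
/* dynamic shared memory: float current accumulator plus integer counters */
   int ns = 3*(mx+1)*(my+1)*sizeof(float) + (nblock + 9)*sizeof(int);
   gpu2rjppostf2l<<<dimGrid,dimBlock,ns>>>(ppart,cu,kpic,ncl,ihole,qm,dt,
                                           ci,nppmx,idimp,nx,ny,mx,my,nxv,
                                           nyv,mx1,mxy1,ntmax,irc);
}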
/*--------------------------------------------------------------------*/
__global__ void gpucaguard2l(float2 qc[], float q[], int nx, int ny,
int nxe, int nye, int nxvh, int nyv) {
/* copy and accumulate extended periodic scalar field q
into complex output field qc
linear interpolation
nx/ny = system length in x/y direction
nxe = first dimension of input field array q, must be >= nx+1
nye = second dimension of input field array q, must be >= ny+1
nxvh = first dimension of output field array qc, must be >= nx/2+1
nyv = second dimension of output field array qc, must be >= ny */
/* local data */
int j, k, nxh;
float at1, at2;
float2 a;
nxh = nx/2;
k = blockIdx.x;
/* copy interior points */
if (k < ny) {
j = threadIdx.x;
at2 = 0.0f;
while (j < nxh) {
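/* at1, at2 = periodic guard-cell contributions folded into the interior: */
/* guard row ny adds into row 0, guard column nx adds into column 0 */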
if (k==0) {
at1 = q[2*j+nxe*ny];
at2 = q[2*j+1+nxe*ny];
if (j==0) {
at1 += q[nx] + q[nx+nxe*ny];
}
}
if (k > 0) {
at1 = 0.0f;
if (j==0) {
at1 = q[nx+nxe*k];
}
}
a.x = q[2*j+nxe*k] + at1;
a.y = q[2*j+1+nxe*k] + at2;
qc[j+nxvh*k] = a;
j += blockDim.x;
}
}
return;
}
/*--------------------------------------------------------------------*/
__global__ void gpucacguard2l(float2 cuc[], float cu[], int nx, int ny,
int nxe, int nye, int nxvh, int nyv) {
/* copy and accumulate extended periodic vector field cu
into complex output field cuc
linear interpolation
nx/ny = system length in x/y direction
nxe = second dimension of input field array cu, must be >= nx+1
nye = third dimension of input field array cu, must be >= ny+1
nxvh = first dimension of output field array cuc, must be >= nx/2+1
nyv = third dimension of output field array cuc, must be >= ny */
/* local data */
int j, k, nxh;
float at1, at2, at3, at4, at5, at6;
float2 a;
nxh = nx/2;
k = blockIdx.x;
/* copy interior points */
if (k < ny) {
at2 = 0.0f;
at4 = 0.0f;
at6 = 0.0f;
j = threadIdx.x;
while (j < nxh) {
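/* fold periodic guard cells (row ny, column nx, and the corner) into the */
/* interior, separately for each of the 3 vector components */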
if (k==0) {
at1 = cu[3*(2*j+nxe*ny)];
at2 = cu[3*(2*j+1+nxe*ny)];
at3 = cu[1+3*(2*j+nxe*ny)];
at4 = cu[1+3*(2*j+1+nxe*ny)];
at5 = cu[2+3*(2*j+nxe*ny)];
at6 = cu[2+3*(2*j+1+nxe*ny)];
if (j==0) {
at1 += cu[3*nx] + cu[3*(nx+nxe*ny)];
at3 += cu[1+3*nx] + cu[1+3*(nx+nxe*ny)];
at5 += cu[2+3*nx] + cu[2+3*(nx+nxe*ny)];
}
}
if (k > 0) {
at1 = 0.0f;
at3 = 0.0f;
at5 = 0.0f;
if (j==0) {
at1 = cu[3*(nx+nxe*k)];
at3 = cu[1+3*(nx+nxe*k)];
at5 = cu[2+3*(nx+nxe*k)];
}
}
a.x = cu[3*(2*j+nxe*k)] + at1;
a.y = cu[3*(2*j+1+nxe*k)] + at2;
cuc[j+nxvh*3*k] = a;
a.x = cu[1+3*(2*j+nxe*k)] + at3;
a.y = cu[1+3*(2*j+1+nxe*k)] + at4;
cuc[j+nxvh*(1+3*k)] = a;
a.x = cu[2+3*(2*j+nxe*k)] + at5;
a.y = cu[2+3*(2*j+1+nxe*k)] + at6;
cuc[j+nxvh*(2+3*k)] = a;
j += blockDim.x;
}
}
return;
}
/*--------------------------------------------------------------------*/
__global__ void gpucbguard2l(float2 bxyc[], float bxy[], int nx, int ny,
int nxe, int nye, int nxvh, int nyv) {
/* copy and replicate complex input 2d vector field bxyc
into extended periodic field bxy
linear interpolation
nx/ny = system length in x/y direction
nxe = second dimension of input field array bxy, must be >= nx+1
nye = third dimension of input field array bxy, must be >= ny+1
nxvh = first dimension of input field array bxyc, must be >= nx/2+1
nyv = third dimension of input field array bxyc, must be >= ny */
/* local data */
int j, k, nxh;
float2 a, b, c;
nxh = nx/2;
k = blockIdx.x;
/* copy interior points */
if (k < ny) {
j = threadIdx.x;
while (j < nxh) {
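/* each complex element packs two adjacent x grid points: the real part */
/* goes to grid point 2*j and the imaginary part to 2*j+1 */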
a = bxyc[j+nxvh*3*k];
b = bxyc[j+nxvh*(1+3*k)];
c = bxyc[j+nxvh*(2+3*k)];
bxy[3*(2*j+nxe*k)] = a.x;
bxy[1+3*(2*j+nxe*k)] = b.x;
bxy[2+3*(2*j+nxe*k)] = c.x;
bxy[3*(2*j+1+nxe*k)] = a.y;
bxy[1+3*(2*j+1+nxe*k)] = b.y;
bxy[2+3*(2*j+1+nxe*k)] = c.y;
j += blockDim.x;
}
}
/* replicate periodic edges of extended field */
if (blockIdx.x==0) {
k = threadIdx.x;
while (k < ny) {
a = bxyc[nxvh*3*k];
b = bxyc[nxvh*(1+3*k)];
c = bxyc[nxvh*(2+3*k)];
bxy[3*(nx+nxe*k)] = a.x;
bxy[1+3*(nx+nxe*k)] = b.x;
bxy[2+3*(nx+nxe*k)] = c.x;
k += blockDim.x;
}
j = threadIdx.x;
while (j < nxh) {
a = bxyc[j];
b = bxyc[j+nxvh];
c = bxyc[j+2*nxvh];
bxy[3*(2*j+nxe*ny)] = a.x;
bxy[1+3*(2*j+nxe*ny)] = b.x;
bxy[2+3*(2*j+nxe*ny)] = c.x;
bxy[3*(2*j+1+nxe*ny)] = a.y;
bxy[1+3*(2*j+1+nxe*ny)] = b.y;
bxy[2+3*(2*j+1+nxe*ny)] = c.y;
j += blockDim.x;
}
if (threadIdx.x==0) {
a = bxyc[0];
b = bxyc[nxvh];
c = bxyc[nxvh*2];
bxy[3*(nx+nxe*ny)] = a.x;
bxy[1+3*(nx+nxe*ny)] = b.x;
bxy[2+3*(nx+nxe*ny)] = c.x;
}
}
return;
}
/*--------------------------------------------------------------------*/
__global__ void gpuppfnd2l(float ppart[], int kpic[], int ncl[],
int ihole[], int idimp, int nppmx, int nx,
int ny, int mx, int my, int mx1, int my1,
int ntmax, int *irc) {
/* this subroutine performs first step of a particle sort by x,y grid
in tiles of mx, my, where one finds the particles leaving tile and
stores their number, location, and destination in ncl and ihole.
linear interpolation, with periodic boundary conditions
tiles are assumed to be arranged in 2D linear memory
input: all except ncl, ihole, irc
output: ppart, ncl, ihole, irc
ppart[k][0][n] = position x of particle n in tile k
ppart[k][1][n] = position y of particle n in tile k
kpic[k] = number of particles in tile k
ncl[k][i] = number of particles going to destination i, tile k
ihole[k][:][0] = location of hole in array left by departing particle
ihole[k][:][1] = destination of particle leaving hole
ihole[k][0][0] = ih, number of holes left (error, if negative)
idimp = size of phase space = 4
nppmx = maximum number of particles in tile
nx/ny = system length in x/y direction
mx/my = number of grids in sorting cell in x/y
mx1 = (system length in x direction - 1)/mx + 1
my1 = (system length in y direction - 1)/my + 1
ntmax = size of hole array for particles leaving tiles
irc = maximum overflow, returned only if error occurs, when irc > 0
local data */
int mxy1, noff, moff, npp, j, k, ih, ist, nn, mm, nths;
float anx, any, edgelx, edgely, edgerx, edgery, dx, dy;
/* The sizes of the shared memory arrays are as follows: */
/* int sncl[8], sih[blockDim.x], nh[1]; */
int *sncl, *sih, *nh;
extern __shared__ int shm[];
sncl = (int *)&shm[0];
sih = (int *)&shm[8];
nh = (int *)&shm[8+blockDim.x];
mxy1 = mx1*my1;
anx = (float) nx;
any = (float) ny;
/* k = tile number */
k = blockIdx.x + gridDim.x*blockIdx.y;
/* find and count particles leaving tiles and determine destination */
/* update ppart, ihole, ncl */
/* loop over tiles */
if (k < mxy1) {
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
npp = kpic[k];
nn = nx - noff;
nn = mx < nn ? mx : nn;
mm = ny - moff;
mm = my < mm ? my : mm;
edgelx = noff;
edgerx = noff + nn;
edgely = moff;
edgery = moff + mm;
/* clear counters */
j = threadIdx.x;
while (j < 8) {
sncl[j] = 0;
j += blockDim.x;
}
if (threadIdx.x==0) {
nh[0] = 0;
}
/* synchronize threads */
__syncthreads();
/* loop over particles in tile */
mm = (npp - 1)/(int) blockDim.x + 1;
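/* noff is reused below as the running count of holes found so far */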
noff = 0;
for (nn = 0; nn < mm; nn++) {
j = threadIdx.x + blockDim.x*nn;
sih[threadIdx.x] = 0;
if (j < npp) {
dx = ppart[j+nppmx*(idimp*k)];
dy = ppart[j+nppmx*(1+idimp*k)];
/* find particles going out of bounds */
ist = 0;
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* ist = direction particle is going */
if (dx >= edgerx) {
if (dx >= anx)
ppart[j+nppmx*(idimp*k)] = dx - anx;
ist = 2;
}
else if (dx < edgelx) {
if (dx < 0.0f) {
dx += anx;
if (dx < anx)
ist = 1;
else
dx = 0.0f;
ppart[j+nppmx*(idimp*k)] = dx;
}
else {
ist = 1;
}
}
if (dy >= edgery) {
if (dy >= any)
ppart[j+nppmx*(1+idimp*k)] = dy - any;
ist += 6;
}
else if (dy < edgely) {
if (dy < 0.0f) {
dy += any;
if (dy < any)
ist += 3;
else
dy = 0.0f;
ppart[j+nppmx*(1+idimp*k)] = dy;
}
else {
ist += 3;
}
}
/* using prefix scan for ih to keep holes ordered */
if (ist > 0) {
atomicAdd(&sncl[ist-1],1);
sih[threadIdx.x] = 1;
}
}
/* synchronize threads */
__syncthreads();
nths = npp - blockDim.x*nn;
if (nths > blockDim.x)
nths = blockDim.x;
/* perform local prefix reduction */
liscan2(sih,nths);
if (j < npp) {
ih = sih[threadIdx.x];
moff = 0;
if (threadIdx.x > 0)
moff = sih[threadIdx.x-1];
/* this thread has a hole present */
if (ih > moff) {
ih += noff;
if (ih <= ntmax) {
ihole[2*(ih+(ntmax+1)*k)] = j + 1;
ihole[1+2*(ih+(ntmax+1)*k)] = ist;
}
else {
nh[0] = 1;
}
}
}
/* update number of holes in this iteration */
if (nths > 0)
noff += sih[nths-1];
/* synchronize threads */
__syncthreads();
}
/* write out counters */
j = threadIdx.x;
while (j < 8) {
ncl[j+8*k] = sncl[j];
j += blockDim.x;
}
/* set error and end of file flag */
if (threadIdx.x==0) {
/* ihole overflow */
ih = noff;
if (nh[0] > 0) {
*irc = ih;
ih = -ih;
}
ihole[2*(ntmax+1)*k] = ih;
}
}
return;
}
/*--------------------------------------------------------------------*/
__global__ void gpuppmov2l(float ppart[], float ppbuff[], int ncl[],
int ihole[], int idimp, int nppmx, int mx1,
int my1, int npbmx, int ntmax, int *irc) {
/* this subroutine performs second step of a particle sort by x,y grid
in tiles of mx, my, where prefix scan of ncl is performed and
departing particles are buffered in ppbuff in direction order.
linear interpolation, with periodic boundary conditions
tiles are assumed to be arranged in 2D linear memory
input: all except ppbuff, irc
output: ppbuff, ncl, irc
ppart[k][i][n] = i co-ordinate of particle n in tile k
ppbuff[k][i][n] = i co-ordinate of particle n in tile k
ncl[k][i] = number of particles going to destination i, tile k
ihole[k][:][0] = location of hole in array left by departing particle
ihole[k][:][1] = direction destination of particle leaving hole
all for tile k
ihole[k][0][0] = ih, number of holes left (error, if negative)
idimp = size of phase space = 4
nppmx = maximum number of particles in tile
mx1 = (system length in x direction - 1)/mx + 1
my1 = (system length in y direction - 1)/my + 1
npbmx = size of buffer array ppbuff
ntmax = size of hole array for particles leaving tiles
irc = maximum overflow, returned only if error occurs, when irc > 0
local data */
int mxy1, i, j, k, ii, nh, ist, j1, ierr;
/* The sizes of the shared memory arrays are as follows: */
/* int sncl[8], ip[1]; */
/* blockDim.x should be >= 8 */
int *sncl, *ip;
extern __shared__ int shm[];
sncl = (int *)&shm[0];
ip = (int *)&shm[8];
mxy1 = mx1*my1;
ierr = 0;
/* k = tile number */
k = blockIdx.x + gridDim.x*blockIdx.y;
j = threadIdx.x;
/* buffer particles that are leaving tile: update ppbuff, ncl */
/* loop over tiles */
if (k < mxy1) {
/* find address offset for ordered ppbuff array */
if (j < 8) {
ist = ncl[j+8*k];
sncl[j] = ist;
}
if (threadIdx.x==0)
ip[0] = 0;
/* synchronize threads */
__syncthreads();
/* perform local prefix reduction */
liscan2(sncl,8);
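/* convert the inclusive prefix sums to exclusive starting offsets, */
/* one per departure direction, for writing into ppbuff */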
if (j < 8)
sncl[j] -= ist;
/* synchronize threads */
__syncthreads();
nh = ihole[2*(ntmax+1)*k];
/* loop over particles leaving tile */
while (j < nh) {
/* buffer particles that are leaving tile, in direction order */
j1 = ihole[2*(j+1+(ntmax+1)*k)] - 1;
ist = ihole[1+2*(j+1+(ntmax+1)*k)];
ii = atomicAdd(&sncl[ist-1],1);
if (ii < npbmx) {
for (i = 0; i < idimp; i++) {
ppbuff[ii+npbmx*(i+idimp*k)]
= ppart[j1+nppmx*(i+idimp*k)];
}
}
else {
ip[0] = 1;
}
j += blockDim.x;
}
/* synchronize threads */
__syncthreads();
/* write out counters */
j = threadIdx.x;
if (j < 8) {
ncl[j+8*k] = sncl[j];
}
/* set error */
if (threadIdx.x==0) {
if (ip[0] > 0)
ierr = ierr > sncl[7] ? ierr : sncl[7];
}
}
/* ppbuff overflow */
if (ierr > 0)
*irc = ierr;
return;
}
/*--------------------------------------------------------------------*/
__global__ void gpuppord2l(float ppart[], float ppbuff[], int kpic[],
int ncl[], int ihole[], int idimp, int nppmx,
int mx1, int my1, int npbmx, int ntmax,
int *irc) {
/* this subroutine performs third step of a particle sort by x,y grid
in tiles of mx, my, where incoming particles from other tiles are
copied into ppart.
linear interpolation, with periodic boundary conditions
tiles are assumed to be arranged in 2D linear memory
input: all except irc
output: ppart, kpic, irc
ppart[k][i][n] = i co-ordinate of particle n in tile k
ppbuff[k][i][n] = i co-ordinate of particle n in tile k
kpic[k] = number of particles in tile k
ncl[k][i] = number of particles going to destination i, tile k
ihole[k][:][0] = location of hole in array left by departing particle
ihole[k][:][1] = direction destination of particle leaving hole
all for tile k
ihole[k][0][0] = ih, number of holes left (error, if negative)
idimp = size of phase space = 4
nppmx = maximum number of particles in tile
mx1 = (system length in x direction - 1)/mx + 1
my1 = (system length in y direction - 1)/my + 1
npbmx = size of buffer array ppbuff
ntmax = size of hole array for particles leaving tiles
irc = maximum overflow, returned only if error occurs, when irc > 0
local data */
int mxy1, npp, ncoff, i, j, k, ii, jj, kx, ky, ni, nh;
int nn, mm, ll, ip, j1, j2, kxl, kxr, kk, kl, kr;
int nths;
/* The sizes of the shared memory arrays are as follows: */
/* int ks[8], sip[8], sj[blockDim.x], sj1[1], ist[1]; */
int *ks, *sip, *sj, *sj1, *ist;
extern __shared__ int shm[];
ks = (int *)&shm[0];
sip = (int *)&shm[8];
sj = (int *)&shm[16];
sj1 = (int *)&shm[16+blockDim.x];
ist = (int *)&shm[17+blockDim.x];
mxy1 = mx1*my1;
/* k = tile number */
k = blockIdx.x + gridDim.x*blockIdx.y;
/* copy incoming particles from buffer into ppart: update ppart, kpic */
/* loop over tiles */
if (k < mxy1) {
npp = kpic[k];
ky = k/mx1;
/* loop over tiles in y, assume periodic boundary conditions */
kk = ky*mx1;
/* find tile above */
kl = ky - 1;
if (kl < 0)
kl += my1;
kl = kl*mx1;
/* find tile below */
kr = ky + 1;
if (kr >= my1)
kr -= my1;
kr = kr*mx1;
/* loop over tiles in x, assume periodic boundary conditions */
kx = k - ky*mx1;
kxl = kx - 1;
if (kxl < 0)
kxl += mx1;
kxr = kx + 1;
if (kxr >= mx1)
kxr -= mx1;
/* find tile number for different directions */
if (threadIdx.x==0) {
ks[0] = kxr + kk;
ks[1] = kxl + kk;
ks[2] = kx + kr;
ks[3] = kxr + kr;
ks[4] = kxl + kr;
ks[5] = kx + kl;
ks[6] = kxr + kl;
ks[7] = kxl + kl;
sj1[0] = 0;
ist[0] = 0;
}
/* synchronize threads */
__syncthreads();
/* find number of incoming particles */
kk = 0;
ncoff = 0;
ip = 0;
ii = threadIdx.x;
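/* for each of the 8 neighbor tiles: ip = number of particles arriving */
/* from direction ii, kk = offset of the first such particle in that */
/* neighbor's ppbuff segment */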
if (ii < 8) {
kk = ks[ii];
if (ii > 0)
ncoff = ncl[ii-1+8*kk];
ip = ncl[ii+8*kk] - ncoff;
kk = ncoff + idimp*npbmx*kk;
sip[ii] = ip;
}
/* synchronize threads */
__syncthreads();
/* perform local prefix reduction */
liscan2(sip,8);
ni = sip[7];
/* loop over directions */
nh = ihole[2*(ntmax+1)*k];
j1 = 0;
mm = (ni - 1)/(int) blockDim.x + 1;
for (nn = 0; nn < mm; nn++) {
j = threadIdx.x + blockDim.x*nn;
sj[threadIdx.x] = 0;
if (threadIdx.x==0)
sj[0] = sj1[0];
/* synchronize threads */
__syncthreads();
/* calculate offset for reading from particle buffer */
if (ii < 8) {
/* mark next location where direction ii changes */
jj = sip[ii] - blockDim.x*nn;
if ((jj >= 0) && (jj < blockDim.x)) {
if (ip > 0)
sj[jj] -= kk + ip;
}
}
/* synchronize threads */
__syncthreads();
/* calculate offset for reading from particle buffer */
if (ii < 8) {
/* mark location where direction ii starts */
jj -= ip;
if ((jj >= 0) && (jj < blockDim.x)) {
if (ip > 0)
sj[jj] += kk;
}
}
nths = ni - blockDim.x*nn;
if (nths > blockDim.x)
nths = blockDim.x;
/* synchronize threads */
__syncthreads();
/* perform local prefix reduction */
liscan2(sj,nths);
/* save last value for next time */
if (threadIdx.x==0) {
jj = 0;
if (nths > 0)
jj = sj[nths-1];
sj1[0] = jj;
}
if (j < ni) {
/* insert incoming particles into holes */
if (j < nh) {
j1 = ihole[2*(j+1+(ntmax+1)*k)] - 1;
}
/* place overflow at end of array */
else {
j1 = npp + (j - nh);
}
if (j1 < nppmx) {
jj = sj[threadIdx.x];
for (i = 0; i < idimp; i++) {
ppart[j1+nppmx*(i+idimp*k)]
= ppbuff[j+jj+npbmx*i];
}
}
else {
ist[0] = 1;
}
}
/* synchronize threads */
__syncthreads();
}
/* update particle number if all holes have been filled */
jj = ni - nh;
if (jj > 0)
npp += jj;
/* fill up remaining holes in particle array with particles from end */
ip = nh - ni;
if (ip > 0) {
mm = (ip - 1)/(int) blockDim.x + 1;
kk = 0;
ll = 0;
/* loop over holes */
for (nn = 0; nn < mm; nn++) {
j = threadIdx.x + blockDim.x*nn;
/* j1 = locations of particles to fill holes, in decreasing order */
j1 = 0;
if (j < ip) {
j1 = npp - j - 1;
}
/* j2 = locations of holes at the end, in decreasing order */
j2 = 0;
jj = nh - ll - threadIdx.x;
if (jj > 0) {
j2 = ihole[2*(jj+(ntmax+1)*k)] - 1;
}
/* holes with locations greater than npp-ip do not need to be filled */
/* identify such holes */
sj[threadIdx.x] = 1;
/* synchronize threads */
__syncthreads();
/* omit particles at end that are holes */
ii = npp - (j2 + blockDim.x*nn) - 1;
if ((ii >= 0) && (ii < blockDim.x))
sj[ii] = 0;
nths = ip - blockDim.x*nn;
if (nths > blockDim.x)
nths = blockDim.x;
/* synchronize threads */
__syncthreads();
/* perform local prefix reduction */
liscan2(sj,nths);
/* ii = number particles at end to be moved */
ii = 0;
if (nths > 0)
ii = sj[nths-1];
/* identify which particles at end to be moved */
if (ii < nths) {
ncoff = 0;
if (j < ip) {
if (threadIdx.x > 0)
ncoff = sj[threadIdx.x-1];
jj = sj[threadIdx.x];
}
/* synchronize threads */
__syncthreads();
if (j < ip) {
if (jj > ncoff) {
sj[jj-1] = j1;
}
}
/* synchronize threads */
__syncthreads();
}
/* j2 = locations of holes to be filled in increasing order */
j2 = 0;
if (j < ip) {
j1 = npp - j - 1;
jj = threadIdx.x + ni + kk + 1;
if (jj <= nh)
j2 = ihole[2*(jj+(ntmax+1)*k)] - 1;
}
/* move particles from end into remaining holes */
if (j < (ii+blockDim.x*nn)) {
if (ii < nths)
j1 = sj[threadIdx.x];
for (i = 0; i < idimp; i++) {
ppart[j2+nppmx*(i+idimp*k)]
= ppart[j1+nppmx*(i+idimp*k)];
}
}
/* accumulate number of holes filled */
kk += ii;
/* accumulate number of holes skipped over */
ii = nths - ii;
ll += ii;
}
/* update number of particles */
npp -= ip;
}
/* set error and update particle */
if (threadIdx.x==0) {
/* ppart overflow */
if (ist[0] > 0)
*irc = npp;
kpic[k] = npp;
}
}
return;
}
/*--------------------------------------------------------------------*/
__global__ void gpupois23t(float2 qt[], float2 fxyt[], float2 ffct[],
float *we, int nx, int ny, int nxvh, int nyv,
int nxhd, int nyhd) {
/* this subroutine solves 2d poisson's equation in fourier space for
force/charge (or convolution of electric field over particle shape)
with periodic boundary conditions, without packed data.
Zeros out z component.
vector length is second dimension
input: qt,ffct,nx,ny,nxvh,nyv,nxhd,nyhd, output: fxyt,we
approximate flop count is: 26*nxc*nyc + 12*(nxc + nyc)
where nxc = nx/2 - 1, nyc = ny/2 - 1
equation used is:
fx[kx][ky] = -sqrt(-1)*kx*g[kx][ky]*s[kx][ky]*q[kx][ky],
fy[kx][ky] = -sqrt(-1)*ky*g[kx][ky]*s[kx][ky]*q[kx][ky],
where kx = 2pi*j/nx, ky = 2pi*k/ny, and j,k = fourier mode numbers,
g[kx][ky] = (affp/(kx**2+ky**2))*s[kx][ky],
s[kx][ky] = exp(-((kx*ax)**2+(ky*ay)**2)/2), except for
fx(kx=pi) = fy(kx=pi) = fx(ky=pi) = fy(ky=pi) = 0, and
fx(kx=0,ky=0) = fy(kx=0,ky=0) = 0.
qt[j][k] = complex charge density for fourier mode (k,j)
fxyt[j][0][k] = x component of complex force/charge,
fxyt[j][1][k] = y component of complex force/charge,
fxyt[j][2][k] = z component of complex force/charge,
all for fourier mode (k,j)
   cimag(ffct[j][k]) = finite-size particle shape factor s
   creal(ffct[j][k]) = potential green's function g
for fourier mode (k,j)
electric field energy is also calculated, using
we = nx*ny*sum((affp/(kx**2+ky**2))*|q[kx][ky]*s[kx][ky]|**2)
nx/ny = system length in x/y direction
nxvh = second dimension of field arrays, must be >= nxh+1
nyv = first dimension of field arrays, must be >= ny
nxhd = second dimension of form factor array, must be >= nxh
nyhd = first dimension of form factor array, must be >= nyh
local data */
int nxh, nyh, nxh1, j, k, k1, jj, jk, jk3;
float dnx, dny, dkx, at1, at2, at3, at4;
float2 zero, zt1, zt2, zt3;
/* The size of the shared memory array is as follows: */
/* float ss[blockDim.x]; */
extern __shared__ float ss[];
double wp;
nxh = nx/2;
nyh = 1 > ny/2 ? 1 : ny/2;
dnx = 6.28318530717959/(float) nx;
dny = 6.28318530717959/(float) ny;
zero.x = 0.0f;
zero.y = 0.0f;
/* calculate force/charge and sum field energy */
wp = 0.0;
/* mode numbers 0 < kx < nx/2 and 0 < ky < ny/2 */
/* for (j = 1; j < nxh; j++) { */
j = blockIdx.x;
if ((j > 0) && (j < nxh)) {
dkx = dnx*(float) j;
jj = nyhd*j;
jk = nyv*j;
jk3 = 3*jk;
/* for (k = 1; k < nyh; k++) { */
k = threadIdx.x;
while (k < nyh) {
if (k > 0) {
k1 = ny - k;
zt1 = ffct[k+jj];
at1 = zt1.x*zt1.y;
at2 = at1*dkx;
at3 = at1*dny*(float) k;
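/* zt1, zt2 = -sqrt(-1)*q for the ky > 0 and ky < 0 modes, respectively */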
zt1 = qt[k+jk];
at4 = zt1.x;
zt1.x = zt1.y;
zt1.y = -at4;
zt2 = qt[k1+jk];
at4 = zt2.x;
zt2.x = zt2.y;
zt2.y = -at4;
zt3.x = at2*zt1.x;
zt3.y = at2*zt1.y;
fxyt[k+jk3] = zt3;
zt3.x = at3*zt1.x;
zt3.y = at3*zt1.y;
fxyt[k+nyv+jk3] = zt3;
fxyt[k+2*nyv+jk3] = zero;
zt3.x = at2*zt2.x;
zt3.y = at2*zt2.y;
fxyt[k1+jk3] = zt3;
zt3.x = -at3*zt2.x;
zt3.y = -at3*zt2.y;
fxyt[k1+nyv+jk3] = zt3;
fxyt[k1+2*nyv+jk3] = zero;
wp += (double) (at1*(zt1.x*zt1.x + zt1.y*zt1.y
+ zt2.x*zt2.x + zt2.y*zt2.y));
}
k += blockDim.x;
}
}
/* mode numbers ky = 0, ny/2 */
if (blockIdx.x==0) {
k1 = nyh;
/* for (j = 1; j < nxh; j++) { */
j = threadIdx.x;
while (j < nxh) {
if (j > 0) {
jj = nyhd*j;
jk = nyv*j;
jk3 = 3*jk;
zt1 = ffct[jj];
at1 = zt1.x*zt1.y;
at2 = at1*dnx*(float) j;
zt1 = qt[jk];
at4 = zt1.x;
zt3.x = at2*zt1.y;
zt3.y = -at2*at4;
fxyt[jk3] = zt3;
fxyt[nyv+jk3] = zero;
fxyt[2*nyv+jk3] = zero;
fxyt[k1+jk3] = zero;
fxyt[k1+nyv+jk3] = zero;
fxyt[k1+2*nyv+jk3] = zero;
wp += (double) (at1*(zt1.x*zt1.x + zt1.y*zt1.y));
}
j += blockDim.x;
}
/* mode numbers kx = 0, nx/2 */
nxh1 = 3*nyv*nxh;
/* for (k = 1; k < nyh; k++) { */
k = threadIdx.x;
while (k < nyh) {
if (k > 0) {
k1 = ny - k;
zt1 = ffct[k];
at1 = zt1.x*zt1.y;
at3 = at1*dny*(float) k;
zt1 = qt[k];
at4 = zt1.x;
zt3.x = at3*zt1.y;
zt3.y = -at3*at4;
fxyt[k] = zero;
fxyt[k+nyv] = zt3;
fxyt[k+2*nyv] = zero;
fxyt[k1] = zero;
zt3.y = -zt3.y;
fxyt[k1+nyv] = zt3;
fxyt[k1+2*nyv] = zero;
fxyt[k+nxh1] = zero;
fxyt[k+nyv+nxh1] = zero;
fxyt[k+2*nyv+nxh1] = zero;
fxyt[k1+nxh1] = zero;
fxyt[k1+nyv+nxh1] = zero;
fxyt[k1+2*nyv+nxh1] = zero;
wp += (double) (at1*(zt1.x*zt1.x + zt1.y*zt1.y));
}
k += blockDim.x;
}
if (threadIdx.x==0) {
k1 = nyh;
fxyt[0] = zero;
fxyt[nyv] = zero;
fxyt[2*nyv] = zero;
fxyt[k1] = zero;
fxyt[k1+nyv] = zero;
fxyt[k1+2*nyv] = zero;
fxyt[nxh1] = zero;
fxyt[nxh1+nyv] = zero;
fxyt[nxh1+2*nyv] = zero;
fxyt[k1+nxh1] = zero;
fxyt[k1+nyv+nxh1] = zero;
fxyt[k1+2*nyv+nxh1] = zero;
}
}
j = blockIdx.x;
if (j <= nxh) {
/* sum potential energies for each x co-ordinate */
ss[threadIdx.x] = (float) wp;
/* synchronize threads */
__syncthreads();
lsum2(ss,blockDim.x);
/* normalize potential energy for each x co-ordinate */
if (threadIdx.x==0)
we[j] = ss[0]*((float) (nx*ny));
}
return;
}
/*--------------------------------------------------------------------*/
__global__ void gpucuperp2t(float2 cut[], int nx, int ny, int nxvh,
int nyv) {
/* this subroutine calculates the transverse current in fourier space
without packed data.
input: all, output: cut
approximate flop count is: 36*nxc*nyc
and nxc*nyc divides
where nxc = nx/2 - 1, nyc = ny/2 - 1
the transverse current is calculated using the equation:
cux[ky][kx] = cux[ky][kx]
-kx*(kx*cux[ky][kx]+ky*cuy[ky][kx])/(kx*kx+ky*ky)
cuy[ky][kx] = cuy[ky][kx]
-ky*(kx*cux[ky][kx]+ky*cuy[ky][kx])/(kx*kx+ky*ky)
where kx = 2pi*j/nx, ky = 2pi*k/ny, and j,k = fourier mode numbers,
except for cux(kx=pi) = cuy(kx=pi) = 0, cux(ky=pi) = cuy(ky=pi) = 0,
and cux(kx=0,ky=0) = cuy(kx=0,ky=0) = 0.
cut[j][i][k] = complex current density for fourier mode (k,j)
nx/ny = system length in x/y direction
nxvh = third dimension of current array, must be >= nxh
nyv = first dimension of current array, must be >= ny
local data */
int nxh, nyh, nxh1, j, k, k1, jk3;
float dnx, dny, dkx, dkx2, dky, at1;
float2 zero, zt1, zt2, zt3;
nxh = nx/2;
nyh = 1 > ny/2 ? 1 : ny/2;
dnx = 6.28318530717959/(float) nx;
dny = 6.28318530717959/(float) ny;
zero.x = 0.0f;
zero.y = 0.0f;
/* calculate transverse part of current */
/* mode numbers 0 < kx < nx/2 and 0 < ky < ny/2 */
/* for (j = 1; j < nxh; j++) { */
j = blockIdx.x;
if ((j > 0) && (j < nxh)) {
dkx = dnx*(float) j;
dkx2 = dkx*dkx;
jk3 = 3*nyv*j;
/* for (k = 1; k < nyh; k++) { */
k = threadIdx.x;
while (k < nyh) {
if (k > 0) {
k1 = ny - k;
dky = dny*(float) k;
at1 = 1.0f/(dkx2 + dky*dky);
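/* zt2 = (k.cu)/|k|**2; subtracting k*zt2 from cu removes the */
/* longitudinal component */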
zt1 = cut[k+jk3];
zt2.x = dkx*zt1.x;
zt2.y = dkx*zt1.y;
zt3 = cut[k+nyv+jk3];
zt2.x = at1*(zt2.x + dky*zt3.x);
zt2.y = at1*(zt2.y + dky*zt3.y);
zt1.x -= dkx*zt2.x;
zt1.y -= dkx*zt2.y;
zt3.x -= dky*zt2.x;
zt3.y -= dky*zt2.y;
cut[k+jk3] = zt1;
cut[k+nyv+jk3] = zt3;
zt1 = cut[k1+jk3];
zt2.x = dkx*zt1.x;
zt2.y = dkx*zt1.y;
zt3 = cut[k1+nyv+jk3];
zt2.x = at1*(zt2.x - dky*zt3.x);
zt2.y = at1*(zt2.y - dky*zt3.y);
zt1.x -= dkx*zt2.x;
zt1.y -= dkx*zt2.y;
zt3.x += dky*zt2.x;
zt3.y += dky*zt2.y;
cut[k1+jk3] = zt1;
cut[k1+nyv+jk3] = zt3;
}
k += blockDim.x;
}
}
/* mode numbers ky = 0, ny/2 */
if (blockIdx.x==0) {
k1 = nyh;
/* for (j = 1; j < nxh; j++) { */
j = threadIdx.x;
while (j < nxh) {
if (j > 0) {
jk3 = 3*nyv*j;
cut[jk3] = zero;
cut[k1+jk3] = zero;
cut[k1+nyv+jk3] = zero;
}
j += blockDim.x;
}
/* mode numbers kx = 0, nx/2 */
nxh1 = 3*nyv*nxh;
/* for (k = 1; k < nyh; k++) { */
k = threadIdx.x;
while (k < nyh) {
if (k > 0) {
k1 = ny - k;
zt1 = cut[k];
cut[k+nyv] = zero;
zt1.y = -zt1.y;
cut[k1] = zt1;
cut[k1+nyv] = zero;
cut[k+nxh1] = zero;
cut[k+nyv+nxh1] = zero;
cut[k1+nxh1] = zero;
cut[k1+nyv+nxh1] = zero;
}
k += blockDim.x;
}
if (threadIdx.x==0) {
k1 = nyh;
cut[0] = zero;
cut[nyv] = zero;
cut[k1] = zero;
cut[k1+nyv] = zero;
cut[nxh1] = zero;
cut[nyv+nxh1] = zero;
cut[k1+nxh1] = zero;
cut[k1+nyv+nxh1] = zero;
}
}
return;
}
/*--------------------------------------------------------------------*/
__global__ void gpuibpois23t(float2 cut[], float2 bxyt[], float2 ffct[],
float ci, float *wm, int nx, int ny,
int nxvh, int nyv, int nxhd, int nyhd) {
/* this subroutine solves 2-1/2d poisson's equation in fourier space for
magnetic field, with periodic boundary conditions,
without packed data.
   input: cut,ffct,ci,nx,ny,nxvh,nyv,nxhd,nyhd, output: bxyt,wm
approximate flop count is: 90*nxc*nyc + 40*(nxc + nyc)
where nxc = nx/2 - 1, nyc = ny/2 - 1
the magnetic field is calculated using the equations:
bx[kx][ky] = ci*ci*sqrt(-1)*g[kx][ky]*ky*cuz[[kx][ky],
by[kx][ky] = -ci*ci*sqrt(-1)*g[kx][ky]*kx*cuz[kx][ky],
bz[kx][ky] = ci*ci*sqrt(-1)*g[kx][ky]*(kx*cuy[kx][ky]-ky*cux[kx][ky]),
where kx = 2pi*j/nx, ky = 2pi*k/ny, and j,k = fourier mode numbers,
   g[kx][ky] = (affp/(kx**2+ky**2))*s[kx][ky],
s[kx][ky] = exp(-((kx*ax)**2+(ky*ay)**2)/2), except for
bx(kx=pi) = by(kx=pi) = bz(kx=pi) = bx(ky=pi) = by(ky=pi) = bz(ky=pi)
= 0, and bx(kx=0,ky=0) = by(kx=0,ky=0) = bz(kx=0,ky=0) = 0.
cut[j][i][k] = complex current density for fourier mode (k,j)
bxyt[j][i][k] = i component of complex magnetic field
all for fourier mode (k,j)
cimag(ffct[j][k]) = finite-size particle shape factor s
creal(ffct[j][k]) = potential green's function g
   for fourier mode (k,j)
   ci = reciprocal of velocity of light
magnetic field energy is also calculated, using
wm = nx*ny*sum((affp/(kx**2+ky**2))*ci*ci*
|cu[kx][ky]*s[kx][ky]|**2), where
affp = normalization constant = nx*ny/np, where np=number of particles
this expression is valid only if the current is divergence-free
nx/ny = system length in x/y direction
nxvh = third dimension of field arrays, must be >= nxh
nyv = first dimension of field arrays, must be >= ny
nxhd = second dimension of form factor array, must be >= nxh
nyhd = first dimension of form factor array, must be >= nyh
local data */
   int nxh, nyh, nxh1, j, k, k1, jj, jk3;
float dnx, dny, dkx, ci2, at1, at2, at3, at4;
float2 zero, zt1, zt2, zt3;
/* The size of the shared memory array is as follows: */
/* float ss[blockDim.x]; */
extern __shared__ float ss[];
double wp;
nxh = nx/2;
nyh = 1 > ny/2 ? 1 : ny/2;
dnx = 6.28318530717959/(float) nx;
dny = 6.28318530717959/(float) ny;
zero.x = 0.0f;
zero.y = 0.0f;
ci2 = ci*ci;
/* calculate magnetic field and sum field energy */
wp = 0.0;
/* mode numbers 0 < kx < nx/2 and 0 < ky < ny/2 */
/* for (j = 1; j < nxh; j++) { */
j = blockIdx.x;
if ((j > 0) && (j < nxh)) {
dkx = dnx*(float) j;
jj = nyhd*j;
jk3 = 3*nyv*j;
/* for (k = 1; k < nyh; k++) { */
k = threadIdx.x;
while (k < nyh) {
if (k > 0) {
k1 = ny - k;
zt1 = ffct[k+jj];
at1 = ci2*zt1.x;
at2 = dkx*at1;
at3 = at1*dny*(float) k;
at1 = at1*zt1.y;
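/* zt1, zt2, zt3 = sqrt(-1)*cuz, sqrt(-1)*cuy, sqrt(-1)*cux for ky > 0 */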
zt1 = cut[k+2*nyv+jk3];
at4 = zt1.x;
zt1.x = -zt1.y;
zt1.y = at4;
zt2 = cut[k+nyv+jk3];
at4 = zt2.x;
zt2.x = -zt2.y;
zt2.y = at4;
zt3 = cut[k+jk3];
at4 = zt3.x;
zt3.x = -zt3.y;
zt3.y = at4;
wp += (double) (at1*(zt1.x*zt1.x + zt1.y*zt1.y
+ zt2.x*zt2.x + zt2.y*zt2.y + zt3.x*zt3.x + zt3.y*zt3.y));
zt3.x = at2*zt2.x - at3*zt3.x;
zt3.y = at2*zt2.y - at3*zt3.y;
zt2.x = -at2*zt1.x;
zt2.y = -at2*zt1.y;
zt1.x = at3*zt1.x;
zt1.y = at3*zt1.y;
bxyt[k+jk3] = zt1;
bxyt[k+nyv+jk3] = zt2;
bxyt[k+2*nyv+jk3] = zt3;
zt1 = cut[k1+2*nyv+jk3];
at4 = zt1.x;
zt1.x = -zt1.y;
zt1.y = at4;
zt2 = cut[k1+nyv+jk3];
at4 = zt2.x;
zt2.x = -zt2.y;
zt2.y = at4;
zt3 = cut[k1+jk3];
at4 = zt3.x;
zt3.x = -zt3.y;
zt3.y = at4;
wp += (double) (at1*(zt1.x*zt1.x + zt1.y*zt1.y
+ zt2.x*zt2.x + zt2.y*zt2.y + zt3.x*zt3.x + zt3.y*zt3.y));
zt3.x = at2*zt2.x + at3*zt3.x;
zt3.y = at2*zt2.y + at3*zt3.y;
zt2.x = -at2*zt1.x;
zt2.y = -at2*zt1.y;
zt1.x = -at3*zt1.x;
zt1.y = -at3*zt1.y;
bxyt[k1+jk3] = zt1;
bxyt[k1+nyv+jk3] = zt2;
bxyt[k1+2*nyv+jk3] = zt3;
}
k += blockDim.x;
}
}
/* mode numbers ky = 0, ny/2 */
if (blockIdx.x==0) {
k1 = nyh;
/* for (j = 1; j < nxh; j++) { */
j = threadIdx.x;
while (j < nxh) {
if (j > 0) {
jj = nyhd*j;
jk3 = 3*nyv*j;
zt1 = ffct[jj];
at1 = ci2*zt1.x;
at2 = at1*dnx*(float) j;
at1 = at1*zt1.y;
zt1 = cut[2*nyv+jk3];
at4 = zt1.x;
zt1.x = -zt1.y;
zt1.y = at4;
zt2 = cut[nyv+jk3];
at4 = zt2.x;
zt2.x = -zt2.y;
zt2.y = at4;
wp += (double) (at1*(zt1.x*zt1.x + zt1.y*zt1.y
+ zt2.x*zt2.x + zt2.y*zt2.y));
zt3.x = at2*zt2.x;
zt3.y = at2*zt2.y;
zt2.x = -at2*zt1.x;
zt2.y = -at2*zt1.y;
bxyt[jk3] = zero;
bxyt[nyv+jk3] = zt2;
bxyt[2*nyv+jk3] = zt3;
bxyt[k1+jk3] = zero;
bxyt[k1+nyv+jk3] = zero;
bxyt[k1+2*nyv+jk3] = zero;
}
j += blockDim.x;
}
/* mode numbers kx = 0, nx/2 */
nxh1 = 3*nyv*nxh;
/* for (k = 1; k < nyh; k++) { */
k = threadIdx.x;
while (k < nyh) {
if (k > 0) {
k1 = ny - k;
zt1 = ffct[k];
at1 = ci2*zt1.x;
at3 = at1*dny*(float) k;
at1 = at1*zt1.y;
zt1 = cut[k+2*nyv];
at4 = zt1.x;
zt1.x = -zt1.y;
zt1.y = at4;
zt3 = cut[k];
at4 = zt3.x;
zt3.x = -zt3.y;
zt3.y = at4;
wp += (double) (at1*(zt1.x*zt1.x + zt1.y*zt1.y
+ zt3.x*zt3.x + zt3.y*zt3.y));
zt3.x = -at3*zt3.x;
zt3.y = -at3*zt3.y;
zt1.x = at3*zt1.x;
zt1.y = at3*zt1.y;
bxyt[k] = zt1;
bxyt[k+nyv] = zero;
bxyt[k+2*nyv] = zt3;
zt1.y = -zt1.y;
zt3.y = -zt3.y;
bxyt[k1] = zt1;
bxyt[k1+nyv] = zero;
bxyt[k1+2*nyv] = zt3;
bxyt[k+nxh1] = zero;
bxyt[k+nyv+nxh1] = zero;
bxyt[k+2*nyv+nxh1] = zero;
bxyt[k1+nxh1] = zero;
bxyt[k1+nyv+nxh1] = zero;
bxyt[k1+2*nyv+nxh1] = zero;
}
k += blockDim.x;
}
if (threadIdx.x==0) {
k1 = nyh;
bxyt[0] = zero;
bxyt[nyv] = zero;
bxyt[2*nyv] = zero;
bxyt[k1] = zero;
bxyt[k1+nyv] = zero;
bxyt[k1+2*nyv] = zero;
bxyt[nxh1] = zero;
bxyt[nxh1+nyv] = zero;
bxyt[nxh1+2*nyv] = zero;
bxyt[k1+nxh1] = zero;
bxyt[k1+nyv+nxh1] = zero;
bxyt[k1+2*nyv+nxh1] = zero;
}
}
j = blockIdx.x;
if (j <= nxh) {
/* sum magnetic energies for each x co-ordinate */
ss[threadIdx.x] = (float) wp;
/* synchronize threads */
__syncthreads();
lsum2(ss,blockDim.x);
/* normalize magnetic energy for each x co-ordinate */
if (threadIdx.x==0)
wm[j] = ss[0]*((float) (nx*ny));
}
return;
}
/*--------------------------------------------------------------------*/
__global__ void gpumaxwel2t(float2 exyt[], float2 bxyt[], float2 cut[],
float2 ffct[], float ci, float dt,
float *wf, float *wm, int nx, int ny,
int nxvh, int nyv, int nxhd, int nyhd) {
/* this subroutine solves 2-1/2d maxwell's equation in fourier space for
transverse electric and magnetic fields with periodic boundary
conditions, without packed data.
input: all, output: wf, wm, exy, bxy
approximate flop count is: 286*nxc*nyc + 84*(nxc + nyc)
where nxc = nx/2 - 1, nyc = ny/2 - 1
the magnetic field is first updated half a step using the equations:
bx[kx][ky] = bx[kx][ky] - .5*dt*sqrt(-1)*ky*ez[kx][ky]
by[kx][ky] = by[kx][ky] + .5*dt*sqrt(-1)*kx*ez[kx][ky]
bz[kx][ky] = bz[kx][ky] - .5*dt*sqrt(-1)*(kx*ey[kx][ky]-ky*ex[kx][ky])
the electric field is then updated a whole step using the equations:
ex[kx][ky] = ex[kx][ky] + c2*dt*sqrt(-1)*ky*bz[kx][ky]
- affp*dt*cux[kx][ky]*s[kx][ky]
ey[kx][ky] = ey[kx][ky] - c2*dt*sqrt(-1)*kx*bz[kx][ky]
- affp*dt*cuy[kx][ky]*s[kx][ky]
ez[kx][ky] = ez[kx][ky] + c2*dt*sqrt(-1)*(kx*by[kx][ky]-ky*bx[kx][ky])
- affp*dt*cuz[kx][ky]*s[kx][ky]
the magnetic field is finally updated the remaining half step with
the new electric field and the previous magnetic field equations.
where kx = 2pi*j/nx, ky = 2pi*k/ny, c2 = 1./(ci*ci)
and s[kx][ky] = exp(-((kx*ax)**2+(ky*ay)**2)
j,k = fourier mode numbers, except for
ex(kx=pi) = ey(kx=pi) = ez(kx=pi) = 0,
   ex(ky=pi) = ey(ky=pi) = ez(ky=pi) = 0,
ex(kx=0,ky=0) = ey(kx=0,ky=0) = ez(kx=0,ky=0) = 0.
and similarly for bx, by, bz.
cut[j][i][k] = complex current density
exyt[j][i][k] = complex transverse electric field
bxyt[j][i][k] = complex magnetic field
for component i, all for fourier mode (k,j)
creal(ffct[0][0]) = affp = normalization constant = nx*ny/np,
where np=number of particles
cimag(ffct[j][k]) = finite-size particle shape factor s,
s[kx][ky] = exp(-((kx*ax)**2+(ky*ay)**2)/2)
for fourier mode (k,j)
   ci = reciprocal of velocity of light
dt = time interval between successive calculations
transverse electric field energy is also calculated, using
   wf = nx*ny*sum((1/affp)*|exy[kx][ky]|**2)
   magnetic field energy is also calculated, using
   wm = nx*ny*sum((c2/affp)*|bxy[kx][ky]|**2)
nx/ny = system length in x/y direction
nxvh = third dimension of field arrays, must be >= nxh
nyv = first dimension of field arrays, must be >= ny
nxhd = second dimension of form factor array, must be >= nxh
nyhd = first dimension of form factor array, must be >= nyh
local data */
int nxh, nyh, nxh1, j, k, k1, jj, jk3;
float dnx, dny, dth, c2, cdt, affp, anorm, dkx, dky, afdt, adt;
float2 zero, zt1, zt2, zt3, zt4, zt5, zt6, zt7, zt8, zt9;
float2 ct1, ct2, ct3;
/* The size of the shared memory array is as follows: */
/* float ss[blockDim.x]; */
extern __shared__ float ss[];
double wp, ws;
nxh = nx/2;
nyh = 1 > ny/2 ? 1 : ny/2;
dnx = 6.28318530717959/(float) nx;
dny = 6.28318530717959/(float) ny;
dth = 0.5f*dt;
c2 = 1.0f/(ci*ci);
cdt = c2*dt;
zt1 = ffct[0];
affp = zt1.x;
adt = affp*dt;
zero.x = 0.0f;
zero.y = 0.0f;
anorm = 1.0f/affp;
/* update electromagnetic field and sum field energies */
ws = 0.0;
wp = 0.0;
/* calculate the electromagnetic fields */
/* mode numbers 0 < kx < nx/2 and 0 < ky < ny/2 */
/* for (j = 1; j < nxh; j++) { */
j = blockIdx.x;
if ((j > 0) && (j < nxh)) {
dkx = dnx*(float) j;
jj = nyhd*j;
jk3 = 3*nyv*j;
/* for (k = 1; k < nyh; k++) { */
k = threadIdx.x;
while (k < nyh) {
if (k > 0) {
k1 = ny - k;
dky = dny*(float) k;
zt1 = ffct[k+jj];
afdt = adt*zt1.y;
/* update magnetic field half time step, ky > 0 */
ct3 = exyt[k+2*nyv+jk3];
zt1.x = -ct3.y;
            zt1.y = ct3.x;
ct2 = exyt[k+nyv+jk3];
zt2.x = -ct2.y;
zt2.y = ct2.x;
ct1 = exyt[k+jk3];
zt3.x = -ct1.y;
zt3.y = ct1.x;
zt4 = bxyt[k+jk3];
zt5 = bxyt[k+nyv+jk3];
zt6 = bxyt[k+2*nyv+jk3];
zt4.x -= dth*(dky*zt1.x);
zt4.y -= dth*(dky*zt1.y);
zt5.x += dth*(dkx*zt1.x);
zt5.y += dth*(dkx*zt1.y);
zt6.x -= dth*(dkx*zt2.x - dky*zt3.x);
zt6.y -= dth*(dkx*zt2.y - dky*zt3.y);
/* update electric field whole time step */
zt1.x = -zt6.y;
zt1.y = zt6.x;
zt2.x = -zt5.y;
zt2.y = zt5.x;
zt3.x = -zt4.y;
zt3.y = zt4.x;
zt7 = cut[k+jk3];
zt8 = cut[k+nyv+jk3];
zt9 = cut[k+2*nyv+jk3];
zt7.x = ct1.x + cdt*(dky*zt1.x) - afdt*zt7.x;
zt7.y = ct1.y + cdt*(dky*zt1.y) - afdt*zt7.y;
zt8.x = ct2.x - cdt*(dkx*zt1.x) - afdt*zt8.x;
zt8.y = ct2.y - cdt*(dkx*zt1.y) - afdt*zt8.y;
zt9.x = ct3.x + cdt*(dkx*zt2.x - dky*zt3.x) - afdt*zt9.x;
zt9.y = ct3.y + cdt*(dkx*zt2.y - dky*zt3.y) - afdt*zt9.y;
/* update magnetic field half time step and store electric field */
zt1.x = -zt9.y;
zt1.y = zt9.x;
zt2.x = -zt8.y;
zt2.y = zt8.x;
zt3.x = -zt7.y;
zt3.y = zt7.x;
exyt[k+jk3] = zt7;
exyt[k+nyv+jk3] = zt8;
exyt[k+2*nyv+jk3] = zt9;
ws += (double) (anorm*(zt7.x*zt7.x + zt7.y*zt7.y
+ zt8.x*zt8.x + zt8.y*zt8.y + zt9.x*zt9.x + zt9.y*zt9.y));
zt4.x -= dth*(dky*zt1.x);
zt4.y -= dth*(dky*zt1.y);
zt5.x += dth*(dkx*zt1.x);
zt5.y += dth*(dkx*zt1.y);
zt6.x -= dth*(dkx*zt2.x - dky*zt3.x);
zt6.y -= dth*(dkx*zt2.y - dky*zt3.y);
bxyt[k+jk3] = zt4;
bxyt[k+nyv+jk3] = zt5;
bxyt[k+2*nyv+jk3] = zt6;
wp += (double) (anorm*(zt4.x*zt4.x + zt4.y*zt4.y
+ zt5.x*zt5.x + zt5.y*zt5.y + zt6.x*zt6.x + zt6.y*zt6.y));
/* update magnetic field half time step, ky < 0 */
ct3 = exyt[k1+2*nyv+jk3];
zt1.x = -ct3.y;
            zt1.y = ct3.x;
ct2 = exyt[k1+nyv+jk3];
zt2.x = -ct2.y;
zt2.y = ct2.x;
ct1 = exyt[k1+jk3];
zt3.x = -ct1.y;
zt3.y = ct1.x;
zt4 = bxyt[k1+jk3];
zt5 = bxyt[k1+nyv+jk3];
zt6 = bxyt[k1+2*nyv+jk3];
zt4.x += dth*(dky*zt1.x);
zt4.y += dth*(dky*zt1.y);
zt5.x += dth*(dkx*zt1.x);
zt5.y += dth*(dkx*zt1.y);
zt6.x -= dth*(dkx*zt2.x + dky*zt3.x);
zt6.y -= dth*(dkx*zt2.y + dky*zt3.y);
/* update electric field whole time step */
zt1.x = -zt6.y;
zt1.y = zt6.x;
zt2.x = -zt5.y;
zt2.y = zt5.x;
zt3.x = -zt4.y;
zt3.y = zt4.x;
zt7 = cut[k1+jk3];
zt8 = cut[k1+nyv+jk3];
zt9 = cut[k1+2*nyv+jk3];
zt7.x = ct1.x - cdt*(dky*zt1.x) - afdt*zt7.x;
zt7.y = ct1.y - cdt*(dky*zt1.y) - afdt*zt7.y;
zt8.x = ct2.x - cdt*(dkx*zt1.x) - afdt*zt8.x;
zt8.y = ct2.y - cdt*(dkx*zt1.y) - afdt*zt8.y;
zt9.x = ct3.x + cdt*(dkx*zt2.x + dky*zt3.x) - afdt*zt9.x;
zt9.y = ct3.y + cdt*(dkx*zt2.y + dky*zt3.y) - afdt*zt9.y;
/* update magnetic field half time step and store electric field */
zt1.x = -zt9.y;
zt1.y = zt9.x;
zt2.x = -zt8.y;
zt2.y = zt8.x;
zt3.x = -zt7.y;
zt3.y = zt7.x;
exyt[k1+jk3] = zt7;
exyt[k1+nyv+jk3] = zt8;
exyt[k1+2*nyv+jk3] = zt9;
ws += (double) (anorm*(zt7.x*zt7.x + zt7.y*zt7.y
+ zt8.x*zt8.x + zt8.y*zt8.y + zt9.x*zt9.x + zt9.y*zt9.y));
zt4.x += dth*(dky*zt1.x);
zt4.y += dth*(dky*zt1.y);
zt5.x += dth*(dkx*zt1.x);
zt5.y += dth*(dkx*zt1.y);
zt6.x -= dth*(dkx*zt2.x + dky*zt3.x);
zt6.y -= dth*(dkx*zt2.y + dky*zt3.y);
bxyt[k1+jk3] = zt4;
bxyt[k1+nyv+jk3] = zt5;
bxyt[k1+2*nyv+jk3] = zt6;
wp += (double) (anorm*(zt4.x*zt4.x + zt4.y*zt4.y
+ zt5.x*zt5.x + zt5.y*zt5.y + zt6.x*zt6.x + zt6.y*zt6.y));
}
k += blockDim.x;
}
}
/* mode numbers ky = 0, ny/2 */
if (blockIdx.x==0) {
k1 = nyh;
/* for (j = 1; j < nxh; j++) { */
j = threadIdx.x;
while (j < nxh) {
if (j > 0) {
jj = nyhd*j;
jk3 = 3*nyv*j;
zt1 = ffct[jj];
dkx = dnx*(float) j;
afdt = adt*zt1.y;
/* update magnetic field half time step */
ct3 = exyt[2*nyv+jk3];
zt1.x = -ct3.y;
            zt1.y = ct3.x;
ct2 = exyt[nyv+jk3];
zt2.x = -ct2.y;
zt2.y = ct2.x;
zt5 = bxyt[nyv+jk3];
zt6 = bxyt[2*nyv+jk3];
zt5.x += dth*(dkx*zt1.x);
zt5.y += dth*(dkx*zt1.y);
zt6.x -= dth*(dkx*zt2.x);
zt6.y -= dth*(dkx*zt2.y);
/* update electric field whole time step */
zt1.x = -zt6.y;
zt1.y = zt6.x;
zt2.x = -zt5.y;
zt2.y = zt5.x;
zt8 = cut[nyv+jk3];
zt9 = cut[2*nyv+jk3];
zt8.x = ct2.x - cdt*(dkx*zt1.x) - afdt*zt8.x;
zt8.y = ct2.y - cdt*(dkx*zt1.y) - afdt*zt8.y;
zt9.x = ct3.x + cdt*(dkx*zt2.x) - afdt*zt9.x;
zt9.y = ct3.y + cdt*(dkx*zt2.y) - afdt*zt9.y;
/* update magnetic field half time step and store electric field */
zt1.x = -zt9.y;
zt1.y = zt9.x;
zt2.x = -zt8.y;
zt2.y = zt8.x;
exyt[jk3] = zero;
exyt[nyv+jk3] = zt8;
exyt[2*nyv+jk3] = zt9;
ws += (double) (anorm*(zt8.x*zt8.x + zt8.y*zt8.y
+ zt9.x*zt9.x + zt9.y*zt9.y));
zt5.x += dth*(dkx*zt1.x);
zt5.y += dth*(dkx*zt1.y);
zt6.x -= dth*(dkx*zt2.x);
zt6.y -= dth*(dkx*zt2.y);
bxyt[jk3] = zero;
bxyt[nyv+jk3] = zt5;
bxyt[2*nyv+jk3] = zt6;
wp += (double) (anorm*(zt5.x*zt5.x + zt5.y*zt5.y
+ zt6.x*zt6.x + zt6.y*zt6.y));
bxyt[k1+jk3] = zero;
bxyt[k1+nyv+jk3] = zero;
bxyt[k1+2*nyv+jk3] = zero;
exyt[k1+jk3] = zero;
exyt[k1+nyv+jk3] = zero;
exyt[k1+2*nyv+jk3] = zero;
}
j += blockDim.x;
}
/* mode numbers kx = 0, nx/2 */
nxh1 = 3*nyv*nxh;
/* for (k = 1; k < nyh; k++) { */
k = threadIdx.x;
while (k < nyh) {
if (k > 0) {
k1 = ny - k;
dky = dny*(float) k;
zt1 = ffct[k];
afdt = adt*zt1.y;
/* update magnetic field half time step */
ct3 = exyt[k+2*nyv];
zt1.x = -ct3.y;
            zt1.y = ct3.x;
ct1 = exyt[k];
zt3.x = -ct1.y;
zt3.y = ct1.x;
zt4 = bxyt[k];
zt6 = bxyt[k+2*nyv];
zt4.x -= dth*(dky*zt1.x);
zt4.y -= dth*(dky*zt1.y);
zt6.x += dth*(dky*zt3.x);
zt6.y += dth*(dky*zt3.y);
/* update electric field whole time step */
zt1.x = -zt6.y;
zt1.y = zt6.x;
zt3.x = -zt4.y;
zt3.y = zt4.x;
zt7 = cut[k];
zt9 = cut[k+2*nyv];
zt7.x = ct1.x + cdt*(dky*zt1.x) - afdt*zt7.x;
zt7.y = ct1.y + cdt*(dky*zt1.y) - afdt*zt7.y;
zt9.x = ct3.x - cdt*(dky*zt3.x) - afdt*zt9.x;
zt9.y = ct3.y - cdt*(dky*zt3.y) - afdt*zt9.y;
/* update magnetic field half time step and store electric field */
zt1.x = -zt9.y;
zt1.y = zt9.x;
zt3.x = -zt7.y;
zt3.y = zt7.x;
exyt[k] = zt7;
exyt[k+nyv] = zero;
exyt[k+2*nyv] = zt9;
ws += (double) (anorm*(zt7.x*zt7.x + zt7.y*zt7.y
+ zt9.x*zt9.x + zt9.y*zt9.y));
zt4.x -= dth*(dky*zt1.x);
zt4.y -= dth*(dky*zt1.y);
zt6.x += dth*(dky*zt3.x);
zt6.y += dth*(dky*zt3.y);
bxyt[k] = zt4;
bxyt[k+nyv] = zero;
bxyt[k+2*nyv] = zt6;
wp += (double) (anorm*(zt4.x*zt4.x + zt4.y*zt4.y
+ zt6.x*zt6.x + zt6.y*zt6.y));
zt4.y = -zt4.y;
zt6.y = -zt6.y;
zt7.y = -zt7.y;
zt9.y = -zt9.y;
bxyt[k1] = zt4;
bxyt[k1+nyv] = zero;
bxyt[k1+2*nyv] = zt6;
exyt[k1] = zt7;
exyt[k1+nyv] = zero;
exyt[k1+2*nyv] = zt9;
bxyt[k+nxh1] = zero;
bxyt[k+nyv+nxh1] = zero;
bxyt[k+2*nyv+nxh1] = zero;
exyt[k+nxh1] = zero;
exyt[k+nyv+nxh1] = zero;
exyt[k+2*nyv+nxh1] = zero;
bxyt[k1+nxh1] = zero;
bxyt[k1+nyv+nxh1] = zero;
bxyt[k1+2*nyv+nxh1] = zero;
exyt[k1+nxh1] = zero;
exyt[k1+nyv+nxh1] = zero;
exyt[k1+2*nyv+nxh1] = zero;
}
k += blockDim.x;
}
if (threadIdx.x==0) {
k1 = nyh;
bxyt[0] = zero;
bxyt[nyv] = zero;
bxyt[2*nyv] = zero;
exyt[0] = zero;
exyt[nyv] = zero;
exyt[2*nyv] = zero;
bxyt[k1] = zero;
bxyt[k1+nyv] = zero;
bxyt[k1+2*nyv] = zero;
exyt[k1] = zero;
exyt[k1+nyv] = zero;
exyt[k1+2*nyv] = zero;
bxyt[nxh1] = zero;
bxyt[nyv+nxh1] = zero;
bxyt[2*nyv+nxh1] = zero;
exyt[nxh1] = zero;
exyt[nyv+nxh1] = zero;
exyt[2*nyv+nxh1] = zero;
bxyt[k1+nxh1] = zero;
bxyt[k1+nyv+nxh1] = zero;
bxyt[k1+2*nyv+nxh1] = zero;
exyt[k1+nxh1] = zero;
exyt[k1+nyv+nxh1] = zero;
exyt[k1+2*nyv+nxh1] = zero;
}
}
j = blockIdx.x;
if (j <= nxh) {
/* sum transverse electric field energies for each x co-ordinate */
ss[threadIdx.x] = (float) ws;
/* synchronize threads */
__syncthreads();
lsum2(ss,blockDim.x);
/* normalize transverse electric field energy for each x co-ordinate */
if (threadIdx.x==0)
wf[j] = ss[0]*((float) (nx*ny));
/* sum magnetic energies for each x co-ordinate */
ss[threadIdx.x] = (float) wp;
/* synchronize threads */
__syncthreads();
lsum2(ss,blockDim.x);
/* normalize magnetic energy for each x co-ordinate */
if (threadIdx.x==0)
wm[j] = c2*ss[0]*((float) (nx*ny));
}
return;
}
/*--------------------------------------------------------------------*/
__global__ void gpuemfield2t(float2 fxyt[], float2 exyt[],
float2 ffct[], int isign, int nx, int ny,
int nxvh, int nyv, int nxhd, int nyhd) {
/* this subroutine either adds complex vector fields if isign > 0
or copies complex vector fields if isign < 0
includes additional smoothing
local data */
int i, j, k, nxh, nyh, nxh1, k1, jj, jk3;
float at1;
float2 zero, zt1, zt2;
nxh = nx/2;
nyh = 1 > ny/2 ? 1 : ny/2;
nxh1 = 3*nyv*nxh;
zero.x = 0.0f;
zero.y = 0.0f;
/* add the fields */
if (isign > 0) {
/* for (j = 0; j < nxh; j++) { */
j = blockIdx.x;
if (j < nxh) {
jj = nyhd*j;
jk3 = 3*nyv*j;
/* for (k = 1; k < nyh; k++) { */
k = threadIdx.x;
while (k < nyh) {
if (k > 0) {
k1 = ny - k;
zt1 = ffct[k+jj];
at1 = zt1.y;
for (i = 0; i < 3; i++) {
zt1 = exyt[k+nyv*i+jk3];
zt2 = fxyt[k+nyv*i+jk3];
zt2.x += at1*zt1.x;
zt2.y += at1*zt1.y;
fxyt[k+nyv*i+jk3] = zt2;
zt1 = exyt[k1+nyv*i+jk3];
zt2 = fxyt[k1+nyv*i+jk3];
zt2.x += at1*zt1.x;
zt2.y += at1*zt1.y;
fxyt[k1+nyv*i+jk3] = zt2;
}
}
k += blockDim.x;
}
}
if (blockIdx.x==0) {
k1 = nyh;
/* for (j = 0; j < nxh; j++) { */
j = threadIdx.x;
while (j < nxh) {
jj = nyhd*j;
jk3 = 3*nyv*j;
zt1 = ffct[jj];
at1 = zt1.y;
for (i = 0; i < 3; i++) {
zt1 = exyt[nyv*i+jk3];
zt2 = fxyt[nyv*i+jk3];
zt2.x += at1*zt1.x;
zt2.y += at1*zt1.y;
fxyt[nyv*i+jk3] = zt2;
zt1 = exyt[k1+nyv*i+jk3];
zt2 = fxyt[k1+nyv*i+jk3];
zt2.x += at1*zt1.x;
zt2.y += at1*zt1.y;
fxyt[k1+nyv*i+jk3] = zt2;
}
j += blockDim.x;
}
/* for (k = 1; k < nyh; k++) { */
k = threadIdx.x;
while (k < nyh) {
if (k > 0) {
k1 = ny - k;
for (i = 0; i < 3; i++) {
fxyt[k+nyv*i+nxh1] = zero;
fxyt[k1+nyv*i+nxh1] = zero;
}
}
k += blockDim.x;
}
if (threadIdx.x==0) {
k1 = nyh;
for (i = 0; i < 3; i++) {
fxyt[nyv*i+nxh1] = zero;
fxyt[k1+nyv*i+nxh1] = zero;
}
}
}
}
/* copy the fields */
else if (isign < 0) {
/* for (j = 0; j < nxh; j++) { */
j = blockIdx.x;
if (j < nxh) {
jj = nyhd*j;
jk3 = 3*nyv*j;
/* for (k = 1; k < nyh; k++) { */
k = threadIdx.x;
while (k < nyh) {
if (k > 0) {
k1 = ny - k;
zt1 = ffct[k+jj];
at1 = zt1.y;
for (i = 0; i < 3; i++) {
zt1 = exyt[k+nyv*i+jk3];
zt1.x = at1*zt1.x;
zt1.y = at1*zt1.y;
fxyt[k+nyv*i+jk3] = zt1;
zt1 = exyt[k1+nyv*i+jk3];
zt1.x = at1*zt1.x;
zt1.y = at1*zt1.y;
fxyt[k1+nyv*i+jk3] = zt1;
}
}
k += blockDim.x;
}
}
if (blockIdx.x==0) {
k1 = nyh;
/* for (j = 0; j < nxh; j++) { */
j = threadIdx.x;
while (j < nxh) {
jj = nyhd*j;
jk3 = 3*nyv*j;
zt1 = ffct[jj];
at1 = zt1.y;
for (i = 0; i < 3; i++) {
zt1 = exyt[nyv*i+jk3];
zt1.x = at1*zt1.x;
zt1.y = at1*zt1.y;
fxyt[nyv*i+jk3] = zt1;
zt1 = exyt[k1+nyv*i+jk3];
zt1.x = at1*zt1.x;
zt1.y = at1*zt1.y;
fxyt[k1+nyv*i+jk3] = zt1;
}
j += blockDim.x;
}
/* for (k = 1; k < nyh; k++) { */
k = threadIdx.x;
while (k < nyh) {
if (k > 0) {
k1 = ny - k;
for (i = 0; i < 3; i++) {
fxyt[k+nyv*i+nxh1] = zero;
fxyt[k1+nyv*i+nxh1] = zero;
}
}
k += blockDim.x;
}
if (threadIdx.x==0) {
k1 = nyh;
for (i = 0; i < 3; i++) {
fxyt[nyv*i+nxh1] = zero;
fxyt[k1+nyv*i+nxh1] = zero;
}
}
}
}
return;
}
/*--------------------------------------------------------------------*/
__global__ void gpuctpose4(float2 f[], float2 g[], int nx, int ny,
int nxv, int nyv) {
/* complex transpose using blocking algorithm with gaps */
/* local data */
int j, k, js, ks, joff, koff, mx, mxv;
/* The size of the shared memory array is as follows: */
/* float2 shm2[(mx + 1)*mx]; */
extern __shared__ float2 shm2[];
mx = blockDim.x;
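/* pad each row of the shared tile by one element (mxv = mx + 1) to */
/* avoid shared memory bank conflicts on the transposed access */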
mxv = mx + 1;
joff = mx*blockIdx.x;
koff = mx*blockIdx.y;
js = threadIdx.x;
ks = threadIdx.y;
/* copy into block */
j = js + joff;
k = ks + koff;
if ((j < nx) && (k < ny)) {
shm2[js+mxv*ks] = f[j+nxv*k];
}
__syncthreads();
/* copy out from block */
j = ks + joff;
k = js + koff;
if ((j < nx) && (k < ny)) {
g[k+nyv*j] = shm2[ks+mxv*js];
}
return;
}
/*--------------------------------------------------------------------*/
__global__ void gpuctpose4n(float2 fn[], float2 gn[], int nx, int ny,
int ndim, int nxv, int nyv) {
/* complex vector transpose using blocking algorithm with gaps */
/* ndim = vector dimension */
/* local data */
int i, j, k, js, ks, joff, koff, mx, mxv, nmxv, nnxv, nnyv, jj, kk;
/* The size of the shared memory array is as follows: */
/* float2 shmn2[ndim*(mx + 1)*mx]; */
extern __shared__ float2 shmn2[];
mx = blockDim.x;
mxv = mx + 1;
joff = mx*blockIdx.x;
koff = mx*blockIdx.y;
js = threadIdx.x;
ks = threadIdx.y;
nmxv = ndim*mxv;
nnxv = ndim*nxv;
nnyv = ndim*nyv;
/* copy into block */
j = js + joff;
k = ks + koff;
if ((j < nx) && (k < ny)) {
jj = j + nnxv*k;
kk = js + nmxv*ks;
for (i = 0; i < ndim; i++) {
shmn2[kk+mxv*i] = fn[jj+nxv*i];
}
}
__syncthreads();
/* copy out from block */
j = ks + joff;
k = js + koff;
if ((j < nx) && (k < ny)) {
kk = k + nnyv*j;
jj = ks + nmxv*js;
for (i = 0; i < ndim; i++) {
gn[kk+nyv*i] = shmn2[jj+mxv*i];
}
}
return;
}
/*--------------------------------------------------------------------*/
__global__ void gpufft2rcxs(float2 f[], int isign, int mixup[],
float2 sct[], int indx, int indy, int nyi,
int nyp, int nxhd, int nyd, int nxhyd,
int nxyhd, int nsize) {
/* this subroutine performs the x part of a two dimensional real to
complex fast fourier transform and its inverse, for a subset of y,
using complex arithmetic.
for isign = (-1,1), input: all, output: f
for isign = -1, approximate flop count: N*(5*log2(N) + 19/2)
for isign = 1, approximate flop count: N*(5*log2(N) + 15/2)
where N = (nx/2)*ny
indx/indy = exponent which determines length in x/y direction,
where nx=2**indx, ny=2**indy
if isign = -1, an inverse fourier transform in x is performed
f[m][n] = (1/nx*ny)*sum(f[k][j]*exp(-sqrt(-1)*2pi*n*j/nx))
if isign = 1, a forward fourier transform in x is performed
f[k][j] = sum(f[m][n]*exp(sqrt(-1)*2pi*n*j/nx))
mixup = array of bit reversed addresses
sct = sine/cosine table
nyi = initial y index used
nyp = number of y indices used
nxhd = first dimension of f >= nx/2+1
nyd = second dimension of f >= ny
nxhyd = maximum of (nx/2,ny)
nxyhd = maximum of (nx,ny)/2
nsize = amount of scratch complex memory used
fourier coefficients are stored as follows:
f[k][j].x, f[k][j].y = real, imaginary part of mode j,k, where
0 <= j < nx/2+1 and 0 <= k < ny
written by viktor k. decyk, ucla
local data */
int indx1, indx1y, nx, nxh, nxhh, ny, nxy, nxhy, nyt;
int nrx, i, j, k, l, j1, j2, k1, k2, ns, ns2, km, kmr, jj, kk;
int n, nn, in, nt, nh;
float ani, at1, at2;
float2 t1, t2, t3;
/* The size of the shared memory array is as follows: */
/* float2 s[nsize]; */
extern __shared__ float2 s[];
indx1 = indx - 1;
indx1y = indx1 > indy ? indx1 : indy;
nx = 1L<<indx;
nxh = nx/2;
nxhh = nx/4;
ny = 1L<<indy;
nxy = nx > ny ? nx : ny;
nxhy = 1L<<indx1y;
nyt = nyi + nyp - 1;
/* calculate extent of shared memory usage: */
/* nn = size of shared memory in x */
nn = nxh;
in = 0;
while (nn > nsize) {
nn = nn/2;
in += 1;
}
/* nt = number of iterations in x */
nt = 1L<<in;
in = indx1 - in;
nh = nn/2;
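/* nn = chunk size (complex elements) that fits in shared memory, */
/* nt = number of chunks covering nxh, in = log2(nn) = number of */
/* butterfly stages performed on shared-memory data */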
/* inverse fourier transform */
if (isign < 0) {
/* bit-reverse array elements in x */
nrx = nxhy/nxh;
/* for (k = nyi-1; k < nyt; k++) { */
k = blockIdx.x + nyi - 1;
if (k < nyt) {
jj = nxhd*k;
/* for (j = 0; j < nxh; j++) { */
j = threadIdx.x;
while (j < nxh) {
j1 = (mixup[j] - 1)/nrx;
if (j < j1) {
t1 = f[j1+jj];
f[j1+jj] = f[j+jj];
f[j+jj] = t1;
}
j += blockDim.x;
}
/* synchronize threads */
__syncthreads();
}
/* copy data to local memory */
nrx = nxy/nxh;
/* for (i = nyi-1; i < nyt; i++) { */
i = blockIdx.x + nyi - 1;
if (i < nyt) {
jj = nxhd*i;
for (n = 0; n < nt; n++) {
/* for (kk = 0; kk < nn; kk++) { */
kk = threadIdx.x;
while (kk < nn) {
s[kk] = f[kk+nn*n+jj];
kk += blockDim.x;
}
/* synchronize threads */
__syncthreads();
/* transform using local data in x */
ns = 1;
for (l = 0; l < in; l++) {
ns2 = ns + ns;
km = nxhh/ns;
kmr = km*nrx;
/* for (kk = 0; kk < nh; kk++) { */
kk = threadIdx.x;
while (kk < nh) {
k = kk/ns;
j = kk - ns*k;
k1 = ns2*k;
k2 = k1 + ns;
j1 = j + k1;
j2 = j + k2;
t1 = sct[kmr*j];
t2 = s[j2];
at1 = t1.x*t2.x - t1.y*t2.y;
at2 = t1.x*t2.y + t1.y*t2.x;
t2 = s[j1];
t3.x = t2.x - at1;
t3.y = t2.y - at2;
s[j2] = t3;
t3.x = t2.x + at1;
t3.y = t2.y + at2;
s[j1] = t3;
kk += blockDim.x;
}
ns = ns2;
/* synchronize threads */
__syncthreads();
}
/* copy data to global memory */
/* for (kk = 0; kk < nn; kk++) { */
kk = threadIdx.x;
while (kk < nn) {
f[kk+nn*n+jj] = s[kk];
kk += blockDim.x;
}
/* synchronize threads */
__syncthreads();
}
/* transform using global data in x */
ns = 1L<<in;
for (l = in; l < indx1; l++) {
ns2 = ns + ns;
km = nxhh/ns;
kmr = km*nrx;
/* for (kk = 0; kk < nxhh; kk++) { */
kk = threadIdx.x;
while (kk < nxhh) {
k = kk/ns;
j = kk - ns*k;
k1 = ns2*k;
k2 = k1 + ns;
j1 = j + k1;
j2 = j + k2;
t1 = sct[kmr*j];
t2 = f[j2+jj];
at1 = t1.x*t2.x - t1.y*t2.y;
at2 = t1.x*t2.y + t1.y*t2.x;
t2 = f[j1+jj];
t3.x = t2.x - at1;
t3.y = t2.y - at2;
f[j2+jj] = t3;
t3.x = t2.x + at1;
t3.y = t2.y + at2;
f[j1+jj] = t3;
kk += blockDim.x;
}
ns = ns2;
/* synchronize threads */
__syncthreads();
}
}
/* unscramble coefficients and normalize */
kmr = nxy/nx;
ani = 0.5f/(((float) nx)*((float) ny));
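/* the loop below converts the nxh-point complex FFT of the packed real data
   into the first nx/2+1 coefficients of the real transform and applies the
   1/(nx*ny) normalization; thread 0 handles the zero-frequency and Nyquist
   modes, which are purely real and are stored at indices 0 and nxh (hence
   the requirement nxhd >= nx/2+1) */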
/* for (k = nyi-1; k < nyt; k++) */
k = blockIdx.x + nyi - 1;
if (k < nyt) {
jj = nxhd*k;
/* for (j = 1; j < nxhh; j++) { */
j = threadIdx.x;
while (j < nxhh) {
if (j > 0) {
t3 = sct[kmr*j];
at1 = t3.y;
at2 = -t3.x;
t2 = f[nxh-j+jj];
t2.y = -t2.y;
t3 = f[j+jj];
t1.x = t3.x + t2.x;
t1.y = t3.y + t2.y;
t3.x -= t2.x;
t3.y -= t2.y;
t2.x = t3.x*at1 - t3.y*at2;
t2.y = t3.x*at2 + t3.y*at1;
t3.x = ani*(t1.x + t2.x);
t3.y = ani*(t1.y + t2.y);
f[j+jj] = t3;
t3.x = ani*(t1.x - t2.x);
t3.y = ani*(t2.y - t1.y);
f[nxh-j+jj] = t3;
}
j += blockDim.x;
}
if (threadIdx.x==0) {
ani = 2.0f*ani;
t3 = f[nxhh+jj];
t3.x = ani*t3.x;
t3.y = -ani*t3.y;
f[nxhh+jj] = t3;
t3 = f[jj];
at1 = t3.x;
at2 = t3.y;
t3.x = ani*(at1 - at2);
t3.y = 0.0f;
f[nxh+jj] = t3;
t3.x = ani*(at1 + at2);
f[jj] = t3;
}
/* synchronize threads */
__syncthreads();
}
}
/* forward fourier transform */
if (isign > 0) {
/* scramble coefficients */
kmr = nxy/nx;
/* for (k = nyi-1; k < nyt; k++) { */
k = blockIdx.x + nyi - 1;
if (k < nyt) {
jj = nxhd*k;
/* for (j = 1; j < nxhh; j++) { */
j = threadIdx.x;
while (j < nxhh) {
if (j > 0) {
t3 = sct[kmr*j];
at1 = t3.y;
at2 = t3.x;
t2 = f[nxh-j+jj];
t2.y = -t2.y;
t3 = f[j+jj];
t1.x = t3.x + t2.x;
t1.y = t3.y + t2.y;
t3.x -= t2.x;
t3.y -= t2.y;
t2.x = t3.x*at1 - t3.y*at2;
t2.y = t3.x*at2 + t3.y*at1;
t3.x = t1.x + t2.x;
t3.y = t1.y + t2.y;
f[j+jj] = t3;
t3.x = t1.x - t2.x;
t3.y = t2.y - t1.y;
f[nxh-j+jj] = t3;
}
j += blockDim.x;
}
if (threadIdx.x==0) {
t3 = f[nxhh+jj];
t3.x = 2.0f*t3.x;
t3.y = -2.0f*t3.y;
f[nxhh+jj] = t3;
t3 = f[jj];
at1 = t3.x;
t3 = f[nxh+jj];
at2 = t3.x;
t3.x = at1 + at2;
t3.y = at1 - at2;
f[jj] = t3;
}
/* synchronize threads */
__syncthreads();
}
/* bit-reverse array elements in x */
nrx = nxhy/nxh;
/* for (k = nyi-1; k < nyt; k++) { */
k = blockIdx.x + nyi - 1;
if (k < nyt) {
jj = nxhd*k;
/* for (j = 0; j < nxh; j++) { */
j = threadIdx.x;
while (j < nxh) {
j1 = (mixup[j] - 1)/nrx;
if (j < j1) {
t1 = f[j1+jj];
f[j1+jj] = f[j+jj];
f[j+jj] = t1;
}
j += blockDim.x;
}
/* synchronize threads */
__syncthreads();
}
/* copy data to local memory */
nrx = nxy/nxh;
/* for (i = nyi-1; i < nyt; i++) { */
i = blockIdx.x + nyi - 1;
if (i < nyt) {
jj = nxhd*i;
for (n = 0; n < nt; n++) {
/* for (kk = 0; kk < nn; kk++) { */
kk = threadIdx.x;
while (kk < nn) {
s[kk] = f[kk+nn*n+jj];
kk += blockDim.x;
}
/* synchronize threads */
__syncthreads();
/* transform using local data in x */
ns = 1;
for (l = 0; l < in; l++) {
ns2 = ns + ns;
km = nxhh/ns;
kmr = km*nrx;
/* for (kk = 0; kk < nh; kk++) { */
kk = threadIdx.x;
while (kk < nh) {
k = kk/ns;
j = kk - ns*k;
k1 = ns2*k;
k2 = k1 + ns;
j1 = j + k1;
j2 = j + k2;
t1 = sct[kmr*j];
t1.y = -t1.y;
t2 = s[j2];
at1 = t1.x*t2.x - t1.y*t2.y;
at2 = t1.x*t2.y + t1.y*t2.x;
t2 = s[j1];
t3.x = t2.x - at1;
t3.y = t2.y - at2;
s[j2] = t3;
t3.x = t2.x + at1;
t3.y = t2.y + at2;
s[j1] = t3;
kk += blockDim.x;
}
ns = ns2;
/* synchronize threads */
__syncthreads();
}
/* copy data to global memory */
/* for (kk = 0; kk < nn; kk++) { */
kk = threadIdx.x;
while (kk < nn) {
f[kk+nn*n+jj] = s[kk];
kk += blockDim.x;
}
/* synchronize threads */
__syncthreads();
}
/* transform using global data in x */
ns = 1L<<in;
for (l = in; l < indx1; l++) {
ns2 = ns + ns;
km = nxhh/ns;
kmr = km*nrx;
/* for (kk = 0; kk < nxhh; kk++) { */
kk = threadIdx.x;
while (kk < nxhh) {
k = kk/ns;
j = kk - ns*k;
k1 = ns2*k;
k2 = k1 + ns;
j1 = j + k1;
j2 = j + k2;
t1 = sct[kmr*j];
t1.y = -t1.y;
t2 = f[j2+jj];
at1 = t1.x*t2.x - t1.y*t2.y;
at2 = t1.x*t2.y + t1.y*t2.x;
t2 = f[j1+jj];
t3.x = t2.x - at1;
t3.y = t2.y - at2;
f[j2+jj] = t3;
t3.x = t2.x + at1;
t3.y = t2.y + at2;
f[j1+jj] = t3;
kk += blockDim.x;
}
ns = ns2;
/* synchronize threads */
__syncthreads();
}
}
}
return;
}
/*--------------------------------------------------------------------*/
__global__ void gpufft2rcys(float2 g[], int isign, int mixup[],
float2 sct[], int indx, int indy, int nxi,
int nxp, int nxhd, int nyd, int nxhyd,
int nxyhd, int nsize) {
/* this subroutine performs the y part of a two dimensional real to
complex fast fourier transform and its inverse, for a subset of x,
using complex arithmetic, with data not packed
for isign = (-1,1), input: all, output: g
for isign = -1, approximate flop count: N*(5*log2(N) + 19/2)
for isign = 1, approximate flop count: N*(5*log2(N) + 15/2)
where N = (nx/2)*ny
indx/indy = exponent which determines length in x/y direction,
where nx=2**indx, ny=2**indy
if isign = -1, an inverse fourier transform in y is performed
g[n][m] = sum(g[j][k]*exp(-sqrt(-1)*2pi*m*k/ny))
if isign = 1, a forward fourier transform in y is performed
g[j][k] = sum(g[n][m]*exp(sqrt(-1)*2pi*m*k/ny))
mixup = array of bit reversed addresses
sct = sine/cosine table
nxi = initial x index used
nxp = number of x indices used
nxhd = second dimension of g >= nx/2+1
nyd = first dimension of g >= ny
nxhyd = maximum of (nx/2,ny)
nxyhd = maximum of (nx,ny)/2
nsize = amount of scratch complex memory used
fourier coefficients are stored as follows:
   g[j][k].x, g[j][k].y = real, imaginary part of mode j,k, where
0 <= j < nx/2+1 and 0 <= k < ny
written by viktor k. decyk, ucla
local data */
int indx1, indx1y, nx, ny, nyh, nxy, nxhy, nxt;
int nry, i, j, k, l, j1, j2, k1, k2, ns, ns2, km, kmr, koff, kk;
int n, nn, in, nt, nh;
float at1, at2;
float2 t1, t2, t3;
/* The size of the shared memory array is as follows: */
/* float2 s[nsize]; */
extern __shared__ float2 s[];
indx1 = indx - 1;
indx1y = indx1 > indy ? indx1 : indy;
nx = 1L<<indx;
ny = 1L<<indy;
nyh = ny/2;
nxy = nx > ny ? nx : ny;
nxhy = 1L<<indx1y;
nxt = nxi + nxp - 1;
/* calculate extent of shared memory usage: */
/* nn = size of shared memory in y */
nn = ny;
in = 0;
while (nn > nsize) {
nn = nn/2;
in += 1;
}
/* nt = number of iterations in y */
nt = 1L<<in;
in = indy - in;
nh = nn/2;
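/* same tiling as in the x kernel, but sized from ny: e.g., if ny = 2048 and
   nsize = 1024, then nn = 1024, nt = 2 passes, nh = 512, and all but the
   last butterfly stage run in shared memory */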
/* bit-reverse array elements in y */
nry = nxhy/ny;
/* for (j = nxi-1; j < nxt; j++) { */
j = blockIdx.x + nxi - 1;
if (j < nxt) {
kk = nyd*j;
/* for (k = 0; k < ny; k++) { */
k = threadIdx.x;
while (k < ny) {
k1 = (mixup[k] - 1)/nry;
if (k < k1) {
t1 = g[k1+kk];
g[k1+kk] = g[k+kk];
g[k+kk] = t1;
}
k += blockDim.x;
}
/* synchronize threads */
__syncthreads();
}
nry = nxy/ny;
/* inverse fourier transform in y */
if (isign < 0) {
/* copy data to local memory */
/* for (i = nxi-1; i < nxt; i++) { */
i = blockIdx.x + nxi - 1;
if (i < nxt) {
koff = nyd*i;
for (n = 0; n < nt; n++) {
/* for (kk = 0; kk < nn; kk++) { */
kk = threadIdx.x;
while (kk < nn) {
s[kk] = g[kk+nn*n+koff];
kk += blockDim.x;
}
/* synchronize threads */
__syncthreads();
/* transform using local data in y */
ns = 1;
for (l = 0; l < in; l++) {
ns2 = ns + ns;
km = nyh/ns;
kmr = km*nry;
/* for (kk = 0; kk < nh; kk++) { */
kk = threadIdx.x;
while (kk < nh) {
k = kk/ns;
j = kk - ns*k;
k1 = ns2*k;
k2 = k1 + ns;
j1 = j + k1;
j2 = j + k2;
t1 = sct[kmr*j];
t2 = s[j2];
at1 = t1.x*t2.x - t1.y*t2.y;
at2 = t1.x*t2.y + t1.y*t2.x;
t3.x = t2.x - at1;
t3.y = t2.y - at2;
t2 = s[j1];
t3.x = t2.x - at1;
t3.y = t2.y - at2;
s[j2] = t3;
t3.x = t2.x + at1;
t3.y = t2.y + at2;
s[j1] = t3;
kk += blockDim.x;
}
ns = ns2;
/* synchronize threads */
__syncthreads();
}
/* copy data to global memory */
/* for (kk = 0; kk < nn; kk++) { */
kk = threadIdx.x;
while (kk < nn) {
g[kk+nn*n+koff] = s[kk];
kk += blockDim.x;
}
/* synchronize threads */
__syncthreads();
}
/* transform using global data in y */
ns = 1L<<in;
for (l = in; l < indy; l++) {
ns2 = ns + ns;
km = nyh/ns;
kmr = km*nry;
/* for (kk = 0; kk < nyh; kk++) { */
kk = threadIdx.x;
while (kk < nyh) {
k = kk/ns;
j = kk - ns*k;
k1 = ns2*k;
k2 = k1 + ns;
j1 = j + k1;
j2 = j + k2;
t1 = sct[kmr*j];
t2 = g[j2+koff];
at1 = t1.x*t2.x - t1.y*t2.y;
at2 = t1.x*t2.y + t1.y*t2.x;
t3.x = t2.x - at1;
t3.y = t2.y - at2;
t2 = g[j1+koff];
t3.x = t2.x - at1;
t3.y = t2.y - at2;
g[j2+koff] = t3;
t3.x = t2.x + at1;
t3.y = t2.y + at2;
g[j1+koff] = t3;
kk += blockDim.x;
}
ns = ns2;
/* synchronize threads */
__syncthreads();
}
}
}
/* forward fourier transform in y */
if (isign > 0) {
/* copy data to local memory */
/* for (i = nxi-1; i < nxt; i++) { */
i = blockIdx.x + nxi - 1;
if (i < nxt) {
koff = nyd*i;
for (n = 0; n < nt; n++) {
/* for (kk = 0; kk < nn; kk++) { */
kk = threadIdx.x;
while (kk < nn) {
s[kk] = g[kk+nn*n+koff];
kk += blockDim.x;
}
/* synchronize threads */
__syncthreads();
/* transform using local data in y */
ns = 1;
for (l = 0; l < in; l++) {
ns2 = ns + ns;
km = nyh/ns;
kmr = km*nry;
/* for (kk = 0; kk < nh; kk++) { */
kk = threadIdx.x;
while (kk < nh) {
k = kk/ns;
j = kk - ns*k;
k1 = ns2*k;
k2 = k1 + ns;
j1 = j + k1;
j2 = j + k2;
t1 = sct[kmr*j];
t1.y = -t1.y;
t2 = s[j2];
at1 = t1.x*t2.x - t1.y*t2.y;
at2 = t1.x*t2.y + t1.y*t2.x;
t3.x = t2.x - at1;
t3.y = t2.y - at2;
t2 = s[j1];
t3.x = t2.x - at1;
t3.y = t2.y - at2;
s[j2] = t3;
t3.x = t2.x + at1;
t3.y = t2.y + at2;
s[j1] = t3;
kk += blockDim.x;
}
ns = ns2;
/* synchronize threads */
__syncthreads();
}
/* copy data to global memory */
/* for (kk = 0; kk < nn; kk++) { */
kk = threadIdx.x;
while (kk < nn) {
g[kk+nn*n+koff] = s[kk];
kk += blockDim.x;
}
/* synchronize threads */
__syncthreads();
}
/* transform using global data in y */
ns = 1L<<in;
for (l = in; l < indy; l++) {
ns2 = ns + ns;
km = nyh/ns;
kmr = km*nry;
/* for (kk = 0; kk < nyh; kk++) { */
kk = threadIdx.x;
while (kk < nyh) {
k = kk/ns;
j = kk - ns*k;
k1 = ns2*k;
k2 = k1 + ns;
j1 = j + k1;
j2 = j + k2;
t1 = sct[kmr*j];
t1.y = -t1.y;
t2 = g[j2+koff];
at1 = t1.x*t2.x - t1.y*t2.y;
at2 = t1.x*t2.y + t1.y*t2.x;
t3.x = t2.x - at1;
t3.y = t2.y - at2;
t2 = g[j1+koff];
t3.x = t2.x - at1;
t3.y = t2.y - at2;
g[j2+koff] = t3;
t3.x = t2.x + at1;
t3.y = t2.y + at2;
g[j1+koff] = t3;
kk += blockDim.x;
}
ns = ns2;
/* synchronize threads */
__syncthreads();
}
}
}
return;
}
/*--------------------------------------------------------------------*/
__global__ void gpusum1(float a[], float *sa, int nx) {
/* 1d serial sum reduction */
/* nx = length of data */
/* sa = sum(a) */
/* local data */
int j, js, jb, mx, joff, mxm;
float t;
/* The size of the shared memory array is as follows: */
/* ss[blockDim.x]; */
extern __shared__ float ss[];
mx = blockDim.x;
js = threadIdx.x;
jb = blockIdx.x;
joff = mx*jb;
j = js + joff;
/* copy global data to shared memory */
if (j < nx) ss[js] = a[j];
/* synchronize to make sure each thread in block has the data */
__syncthreads();
if (js==0) {
mxm = nx - joff;
if (mxm > mx) mxm = mx;
/* perform serial local sum reduction: result in t */
t = 0.0f;
for (j = 0; j < mxm; j++) {
t += ss[j];
}
/* accumulate results to global memory for each block */
/* for devices with compute capability 2.x */
atomicAdd(&sa[0],t);
}
return;
}
/*--------------------------------------------------------------------*/
__global__ void gpusum2(float a[], float d[], int nx) {
/* segmented 1d sum reductions, each of length mx = blockDim.x */
/* nx = length of data */
/* forall (j = 1:nbx); d(j) = sum(a(1+mx*(j-1):min(nx,mx*j))) */
/* local data */
int j, js, jb, mx, joff, mxm;
/* The size of the shared memory array is as follows: */
/* ss[blockDim.x]; */
extern __shared__ float ss[];
mx = blockDim.x;
js = threadIdx.x;
jb = blockIdx.x;
joff = mx*jb;
j = js + joff;
/* copy global data to shared memory */
if (j < nx) ss[js] = a[j];
/* synchronize to make sure each thread in block has the data */
__syncthreads();
mxm = nx - joff;
if (mxm > mx) mxm = mx;
/* perform parallel local sum reduction: result in ss[0] */
lsum2(ss,mxm);
/* write out result to global memory for each block */
if (js==0) d[jb] = ss[0];
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpubppush23l(float *ppart, float *fxy, float *bxy,
int *kpic, float qbm, float dt, float dtc,
float *ek, int idimp, int nppmx, int nx,
int ny, int mx, int my, int nxv, int nyv,
int mx1, int mxy1, int ipbc) {
/* Push Interface for C */
int n, m, ns;
dim3 dimBlock(nblock_size);
n = mxy1;
m = (n - 1)/maxgsx + 1;
n = n < maxgsx ? n : maxgsx;
dim3 dimGrid(n,m);
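/* one thread block per particle tile: since a 1D grid is limited to maxgsx
   blocks in x, the mxy1 tiles are folded into a 2D grid; e.g., if
   maxgsx = 65535 and mxy1 = 100000 this gives dimGrid(65535,2), and the
   kernel presumably ignores the excess blocks */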
ns = 6*(mx + 1)*(my + 1)*sizeof(float);
n = nblock_size*sizeof(float);
ns = ns > n ? ns : n;
crc = cudaGetLastError();
gpubppush23l<<<dimGrid,dimBlock,ns>>>(ppart,fxy,bxy,kpic,qbm,dt,dtc,
ek,idimp,nppmx,nx,ny,mx,my,nxv,
nyv,mx1,mxy1,ipbc);
cudaThreadSynchronize();
crc = cudaGetLastError();
if (crc) {
printf("gpubppush23l error=%d:%s\n",crc,cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpubppushf23l(float *ppart, float *fxy, float *bxy,
int *kpic, int *ncl, int *ihole,
float qbm, float dt, float dtc,
float *ek, int idimp, int nppmx, int nx,
int ny, int mx, int my, int nxv, int nyv,
int mx1, int mxy1, int ntmax, int *irc) {
/* Push Interface for C */
int n, m, ns;
dim3 dimBlock(nblock_size);
n = mxy1;
m = (n - 1)/maxgsx + 1;
n = n < maxgsx ? n : maxgsx;
dim3 dimGrid(n,m);
ns = 6*(mx + 1)*(my + 1)*sizeof(float) + nblock_size*sizeof(int);
n = nblock_size*sizeof(float);
ns = ns > n ? ns : n;
ns += 9*sizeof(int);
crc = cudaGetLastError();
gpubppushf23l<<<dimGrid,dimBlock,ns>>>(ppart,fxy,bxy,kpic,ncl,ihole,
qbm,dt,dtc,ek,idimp,nppmx,nx,
ny,mx,my,nxv,nyv,mx1,mxy1,
ntmax,irc);
cudaThreadSynchronize();
crc = cudaGetLastError();
if (crc) {
printf("gpubppushf23l error=%d:%s\n",crc,cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpurbppush23l(float ppart[], float fxy[], float bxy[],
int kpic[], float qbm, float dt,
float dtc, float ci, float *ek,
int idimp, int nppmx, int nx, int ny,
int mx, int my, int nxv, int nyv,
int mx1, int mxy1, int ipbc) {
/* Push Interface for C */
int n, m, ns;
dim3 dimBlock(nblock_size);
n = mxy1;
m = (n - 1)/maxgsx + 1;
n = n < maxgsx ? n : maxgsx;
dim3 dimGrid(n,m);
ns = 6*(mx + 1)*(my + 1)*sizeof(float);
n = nblock_size*sizeof(float);
ns = ns > n ? ns : n;
crc = cudaGetLastError();
gpurbppush23l<<<dimGrid,dimBlock,ns>>>(ppart,fxy,bxy,kpic,qbm,dt,dtc,
ci,ek,idimp,nppmx,nx,ny,mx,my,
nxv,nyv,mx1,mxy1,ipbc);
cudaThreadSynchronize();
crc = cudaGetLastError();
if (crc) {
printf("gpurbppush23l error=%d:%s\n",crc,cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpurbppushf23l(float *ppart, float *fxy, float *bxy,
int *kpic, int *ncl, int *ihole,
float qbm, float dt, float dtc,
float ci, float *ek, int idimp,
int nppmx, int nx, int ny, int mx,
int my, int nxv, int nyv, int mx1,
int mxy1, int ntmax, int *irc) {
/* Push Interface for C */
int n, m, ns;
dim3 dimBlock(nblock_size);
n = mxy1;
m = (n - 1)/maxgsx + 1;
n = n < maxgsx ? n : maxgsx;
dim3 dimGrid(n,m);
ns = 6*(mx + 1)*(my + 1)*sizeof(float) + nblock_size*sizeof(int);
n = nblock_size*sizeof(float);
ns = ns > n ? ns : n;
ns += 9*sizeof(int);
crc = cudaGetLastError();
gpurbppushf23l<<<dimGrid,dimBlock,ns>>>(ppart,fxy,bxy,kpic,ncl,ihole,
qbm,dt,dtc,ci,ek,idimp,nppmx,
nx,ny,mx,my,nxv,nyv,mx1,mxy1,
ntmax,irc);
cudaThreadSynchronize();
crc = cudaGetLastError();
if (crc) {
printf("gpurbppushf23l error=%d:%s\n",crc,
cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpu2ppost2l(float *ppart, float *q, int *kpic,
float qm, int nppmx, int idimp, int mx,
int my, int nxv, int nyv, int mx1,
int mxy1) {
/* Deposit Interface for C */
int n, m, ns;
dim3 dimBlock(nblock_size);
n = mxy1;
m = (n - 1)/maxgsx + 1;
n = n < maxgsx ? n : maxgsx;
dim3 dimGrid(n,m);
ns = (mx + 1)*(my + 1)*sizeof(float);
crc = cudaGetLastError();
gpu2ppost2l<<<dimGrid,dimBlock,ns>>>(ppart,q,kpic,qm,nppmx,idimp,mx,
my,nxv,nyv,mx1,mxy1);
cudaThreadSynchronize();
crc = cudaGetLastError();
if (crc) {
printf("gpu2ppost2l error=%d:%s\n",crc,cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpu2jppost2l(float *ppart, float *cu, int *kpic,
float qm, float dt, int nppmx, int idimp,
int nx, int ny, int mx, int my, int nxv,
int nyv, int mx1, int mxy1, int ipbc) {
/* Current Deposit Interface for C */
int n, m, ns;
dim3 dimBlock(nblock_size);
n = mxy1;
m = (n - 1)/maxgsx + 1;
n = n < maxgsx ? n : maxgsx;
dim3 dimGrid(n,m);
ns = 3*(mx + 1)*(my + 1)*sizeof(float);
crc = cudaGetLastError();
gpu2jppost2l<<<dimGrid,dimBlock,ns>>>(ppart,cu,kpic,qm,dt,nppmx,
idimp,nx,ny,mx,my,nxv,nyv,mx1,
mxy1,ipbc);
cudaThreadSynchronize();
crc = cudaGetLastError();
if (crc) {
printf("gpu2jppost2l error=%d:%s\n",crc,cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpu2jppostf2l(float *ppart, float *cu, int *kpic,
int *ncl, int *ihole, float qm, float dt,
int nppmx, int idimp, int nx, int ny,
int mx, int my, int nxv, int nyv, int mx1,
int mxy1, int ntmax, int *irc) {
/* Current Deposit Interface for C */
int n, m, ns;
dim3 dimBlock(nblock_size);
n = mxy1;
m = (n - 1)/maxgsx + 1;
n = n < maxgsx ? n : maxgsx;
dim3 dimGrid(n,m);
ns = 3*(mx + 1)*(my + 1)*sizeof(float) + (nblock_size+9)*sizeof(int);
crc = cudaGetLastError();
gpu2jppostf2l<<<dimGrid,dimBlock,ns>>>(ppart,cu,kpic,ncl,ihole,qm,dt,
nppmx,idimp,nx,ny,mx,my,nxv,
nyv,mx1,mxy1,ntmax,irc);
cudaThreadSynchronize();
crc = cudaGetLastError();
if (crc) {
printf("gpu2jppostf2l error=%d:%s\n",crc,cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpu2rjppost2l(float *ppart, float *cu, int *kpic,
float qm, float dt, float ci, int nppmx,
int idimp, int nx, int ny, int mx,
int my, int nxv, int nyv, int mx1,
int mxy1, int ipbc) {
/* Current Deposit Interface for C */
int n, m, ns;
dim3 dimBlock(nblock_size);
n = mxy1;
m = (n - 1)/maxgsx + 1;
n = n < maxgsx ? n : maxgsx;
dim3 dimGrid(n,m);
ns = 3*(mx + 1)*(my + 1)*sizeof(float);
crc = cudaGetLastError();
gpu2rjppost2l<<<dimGrid,dimBlock,ns>>>(ppart,cu,kpic,qm,dt,ci,nppmx,
idimp,nx,ny,mx,my,nxv,nyv,mx1,
mxy1,ipbc);
cudaThreadSynchronize();
crc = cudaGetLastError();
if (crc) {
printf("gpu2rjppost2l error=%d:%s\n",crc,cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpu2rjppostf2l(float *ppart, float *cu, int *kpic,
int *ncl, int *ihole, float qm,
float dt, float ci, int nppmx,
int idimp, int nx, int ny, int mx,
int my, int nxv, int nyv, int mx1,
int mxy1, int ntmax, int *irc) {
/* Current Deposit Interface for C */
int n, m, ns;
dim3 dimBlock(nblock_size);
n = mxy1;
m = (n - 1)/maxgsx + 1;
n = n < maxgsx ? n : maxgsx;
dim3 dimGrid(n,m);
ns = 3*(mx + 1)*(my + 1)*sizeof(float) + (nblock_size+9)*sizeof(int);
crc = cudaGetLastError();
gpu2rjppostf2l<<<dimGrid,dimBlock,ns>>>(ppart,cu,kpic,ncl,ihole,qm,
dt,ci,nppmx,idimp,nx,ny,mx,
my,nxv,nyv,mx1,mxy1,ntmax,
irc);
cudaThreadSynchronize();
crc = cudaGetLastError();
if (crc) {
printf("gpu2rjppostf2l error=%d:%s\n",crc,
cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpucaguard2l(float2 *qc, float *q, int nx, int ny,
int nxe, int nye, int nxvh, int nyv) {
/* Guard Cell Interface for C */
dim3 dimBlock(nblock_size);
dim3 dimGrid(ny);
crc = cudaGetLastError();
gpucaguard2l<<<dimGrid,dimBlock>>>(qc,q,nx,ny,nxe,nye,nxvh,nyv);
cudaThreadSynchronize();
crc = cudaGetLastError();
if (crc) {
printf("gpucaguard2l error=%d:%s\n",crc,cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpucacguard2l(float2 *cuc, float *cu, int nx, int ny,
int nxe, int nye, int nxvh, int nyv) {
/* Guard Cell Interface for C */
dim3 dimBlock(nblock_size);
dim3 dimGrid(ny);
crc = cudaGetLastError();
gpucacguard2l<<<dimGrid,dimBlock>>>(cuc,cu,nx,ny,nxe,nye,nxvh,nyv);
cudaThreadSynchronize();
crc = cudaGetLastError();
if (crc) {
printf("gpucacguard2l error=%d:%s\n",crc,cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpucbguard2l(float2 *bxyc, float *bxy, int nx, int ny,
int nxe, int nye, int nxvh, int nyv) {
/* Guard Cell Interface for C */
dim3 dimBlock(nblock_size);
dim3 dimGrid(ny);
crc = cudaGetLastError();
gpucbguard2l<<<dimGrid,dimBlock>>>(bxyc,bxy,nx,ny,nxe,nye,nxvh,nyv);
cudaThreadSynchronize();
crc = cudaGetLastError();
if (crc) {
printf("gpucbguard2l error=%d:%s\n",crc,cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpuppord2l(float *ppart, float *ppbuff, int *kpic,
int *ncl, int *ihole, int idimp, int nppmx,
int nx, int ny, int mx, int my, int mx1,
int my1, int npbmx, int ntmax, int *irc) {
/* Sort Interface for C */
int mxy1, n, m, ns;
dim3 dimBlock(nblock_size);
mxy1 = mx1*my1;
m = (mxy1 - 1)/maxgsx + 1;
n = mxy1 < maxgsx ? mxy1 : maxgsx;
dim3 dimGrid(n,m);
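/* the reordering is done in three kernel passes: gpuppfnd2l flags particles
   leaving each tile, gpuppmov2l copies them into ppbuff and accumulates ncl,
   and gpuppord2l scatters them back into ppart and updates kpic; the
   intermediate synchronizations are commented out because kernels issued to
   the same (default) stream execute in order, so only the final launch is
   followed by cudaThreadSynchronize */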
/* find which particles are leaving tile */
ns = (nblock_size+9)*sizeof(int);
crc = cudaGetLastError();
gpuppfnd2l<<<dimGrid,dimBlock,ns>>>(ppart,kpic,ncl,ihole,idimp,nppmx,
nx,ny,mx,my,mx1,my1,ntmax,irc);
/* cudaThreadSynchronize(); */
crc = cudaGetLastError();
if (crc) {
printf("gpuppfnd2l error=%d:%s\n",crc,cudaGetErrorString(crc));
exit(1);
}
/* buffer particles that are leaving tile and sum ncl */
ns = 9*sizeof(int);
crc = cudaGetLastError();
gpuppmov2l<<<dimGrid,dimBlock,ns>>>(ppart,ppbuff,ncl,ihole,idimp,
nppmx,mx1,my1,npbmx,ntmax,irc);
/* cudaThreadSynchronize(); */
crc = cudaGetLastError();
if (crc) {
printf("gpuppmov2l error=%d:%s\n",crc,cudaGetErrorString(crc));
exit(1);
}
/* copy incoming particles from ppbuff into ppart, update kpic */
ns = (nblock_size+18)*sizeof(int);
crc = cudaGetLastError();
gpuppord2l<<<dimGrid,dimBlock,ns>>>(ppart,ppbuff,kpic,ncl,ihole,
idimp,nppmx,mx1,my1,npbmx,ntmax,
irc);
cudaThreadSynchronize();
crc = cudaGetLastError();
if (crc) {
printf("gpuppord2l error=%d:%s\n",crc,cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpuppordf2l(float *ppart, float *ppbuff, int *kpic,
int *ncl, int *ihole, int idimp, int nppmx,
int mx1, int my1, int npbmx, int ntmax,
int *irc) {
/* Sort Interface for C */
int mxy1, n, m, ns;
dim3 dimBlock(nblock_size);
mxy1 = mx1*my1;
m = (mxy1 - 1)/maxgsx + 1;
n = mxy1 < maxgsx ? mxy1 : maxgsx;
dim3 dimGrid(n,m);
/* buffer particles that are leaving tile and sum ncl */
ns = 9*sizeof(int);
crc = cudaGetLastError();
gpuppmov2l<<<dimGrid,dimBlock,ns>>>(ppart,ppbuff,ncl,ihole,idimp,
nppmx,mx1,my1,npbmx,ntmax,irc);
/* cudaThreadSynchronize(); */
crc = cudaGetLastError();
if (crc) {
printf("gpuppmov2l error=%d:%s\n",crc,cudaGetErrorString(crc));
exit(1);
}
/* copy incoming particles from ppbuff into ppart, update kpic */
ns = (nblock_size+18)*sizeof(int);
crc = cudaGetLastError();
gpuppord2l<<<dimGrid,dimBlock,ns>>>(ppart,ppbuff,kpic,ncl,ihole,
idimp,nppmx,mx1,my1,npbmx,ntmax,
irc);
cudaThreadSynchronize();
crc = cudaGetLastError();
if (crc) {
printf("gpuppord2l error=%d:%s\n",crc,cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpupois23t(float2 *qt, float2 *fxyt, float2 *ffct,
float *we, int nx, int ny, int nxvh,
int nyv, int nxhd, int nyhd) {
/* Poisson Solver Interface for C */
int nxh1, ns;
dim3 dimBlock(nblock_size);
nxh1 = nx/2 + 1;
dim3 dimGrid(nxh1);
ns = nblock_size*sizeof(float);
crc = cudaGetLastError();
gpupois23t<<<dimGrid,dimBlock,ns>>>(qt,fxyt,ffct,we,nx,ny,nxvh,nyv,
nxhd,nyhd);
cudaThreadSynchronize();
crc = cudaGetLastError();
if (crc) {
printf("gpupois23t error=%d:%s\n",crc,cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpucuperp2t(float2 *cut, int nx, int ny, int nxvh,
int nyv) {
/* Poisson Solver Interface for C */
int nxh1;
dim3 dimBlock(nblock_size);
nxh1 = nx/2 + 1;
dim3 dimGrid(nxh1);
crc = cudaGetLastError();
gpucuperp2t<<<dimGrid,dimBlock>>>(cut,nx,ny,nxvh,nyv);
cudaThreadSynchronize();
crc = cudaGetLastError();
if (crc) {
printf("gpucuperp2t error=%d:%s\n",crc,cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpuibpois23t(float2 *cut, float2 *bxyt, float2 *ffct,
float ci, float *wm, int nx, int ny,
int nxvh, int nyv, int nxhd, int nyhd) {
/* Poisson Solver Interface for C */
int nxh1, ns;
dim3 dimBlock(nblock_size);
nxh1 = nx/2 + 1;
dim3 dimGrid(nxh1);
ns = nblock_size*sizeof(float);
crc = cudaGetLastError();
gpuibpois23t<<<dimGrid,dimBlock,ns>>>(cut,bxyt,ffct,ci,wm,nx,ny,nxvh,
nyv,nxhd,nyhd);
cudaThreadSynchronize();
crc = cudaGetLastError();
if (crc) {
printf("gpuibpois23t error=%d:%s\n",crc,cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpumaxwel2t(float2 *exyt, float2 *bxyt, float2 *cut,
float2 *ffct, float ci, float dt,
float *wf, float *wm, int nx, int ny,
int nxvh, int nyv, int nxhd, int nyhd) {
/* Maxwell Solver Interface for C */
int nxh1, ns;
dim3 dimBlock(nblock_size);
nxh1 = nx/2 + 1;
dim3 dimGrid(nxh1);
ns = nblock_size*sizeof(float);
crc = cudaGetLastError();
gpumaxwel2t<<<dimGrid,dimBlock,ns>>>(exyt,bxyt,cut,ffct,ci, dt,wf,wm,
nx,ny,nxvh,nyv,nxhd,nyhd);
cudaThreadSynchronize();
crc = cudaGetLastError();
if (crc) {
printf("gpumaxwel2t error=%d:%s\n",crc,cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpuemfield2t(float2 *fxyt, float2 *exyt, float2 *ffct,
int isign, int nx, int ny, int nxvh,
int nyv, int nxhd, int nyhd) {
/* Maxwell Solver Interface for C */
int nxh1;
dim3 dimBlock(nblock_size);
nxh1 = nx/2 + 1;
dim3 dimGrid(nxh1);
crc = cudaGetLastError();
gpuemfield2t<<<dimGrid,dimBlock>>>(fxyt,exyt,ffct,isign,nx,ny,nxvh,
nyv,nxhd,nyhd);
cudaThreadSynchronize();
crc = cudaGetLastError();
if (crc) {
printf("gpuemfield2t error=%d:%s\n",crc,cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpuwfft2rcs(float2 *f, float2 *g, int isign,
int *mixup, float2 *sct, int indx,
int indy, int nxhd, int nyd, int nxhyd,
int nxyhd) {
/* wrapper function for real to complex fft, without packed data */
/* if isign = -1, f = input, g = output */
/* if isign = 1, g = input, f = output */
/* nxhd must be >= nx/2 + 1 */
/* local data */
int nxh, nxh1, ny, nsize, ns;
int nxi = 1, nyi = 1, mx = 16;
dim3 dimBlock(nblock_size);
dim3 dimBlockt(mx,mx);
/* calculate range of indices */
nxh = 1L<<(indx - 1);
nxh1 = nxh + 1;
ny = 1L<<indy;
dim3 dimGridx(nxh1);
dim3 dimGridy(ny);
dim3 dimGridtx((nxh1-1)/mx+1,(ny-1)/mx+1);
dim3 dimGridty((ny-1)/mx+1,(nxh1-1)/mx+1);
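/* for isign < 0 the result is left in the transposed (y-major) array g and
   the transpose back to f is commented out, presumably because the field
   solvers operate directly on the transposed layout; the forward transform
   correspondingly starts from g and ends by transposing back into f before
   the final x FFT */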
/* inverse fourier transform */
if (isign < 0) {
/* perform x fft */
nsize = nxh < 1024 ? nxh : 1024;
ns = nsize*sizeof(float2);
crc = cudaGetLastError();
gpufft2rcxs<<<dimGridy,dimBlock,ns>>>(f,isign,mixup,sct,indx,indy,
nyi,ny,nxhd,nyd,nxhyd,nxyhd,
nsize);
/* cudaThreadSynchronize(); */
crc = cudaGetLastError();
if (crc) {
printf("gpufft2rcxs error=%d:%s\n",
crc,cudaGetErrorString(crc));
exit(1);
}
/* transpose f to g */
ns = (mx+1)*mx*sizeof(float2);
crc = cudaGetLastError();
gpuctpose4<<<dimGridtx,dimBlockt,ns>>>(f,g,nxh1,ny,nxhd,nyd);
/* cudaThreadSynchronize(); */
crc = cudaGetLastError();
if (crc) {
printf("gpuctpose4 error=%d:%s\n",crc,cudaGetErrorString(crc));
exit(1);
}
/* perform y fft */
nsize = ny < 1024 ? ny : 1024;
ns = nsize*sizeof(float2);
crc = cudaGetLastError();
gpufft2rcys<<<dimGridx,dimBlock,ns>>>(g,isign,mixup,sct,indx,indy,
nxi,nxh1,nxhd,nyd,nxhyd,
nxyhd,nsize);
cudaThreadSynchronize();
crc = cudaGetLastError();
if (crc) {
printf("gpufft2rcys error=%d:%s\n",
crc,cudaGetErrorString(crc));
exit(1);
}
/* transpose g to f */
/* ns = (mx+1)*mx*sizeof(float2); */
/* crc = cudaGetLastError(); */
/* gpuctpose4<<<dimGridty,dimBlockt,ns>>>(g,f,ny,nxh1,nyd,nxhd); */
/* cudaThreadSynchronize(); */
/* crc = cudaGetLastError(); */
/* if (crc) { */
/* printf("gpuctpose4 error=%d:%s\n",crc,cudaGetErrorString(crc)); */
/* exit(1); */
/* } */
}
/* forward fourier transform */
else if (isign > 0) {
/* transpose f to g */
/* ns = (mx+1)*mx*sizeof(float2); */
/* crc = cudaGetLastError(); */
/* gpuctpose4<<<dimGridtx,dimBlockt,ns>>>(f,g,nxh1,ny,nxhd,nyd); */
/* cudaThreadSynchronize(); */
/* crc = cudaGetLastError(); */
/* if (crc) { */
/* printf("gpuctpose4 error=%d:%s\n",crc,cudaGetErrorString(crc)); */
/* exit(1); */
/* } */
/* perform y fft */
nsize = ny < 1024 ? ny : 1024;
ns = nsize*sizeof(float2);
crc = cudaGetLastError();
gpufft2rcys<<<dimGridx,dimBlock,ns>>>(g,isign,mixup,sct,indx,indy,
nxi,nxh1,nxhd,nyd,nxhyd,
nxyhd,nsize);
/* cudaThreadSynchronize(); */
crc = cudaGetLastError();
if (crc) {
printf("gpufft2rcys error=%d:%s\n",
crc,cudaGetErrorString(crc));
exit(1);
}
/* transpose g to f */
ns = (mx+1)*mx*sizeof(float2);
crc = cudaGetLastError();
gpuctpose4<<<dimGridty,dimBlockt,ns>>>(g,f,ny,nxh1,nyd,nxhd);
/* cudaThreadSynchronize(); */
crc = cudaGetLastError();
if (crc) {
printf("gpuctpose4 error=%d:%s\n",crc,cudaGetErrorString(crc));
exit(1);
}
/* perform x fft */
nsize = nxh < 1024 ? nxh : 1024;
ns = nsize*sizeof(float2);
crc = cudaGetLastError();
gpufft2rcxs<<<dimGridy,dimBlock,ns>>>(f,isign,mixup,sct,indx,indy,
nyi,ny,nxhd,nyd,nxhyd,nxyhd,
nsize);
cudaThreadSynchronize();
crc = cudaGetLastError();
if (crc) {
printf("gpufft2rcxs error=%d:%s\n",
crc,cudaGetErrorString(crc));
exit(1);
}
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpuwfft2rcsn(float2 *fn, float2 *gn, int isign,
int *mixup, float2 *sct, int indx,
int indy, int ndim, int nxhd, int nyd,
int nxhyd, int nxyhd) {
/* wrapper function for multiple real to complex ffts, */
/* without packed data */
/* if isign = -1, fn = input, gn = output */
/* if isign = 1, gn = input, fn = output */
/* ndim = vector dimension */
/* nxhd must be >= nx/2 + 1 */
/* local data */
int nxh, nxh1, ny, nxp, nyp, nnxd, nnyd, nsize, ns;
int nxi = 1, nyi = 1, mx = 16;
dim3 dimBlock(nblock_size);
dim3 dimBlockt(mx,mx);
/* calculate range of indices */
nxh = 1L<<(indx - 1);
nxh1 = nxh + 1;
ny = 1L<<indy;
nxp = ndim*nxh1;
nyp = ndim*ny;
nnxd = ndim*nxhd;
nnyd = ndim*nyd;
dim3 dimGridx(nxp);
dim3 dimGridy(nyp);
dim3 dimGridtx((nxh1-1)/mx+1,(ny-1)/mx+1);
dim3 dimGridty((ny-1)/mx+1,(nxh1-1)/mx+1);
/* inverse fourier transform */
if (isign < 0) {
/* perform x fft */
nsize = nxh < 1024 ? nxh : 1024;
ns = nsize*sizeof(float2);
crc = cudaGetLastError();
gpufft2rcxs<<<dimGridy,dimBlock,ns>>>(fn,isign,mixup,sct,indx,
indy,nyi,nyp,nxhd,nnyd,
nxhyd,nxyhd,nsize);
/* cudaThreadSynchronize(); */
crc = cudaGetLastError();
if (crc) {
printf("gpufft2rcxs error=%d:%s\n",
crc,cudaGetErrorString(crc));
exit(1);
}
/* transpose f to g */
ns = ndim*(mx+1)*mx*sizeof(float2);
crc = cudaGetLastError();
gpuctpose4n<<<dimGridtx,dimBlockt,ns>>>(fn,gn,nxh1,ny,ndim,nxhd,
nyd);
/* cudaThreadSynchronize(); */
crc = cudaGetLastError();
if (crc) {
printf("gpuctpose4n error=%d:%s\n",crc,
cudaGetErrorString(crc));
exit(1);
}
/* perform y fft */
nsize = ny < 1024 ? ny : 1024;
ns = nsize*sizeof(float2);
crc = cudaGetLastError();
gpufft2rcys<<<dimGridx,dimBlock,ns>>>(gn,isign,mixup,sct,indx,
indy,nxi,nxp,nnxd,nyd,nxhyd,
nxyhd,nsize);
cudaThreadSynchronize();
crc = cudaGetLastError();
if (crc) {
printf("gpufft2rcys error=%d:%s\n",
crc,cudaGetErrorString(crc));
exit(1);
}
/* transpose g to f */
/* ns = (mx+1)*mx*sizeof(float2); */
/* crc = cudaGetLastError(); */
/* gpuctpose4n<<<dimGridty,dimBlockt,ns>>>(gn,fn,ny,nxh1,ndim,nyd, */
/* nxhd); */
/* cudaThreadSynchronize(); */
/* crc = cudaGetLastError(); */
/* if (crc) { */
/* printf("gpuctpose4 error=%d:%s\n",crc, */
/* cudaGetErrorString(crc)); */
/* exit(1); */
/* } */
}
/* forward fourier transform */
else if (isign > 0) {
/* transpose f to g */
/* ns = (mx+1)*mx*sizeof(float2); */
/* crc = cudaGetLastError(); */
/* gpuctpose4n<<<dimGridtx,dimBlockt,ns>>>(fn,gn,nxh1,ny,ndim,nxhd, */
/* nyd); */
/* cudaThreadSynchronize(); */
/* crc = cudaGetLastError(); */
/* if (crc) { */
/* printf("gpuctpose4n error=%d:%s\n",crc, */
/* cudaGetErrorString(crc)); */
/* exit(1); */
/* } */
/* perform y fft */
nsize = ny < 1024 ? ny : 1024;
ns = nsize*sizeof(float2);
crc = cudaGetLastError();
gpufft2rcys<<<dimGridx,dimBlock,ns>>>(gn,isign,mixup,sct,indx,
indy,nxi,nxp,nnxd,nyd,nxhyd,
nxyhd,nsize);
/* cudaThreadSynchronize(); */
crc = cudaGetLastError();
if (crc) {
printf("gpufft2rcys error=%d:%s\n",
crc,cudaGetErrorString(crc));
exit(1);
}
/* transpose g to f */
ns = ndim*(mx+1)*mx*sizeof(float2);
crc = cudaGetLastError();
gpuctpose4n<<<dimGridty,dimBlockt,ns>>>(gn,fn,ny,nxh1,ndim,nyd,
nxhd);
/* cudaThreadSynchronize(); */
crc = cudaGetLastError();
if (crc) {
printf("gpuctpose4n error=%d:%s\n",crc,
cudaGetErrorString(crc));
exit(1);
}
/* perform x fft */
nsize = nxh < 1024 ? nxh : 1024;
ns = nsize*sizeof(float2);
crc = cudaGetLastError();
gpufft2rcxs<<<dimGridy,dimBlock,ns>>>(fn,isign,mixup,sct,indx,
indy,nyi,nyp,nxhd,nnyd,
nxhyd,nxyhd,nsize);
cudaThreadSynchronize();
crc = cudaGetLastError();
if (crc) {
printf("gpufft2rcxs error=%d:%s\n",
crc,cudaGetErrorString(crc));
exit(1);
}
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpusum2(float *a, float *sa, int nx) {
/* segmented 1d parallel sum reduction of input array a, of length nx */
/* first reduce individual blocks in parallel, writing result to scr */
/* then reduce scr serially, result is written to sa */
/* local data */
int nbx, nbs, ns;
void *gptr;
static int len = 0;
static float *scr = NULL;
nbx = (nx - 1)/nblock_size + 1;
dim3 dimBlock(nblock_size);
dim3 dimGrid(nbx);
nbs = (nbx - 1)/nblock_size + 1;
dim3 dimGrid1(nbs);
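/* e.g., if nblock_size = 128 and nx = 100000, gpusum2 runs over nbx = 782
   blocks and writes one partial sum per block into scr; gpusum1 then runs
   over nbs = 7 blocks, each serially summing up to 128 entries of scr and
   atomically adding its result to sa[0] */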
/* create scratch array */
if (len < nbx) {
if (len > 0)
crc = cudaFree((void *)scr);
crc = cudaMalloc(&gptr,sizeof(float)*nbx);
if (crc) {
printf("cudaMalloc cgpusum2 float Error=%d:%s,l=%d\n",crc,
cudaGetErrorString(crc),nbx);
exit(1);
}
scr = (float *)gptr;
len = nbx;
}
/* reduce individual blocks in parallel */
ns = nblock_size*sizeof(float);
crc = cudaGetLastError();
gpusum2<<<dimGrid,dimBlock,ns>>>(a,scr,nx);
/* cudaThreadSynchronize(); */
crc = cudaGetLastError();
if (crc) {
printf("gpusum2 error=%d:%s\n",crc,cudaGetErrorString(crc));
exit(1);
}
/* 1d serial reduction */
crc = cudaGetLastError();
gpusum1<<<dimGrid1,dimBlock,ns>>>(scr,sa,nbx);
cudaThreadSynchronize();
crc = cudaGetLastError();
if (crc) {
printf("gpusum1 error=%d:%s\n",crc,cudaGetErrorString(crc));
exit(1);
}
return;
}
/* Interfaces to Fortran */
/*--------------------------------------------------------------------*/
extern "C" void cgpubppush23l_(unsigned long *gp_ppart,
unsigned long *gp_fxy,
unsigned long *gp_bxy,
unsigned long *gp_kpic, float *qbm,
float *dt, float *dtc,
unsigned long *gp_ek, int *idimp,
int *nppmx, int *nx, int *ny, int *mx,
int *my, int *nxv, int *nyv, int *mx1,
int *mxy1, int *ipbc) {
float *ppart, *fxy, *bxy, *ek;
int *kpic;
ppart = (float *)*gp_ppart;
fxy = (float *)*gp_fxy;
bxy = (float *)*gp_bxy;
kpic = (int *)*gp_kpic;
ek = (float *)*gp_ek;
cgpubppush23l(ppart,fxy,bxy,kpic,*qbm,*dt,*dtc,ek,*idimp,*nppmx,*nx,
*ny,*mx,*my,*nxv,*nyv,*mx1,*mxy1,*ipbc);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpubppushf23l_(unsigned long *gp_ppart,
unsigned long *gp_fxy,
unsigned long *gp_bxy,
unsigned long *gp_kpic,
unsigned long *gp_ncl,
unsigned long *gp_ihole, float *qbm,
float *dt, float *dtc,
unsigned long *gp_ek, int *idimp,
int *nppmx, int *nx, int *ny, int *mx,
int *my, int *nxv, int *nyv, int *mx1,
int *mxy1, int *ntmax,
unsigned long *gp_irc) {
float *ppart, *fxy, *bxy, *ek;
int *kpic, *ncl, *ihole, *irc;
ppart = (float *)*gp_ppart;
fxy = (float *)*gp_fxy;
bxy = (float *)*gp_bxy;
kpic = (int *)*gp_kpic;
ncl = (int *)*gp_ncl;
ihole = (int *)*gp_ihole;
ek = (float *)*gp_ek;
irc = (int *)*gp_irc;
cgpubppushf23l(ppart,fxy,bxy,kpic,ncl,ihole,*qbm,*dt,*dtc,ek,*idimp,
*nppmx,*nx,*ny,*mx,*my,*nxv,*nyv,*mx1,*mxy1,*ntmax,
irc);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpurbppush23l_(unsigned long *gp_ppart,
unsigned long *gp_fxy,
unsigned long *gp_bxy,
unsigned long *gp_kpic, float *qbm,
float *dt, float *dtc, float *ci,
unsigned long *gp_ek, int *idimp,
int *nppmx, int *nx, int *ny, int *mx,
int *my, int *nxv, int *nyv, int *mx1,
int *mxy1, int *ipbc) {
float *ppart, *fxy, *bxy, *ek;
int *kpic;
ppart = (float *)*gp_ppart;
fxy = (float *)*gp_fxy;
bxy = (float *)*gp_bxy;
kpic = (int *)*gp_kpic;
ek = (float *)*gp_ek;
cgpurbppush23l(ppart,fxy,bxy,kpic,*qbm,*dt,*dtc,*ci,ek,*idimp,*nppmx,
*nx,*ny,*mx,*my,*nxv,*nyv,*mx1,*mxy1,*ipbc);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpurbppushf23l_(unsigned long *gp_ppart,
unsigned long *gp_fxy,
unsigned long *gp_bxy,
unsigned long *gp_kpic,
unsigned long *gp_ncl,
unsigned long *gp_ihole, float *qbm,
float *dt, float *dtc, float *ci,
unsigned long *gp_ek, int *idimp,
int *nppmx, int *nx, int *ny, int *mx,
int *my, int *nxv, int *nyv, int *mx1,
int *mxy1, int *ntmax,
unsigned long *gp_irc) {
float *ppart, *fxy, *bxy, *ek;
int *kpic, *ncl, *ihole, *irc;
ppart = (float *)*gp_ppart;
fxy = (float *)*gp_fxy;
bxy = (float *)*gp_bxy;
kpic = (int *)*gp_kpic;
ncl = (int *)*gp_ncl;
ihole = (int *)*gp_ihole;
ek = (float *)*gp_ek;
irc = (int *)*gp_irc;
cgpurbppushf23l(ppart,fxy,bxy,kpic,ncl,ihole,*qbm,*dt,*dtc,*ci,ek,
*idimp,*nppmx,*nx,*ny,*mx,*my,*nxv,*nyv,*mx1,*mxy1,
*ntmax,irc);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpu2ppost2l_(unsigned long *gp_ppart,
unsigned long *gp_q,
unsigned long *gp_kpic, float *qm,
int *nppmx, int *idimp, int *mx, int *my,
int *nxv, int *nyv, int *mx1, int *mxy1) {
float *ppart, *q;
int *kpic;
ppart = (float *)*gp_ppart;
q = (float *)*gp_q;
kpic = (int *)*gp_kpic;
cgpu2ppost2l(ppart,q,kpic,*qm,*nppmx,*idimp,*mx,*my,*nxv,*nyv,*mx1,
*mxy1);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpu2jppost2l_(unsigned long *gp_ppart,
unsigned long *gp_cu,
unsigned long *gp_kpic, float *qm,
float *dt, int *nppmx, int *idimp,
int *nx, int *ny, int *mx, int *my,
int *nxv, int *nyv, int *mx1, int *mxy1,
int *ipbc) {
float *ppart, *cu;
int *kpic;
ppart = (float *)*gp_ppart;
cu = (float *)*gp_cu;
kpic = (int *)*gp_kpic;
cgpu2jppost2l(ppart,cu,kpic,*qm,*dt,*nppmx,*idimp,*nx,*ny,*mx,*my,
*nxv,*nyv,*mx1,*mxy1,*ipbc);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpu2jppostf2l_(unsigned long *gp_ppart,
unsigned long *gp_cu,
unsigned long *gp_kpic,
unsigned long *gp_ncl,
unsigned long *gp_ihole, float *qm,
float *dt, int *nppmx, int *idimp,
int *nx, int *ny, int *mx, int *my,
int *nxv, int *nyv, int *mx1, int *mxy1,
int *ntmax, unsigned long *gp_irc) {
float *ppart, *cu;
int *kpic, *ncl, *ihole, *irc;
ppart = (float *)*gp_ppart;
cu = (float *)*gp_cu;
kpic = (int *)*gp_kpic;
ncl = (int *)*gp_ncl;
ihole = (int *)*gp_ihole;
irc = (int *)*gp_irc;
cgpu2jppostf2l(ppart,cu,kpic,ncl,ihole,*qm,*dt,*nppmx,*idimp,*nx,*ny,
*mx,*my,*nxv,*nyv,*mx1,*mxy1,*ntmax,irc);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpu2rjppost2l_(unsigned long *gp_ppart,
unsigned long *gp_cu,
unsigned long *gp_kpic, float *qm,
float *dt, float *ci, int *nppmx,
int *idimp, int *nx, int *ny, int *mx,
int *my, int *nxv, int *nyv, int *mx1,
int *mxy1, int *ipbc) {
float *ppart, *cu;
int *kpic;
ppart = (float *)*gp_ppart;
cu = (float *)*gp_cu;
kpic = (int *)*gp_kpic;
cgpu2rjppost2l(ppart,cu,kpic,*qm,*dt,*ci,*nppmx,*idimp,*nx,*ny,*mx,
*my,*nxv,*nyv,*mx1,*mxy1,*ipbc);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpu2rjppostf2l_(unsigned long *gp_ppart,
unsigned long *gp_cu,
unsigned long *gp_kpic,
unsigned long *gp_ncl,
unsigned long *gp_ihole, float *qm,
float *dt, float *ci, int *nppmx,
int *idimp, int *nx, int *ny, int *mx,
int *my, int *nxv, int *nyv, int*mx1,
int *mxy1, int *ntmax,
unsigned long *gp_irc) {
float *ppart, *cu;
int *kpic, *ncl, *ihole, *irc;
ppart = (float *)*gp_ppart;
cu = (float *)*gp_cu;
kpic = (int *)*gp_kpic;
ncl = (int *)*gp_ncl;
ihole = (int *)*gp_ihole;
irc = (int *)*gp_irc;
cgpu2rjppostf2l(ppart,cu,kpic,ncl,ihole,*qm,*dt,*ci,*nppmx,*idimp,
*nx,*ny,*mx,*my,*nxv,*nyv,*mx1,*mxy1,*ntmax,irc);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpucaguard2l_(unsigned long *gp_qc,
unsigned long *gp_q, int *nx, int *ny,
int *nxe, int *nye, int *nxvh,
int *nyv) {
float2 *qc;
float *q;
qc = (float2 *)*gp_qc;
q = (float *)*gp_q;
cgpucaguard2l(qc,q,*nx,*ny,*nxe,*nye,*nxvh,*nyv);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpucacguard2l_(unsigned long *gp_cuc,
unsigned long *gp_cu, int *nx, int *ny,
int *nxe, int *nye, int *nxvh,
int *nyv) {
float2 *cuc;
float *cu;
cuc = (float2 *)*gp_cuc;
cu = (float *)*gp_cu;
cgpucacguard2l(cuc,cu,*nx,*ny,*nxe,*nye,*nxvh,*nyv);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpucbguard2l_(unsigned long *gp_bxyc,
unsigned long *gp_bxy, int *nx, int *ny,
int *nxe, int *nye, int *nxvh,
int *nyv) {
float2 *bxyc;
float *bxy;
bxyc = (float2 *)*gp_bxyc;
bxy = (float *)*gp_bxy;
cgpucbguard2l(bxyc,bxy,*nx,*ny,*nxe,*nye,*nxvh,*nyv);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpuppord2l_(unsigned long *gp_ppart,
unsigned long *gp_ppbuff,
unsigned long *gp_kpic,
unsigned long *gp_ncl,
unsigned long *gp_ihole, int *idimp,
int *nppmx, int *nx, int *ny, int *mx,
int *my, int *mx1, int *my1, int *npbmx,
int *ntmax, unsigned long *gp_irc) {
float *ppart, *ppbuff;
int *kpic, *ncl, *ihole, *irc;
ppart = (float *)*gp_ppart;
ppbuff = (float *)*gp_ppbuff;
kpic = (int *)*gp_kpic;
ncl = (int *)*gp_ncl;
ihole = (int *)*gp_ihole;
irc = (int *)*gp_irc;
cgpuppord2l(ppart,ppbuff,kpic,ncl,ihole,*idimp,*nppmx,*nx,*ny,*mx,
*my,*mx1,*my1,*npbmx,*ntmax,irc);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpuppordf2l_(unsigned long *gp_ppart,
unsigned long *gp_ppbuff,
unsigned long *gp_kpic,
unsigned long *gp_ncl,
unsigned long *gp_ihole, int *idimp,
int *nppmx, int *mx1, int *my1, int *npbmx,
int *ntmax, unsigned long *gp_irc) {
float *ppart, *ppbuff;
int *kpic, *ncl, *ihole, *irc;
ppart = (float *)*gp_ppart;
ppbuff = (float *)*gp_ppbuff;
kpic = (int *)*gp_kpic;
ncl = (int *)*gp_ncl;
ihole = (int *)*gp_ihole;
irc = (int *)*gp_irc;
cgpuppordf2l(ppart,ppbuff,kpic,ncl,ihole,*idimp,*nppmx,*mx1,*my1,
*npbmx,*ntmax,irc);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpupois23t_(unsigned long *gp_qt,
unsigned long *gp_fxyt,
unsigned long *gp_ffct,
unsigned long *gp_we, int *nx, int *ny,
int *nxvh, int *nyv, int *nxhd,
int *nyhd) {
float2 *qt, *fxyt, *ffct;
float *we;
qt = (float2 *)*gp_qt;
fxyt = (float2 *)*gp_fxyt;
ffct = (float2 *)*gp_ffct;
we = (float *)*gp_we;
cgpupois23t(qt,fxyt,ffct,we,*nx,*ny,*nxvh,*nyv,*nxhd,*nyhd);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpucuperp2t_(unsigned long *gp_cut, int *nx, int *ny,
int *nxvh, int *nyv) {
float2 *cut;
cut = (float2 *)*gp_cut;
cgpucuperp2t(cut,*nx,*ny,*nxvh,*nyv);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpuibpois23t_(unsigned long *gp_cut,
unsigned long *gp_bxyt,
unsigned long *gp_ffct, float *ci,
unsigned long *gp_wm, int *nx, int *ny,
int *nxvh, int *nyv, int *nxhd,
int *nyhd) {
float2 *cut, *bxyt, *ffct;
float *wm;
cut = (float2 *)*gp_cut;
bxyt = (float2 *)*gp_bxyt;
ffct = (float2 *)*gp_ffct;
wm = (float *)*gp_wm;
cgpuibpois23t(cut,bxyt,ffct,*ci,wm,*nx,*ny,*nxvh,*nyv, *nxhd,*nyhd);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpumaxwel2t_(unsigned long *gp_exyt,
unsigned long *gp_bxyt,
unsigned long *gp_cut,
unsigned long *gp_ffct, float *ci,
float *dt, unsigned long *gp_wf,
unsigned long *gp_wm, int *nx, int *ny,
int *nxvh, int *nyv, int *nxhd,
int *nyhd) {
float2 *cut, *exyt, *bxyt, *ffct;
float *wf, *wm;
cut = (float2 *)*gp_cut;
exyt = (float2 *)*gp_exyt;
bxyt = (float2 *)*gp_bxyt;
ffct = (float2 *)*gp_ffct;
wf = (float *)*gp_wf;
wm = (float *)*gp_wm;
cgpumaxwel2t(exyt,bxyt,cut,ffct,*ci,*dt,wf,wm,*nx,*ny,*nxvh,*nyv,
*nxhd,*nyhd);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpuemfield2t_(unsigned long *gp_fxyt,
unsigned long *gp_exyt,
unsigned long *gp_ffct, int *isign,
int *nx, int *ny, int *nxvh, int *nyv,
int *nxhd, int *nyhd) {
float2 *fxyt, *exyt, *ffct;
fxyt = (float2 *)*gp_fxyt;
exyt = (float2 *)*gp_exyt;
ffct = (float2 *)*gp_ffct;
cgpuemfield2t(fxyt,exyt,ffct,*isign,*nx,*ny,*nxvh,*nyv,*nxhd,*nyhd);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpuwfft2rcs_(unsigned long *gp_f, unsigned long *gp_g,
int *isign, unsigned long *gp_mixup,
unsigned long *gp_sct, int *indx,
int *indy, int *nxhd, int *nyd,
int *nxhyd, int *nxyhd) {
float2 *f, *g, *sct;
int *mixup;
f = (float2 *)*gp_f;
g = (float2 *)*gp_g;
mixup = (int *)*gp_mixup;
sct = (float2 *)*gp_sct;
cgpuwfft2rcs(f,g,*isign,mixup,sct,*indx,*indy,*nxhd,*nyd,*nxhyd,
*nxyhd);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpuwfft2rcsn_(unsigned long *gp_fn,
unsigned long *gp_gn, int *isign,
unsigned long *gp_mixup,
unsigned long *gp_sct, int *indx,
int *indy, int *ndim, int *nxhd,
int *nyd, int *nxhyd, int *nxyhd) {
float2 *fn, *gn, *sct;
int *mixup;
fn = (float2 *)*gp_fn;
gn = (float2 *)*gp_gn;
mixup = (int *)*gp_mixup;
sct = (float2 *)*gp_sct;
cgpuwfft2rcsn(fn,gn,*isign,mixup,sct,*indx,*indy,*ndim,*nxhd,*nyd,
*nxhyd,*nxyhd);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void cgpusum2_(unsigned long *gp_a, unsigned long *gp_sa,
int *nx) {
float *a, *sa;
a = (float *)*gp_a;
sa = (float *)*gp_sa;
cgpusum2(a,sa,*nx);
return;
}
|
674b57534e313de249e4c17a9646cb0a75ff50df.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#include <math.h>
__global__ void doubling(int n, float *a) {
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int i;
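/* grid-stride loop: each thread advances by the total number of threads in
   the grid; with the 65535 x 256 launch used in main (about 16.8 million
   threads) and n = 65535*3500 elements, each thread doubles roughly 14
   elements, and the inner loop of multiplications by 1 apparently only
   lengthens the kernel's run time */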
while(tid < n) {
a[tid] *= 2;
for(i=0; i<1000; i++) a[tid] *= 1;
tid += blockDim.x * gridDim.x;
}
}
int main(int argc, char **argv) {
int count, rank, size;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
hipGetDeviceCount(&count);
if( rank==0 ) {
printf("Number of CUDA-enabled GPU devices: %d\n", count);
printf("MPI size: %d\n", size);
}
int i;
int n=65535*3500;
float *a, *a_dev;
a = (float *)malloc(n*sizeof(float));
hipMalloc((void**)&a_dev, n*sizeof(float));
for(i=0; i<n; i++) a[i] = 1.;
hipMemcpy(a_dev, a, n*sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( doubling), dim3(65535), dim3(256), 0, 0, n, a_dev);
hipMemcpy(a, a_dev, n*sizeof(float), hipMemcpyDeviceToHost);
for(i=0; i<n; i++) {
if(fabs(a[i] - 2.) > 1e-5) printf("a[%d] = %g\n", i, a[i]);
}
free(a);
hipFree(a_dev);
MPI_Finalize();
return 0;
}
| 674b57534e313de249e4c17a9646cb0a75ff50df.cu | #include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#include <math.h>
__global__ void doubling(int n, float *a) {
int tid = blockDim.x*blockIdx.x + threadIdx.x;
int i;
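/* grid-stride loop: each thread advances by the total number of threads in
   the grid; with the 65535 x 256 launch used in main (about 16.8 million
   threads) and n = 65535*3500 elements, each thread doubles roughly 14
   elements, and the inner loop of multiplications by 1 apparently only
   lengthens the kernel's run time */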
while(tid < n) {
a[tid] *= 2;
for(i=0; i<1000; i++) a[tid] *= 1;
tid += blockDim.x * gridDim.x;
}
}
int main(int argc, char **argv) {
int count, rank, size;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
cudaGetDeviceCount(&count);
if( rank==0 ) {
printf("Number of CUDA-enabled GPU devices: %d\n", count);
printf("MPI size: %d\n", size);
}
int i;
int n=65535*3500;
float *a, *a_dev;
a = (float *)malloc(n*sizeof(float));
cudaMalloc((void**)&a_dev, n*sizeof(float));
for(i=0; i<n; i++) a[i] = 1.;
cudaMemcpy(a_dev, a, n*sizeof(float), cudaMemcpyHostToDevice);
doubling<<<65535, 256>>>(n, a_dev);
cudaMemcpy(a, a_dev, n*sizeof(float), cudaMemcpyDeviceToHost);
for(i=0; i<n; i++) {
if(fabs(a[i] - 2.) > 1e-5) printf("a[%d] = %g\n", i, a[i]);
}
free(a);
cudaFree(a_dev);
MPI_Finalize();
return 0;
}
|
a953bdd16f30792f68bbddaacb5e8eb1b5b2fcbb.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "ComputeSpeQtyKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *Label = NULL;
hipMalloc(&Label, XSIZE*YSIZE*sizeof(double));
double *Dens = NULL;
hipMalloc(&Dens, XSIZE*YSIZE*sizeof(double));
double *ExtLabel = NULL;
hipMalloc(&ExtLabel, XSIZE*YSIZE*sizeof(double));
int nrad = 1;
int nsec = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((ComputeSpeQtyKernel), dim3(gridBlock), dim3(threadBlock), 0, 0, Label, Dens, ExtLabel, nrad, nsec);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((ComputeSpeQtyKernel), dim3(gridBlock), dim3(threadBlock), 0, 0, Label, Dens, ExtLabel, nrad, nsec);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((ComputeSpeQtyKernel), dim3(gridBlock), dim3(threadBlock), 0, 0, Label, Dens, ExtLabel, nrad, nsec);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | a953bdd16f30792f68bbddaacb5e8eb1b5b2fcbb.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "ComputeSpeQtyKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *Label = NULL;
cudaMalloc(&Label, XSIZE*YSIZE*sizeof(double));
double *Dens = NULL;
cudaMalloc(&Dens, XSIZE*YSIZE*sizeof(double));
double *ExtLabel = NULL;
cudaMalloc(&ExtLabel, XSIZE*YSIZE*sizeof(double));
int nrad = 1;
int nsec = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
ComputeSpeQtyKernel<<<gridBlock,threadBlock>>>(Label,Dens,ExtLabel,nrad,nsec);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
ComputeSpeQtyKernel<<<gridBlock,threadBlock>>>(Label,Dens,ExtLabel,nrad,nsec);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
ComputeSpeQtyKernel<<<gridBlock,threadBlock>>>(Label,Dens,ExtLabel,nrad,nsec);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
10fee7fba07e92f8715225bfd34a6e2b98ca3d78.hip | // !!! This is a file automatically generated by hipify!!!
/*************************************************************
*
* kpp_integrate_cuda_prototype.cu
* Prototype file for kpp CUDA kernel
*
* Copyright 2016 The Cyprus Institute
*
* Developers: Michail Alvanos - [email protected]
* Giannis Ashiotis
* Theodoros Christoudias - [email protected]
*
********************************************************************/
#include <stdio.h>
#include <unistd.h>
#include "hip/hip_runtime.h"
=#=#=#=#=#=#=#=#=#=#=defines_vars_2=#=#=#=#=#=#=#=#=#=#=
#define BLOCKSIZE 64
//#define MAX_VL_GLO 12288 /* elements that will pass in each call */
#define REDUCTION_SIZE_1 64
#define REDUCTION_SIZE_2 32
=#=#=#=#=#=#=#=#=#=#=defines_vars_1=#=#=#=#=#=#=#=#=#=#=
=#=#=#=#=#=#=#=#=#=#=defines_ind_1=#=#=#=#=#=#=#=#=#=#=
=#=#=#=#=#=#=#=#=#=#=defines_ind_2=#=#=#=#=#=#=#=#=#=#=
=#=#=#=#=#=#=#=#=#=#=defines_ind_3=#=#=#=#=#=#=#=#=#=#=
=#=#=#=#=#=#=#=#=#=#=defines_ind_4=#=#=#=#=#=#=#=#=#=#=
=#=#=#=#=#=#=#=#=#=#=defines_ind_5=#=#=#=#=#=#=#=#=#=#=
#define ifun 0
#define ijac 1
#define istp 2
#define iacc 3
#define irej 4
#define idec 5
#define isol 6
#define isng 7
#define itexit 0
#define ihexit 1
#define ZERO 0.0
#define ONE 1.0
#define HALF 0.5
/*
* Fortran to C macros
* GPU-friendly array deffinition
* i:VL_GLO, j:NVAR
*
*/
#define conc(i,j) conc[(j)*VL_GLO+(i)]
#define khet_st(i,j) khet_st[(j)*VL_GLO+(i)]
#define khet_tr(i,j) khet_tr[(j)*VL_GLO+(i)]
#define jx(i,j) jx[j*VL_GLO+i]
#define istatus(i,j) istatus[(j)*(VL_GLO)+(i)]
#define rstatus(i,j) rstatus[(j)*(VL_GLO)+(i)]
#define ROUND128(X) (X + (128 - 1)) & ~(128 - 1)
#define rconst(i,j) rconst[(j)]
/* Temporary arrays allocated in stack */
#define var(i,j) var[(j)]
#define fix(i,j) fix[(j)]
#define jcb(i,j) jcb[(j)]
#define varDot(i,j) varDot[j]
#define varNew(i,j) varNew[(j)]
#define Fcn0(i,j) Fcn0[(j)]
#define Fcn(i,j) Fcn[(j)]
#define Fcn(i,j) Fcn[(j)]
#define dFdT(i,j) dFdT[(j)]
#define varErr(i,j) varErr[(j)]
#define K(i,j,k) K[(j)*(NVAR)+(k)]
#define jac0(i,j) jac0[(j)]
#define Ghimj(i,j) Ghimj[(j)]
/* Enable debug flags for GPU */
//#define DEBUG
#ifdef DEBUG
#define GPU_DEBUG()\
gpuErrchk( hipPeekAtLastError() ); \
gpuErrchk( hipDeviceSynchronize() );
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
static inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
#else
/* If debug flags are disabled */
#define GPU_DEBUG()
#define gpuErrchk(ans) ans
#endif
/** prefetches into L1 cache */
__device__ inline void prefetch_gl1(const void *p) {
#if __CUDA_ARCH__ <= 300
asm("prefetch.global.L1 [%0];": :"l"(p));
#endif
}
__device__ inline void prefetch_ll1(const void *p) {
#if __CUDA_ARCH__ <= 300
asm("prefetch.local.L1 [%0];": :"l"(p));
#endif
}
/** prefetches into L2 cache */
__device__ inline void prefetch_gl2(const void *p) {
#if __CUDA_ARCH__ <= 300
asm("prefetch.global.L2 [%0];": :"l"(p));
#endif
}
__device__ inline void prefetch_ll2(const void *p) {
#if __CUDA_ARCH__ <= 300
asm("prefetch.local.L2 [%0];": :"l"(p));
#endif
}
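/* Note: the prefetch helpers above emit PTX prefetch instructions only when
 * __CUDA_ARCH__ <= 300; on newer architectures the inline asm is compiled out
 * and the calls become no-ops. */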
__device__ void update_rconst(const double * __restrict__ var,
const double * __restrict__ khet_st, const double * __restrict__ khet_tr,
const double * __restrict__ jx, double * __restrict__ rconst,
const double * __restrict__ temp_gpu,
const double * __restrict__ press_gpu,
const double * __restrict__ cair_gpu,
const int VL_GLO);
/* This runs on CPU */
double machine_eps_flt()
{
double machEps = 1.0f;
do
{
machEps /= 2.0f;
// If next epsilon yields 1, then break, because current
// epsilon is the machine epsilon.
}
while ((double)(1.0 + (machEps/2.0)) != 1.0);
return machEps;
}
/* This runs on GPU */
__device__ double machine_eps_flt_cuda()
{
typedef union
{
long i64;
double f64;
} flt_64;
flt_64 s;
s.f64 = 1.;
s.i64++;
return (s.f64 - 1.);
}
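/* Note: starting from s.f64 = 1.0, incrementing the integer view flips the
 * least-significant mantissa bit, so (s.f64 - 1.0) evaluates to 2^-52,
 * i.e. DBL_EPSILON for IEEE-754 double precision. */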
__device__ static double alpha_AN(const int n, const int ro2type, const double temp, const double cair){
double alpha=2.E-22, beta=1.0, Yinf_298K=0.43, F=0.41, m0=0., minf=8.0;
double Y0_298K, Y0_298K_tp, Yinf_298K_t, zeta, k_ratio, alpha_a;
/* IF (ro2type = 1) THEN m = 0.4 ! primary RO2
ELSE IF (ro2type = 2) THEN m = 1. ! secondary RO2
ELSE IF (ro2type = 3) THEN m = 0.3 ! tertiary RO2
ELSE m = 1.
*/
double m = 1.;
Y0_298K = alpha*exp(beta*n);
Y0_298K_tp = Y0_298K *cair *pow((temp/298.),(- m0));
Yinf_298K_t = Yinf_298K * pow((temp/298.),(- minf));
zeta = 1/(1+ pow(log10(Y0_298K_tp/Yinf_298K_t),2));
k_ratio = (Y0_298K_tp/(1+ Y0_298K_tp/Yinf_298K_t))*pow(F,zeta);
alpha_a = k_ratio/(1+ k_ratio) *m;
return alpha_a;
}
__device__ static double alpha_AN(const int n, const int ro2type, const int bcarb, const int gcarb, const int abic, const double temp, const double cair){
double alpha=2.E-22, beta=1.0, Yinf_298K=0.43, F=0.41, m0=0., minf=8.0;
double Y0_298K, Y0_298K_tp, Yinf_298K_t, zeta, k_ratio, alpha_a;
double bcf=1., gcf=1., abf=1.;
double m = 1.; //According to Teng, ref3189
if (bcarb == 1) { bcf = 0.19; }// derived from Praske, ref3190: alpha_AN = 0.03 for the secondary HMKO2 relative to alpha_AN for 6C RO2 (0.16)
if (gcarb == 1) {gcf = 0.44; }// derived from Praske, ref3190: alpha_AN = 0.07 for the primary HMKO2 relative to alpha_AN for 6C RO2 (0.16)
if (abic == 1) { abf = 0.24; }// derived from the ratio of AN- yield for toluene from Elrod et al. (ref3180), 5.5% &
// 200 torr, and this SAR for linear alkyl RO2 with 9 heavy atoms, 23.3%
Y0_298K = alpha*exp(beta*n);
Y0_298K_tp = Y0_298K *cair *pow((temp/298.),(- m0));
Yinf_298K_t = Yinf_298K * pow((temp/298.),(- minf));
zeta = 1/(1+ pow(log10(Y0_298K_tp/Yinf_298K_t),2));
k_ratio = (Y0_298K_tp/(1+ Y0_298K_tp/Yinf_298K_t))*pow(F,zeta);
alpha_a = k_ratio/(1+ k_ratio) *m*bcf*gcf*abf;
return alpha_a;
}
__device__ static double k_RO2_HO2(const double temp, const int nC){
return 2.91e-13*exp(1300./temp)*(1.-exp(-0.245*nC)); // ref1630
}
__device__ double ros_ErrorNorm(double * __restrict__ var, double * __restrict__ varNew, double * __restrict__ varErr,
const double * __restrict__ absTol, const double * __restrict__ relTol,
const int vectorTol )
{
double err, scale, varMax;
err = ZERO;
if (vectorTol){
for (int i=0;i<NVAR - 16;i+=16){
prefetch_ll1(&varErr[i]);
prefetch_ll1(&absTol[i]);
prefetch_ll1(&relTol[i]);
prefetch_ll1(&var[i]);
prefetch_ll1(&varNew[i]);
}
for (int i=0; i<NVAR; i++)
{
varMax = fmax(fabs(var[i]),fabs(varNew[i]));
scale = absTol[i]+ relTol[i]*varMax;
err += pow((double)varErr[i]/scale,2.0);
}
err = sqrt((double) err/NVAR);
}else{
for (int i=0;i<NVAR - 16;i+=16){
prefetch_ll1(&varErr[i]);
prefetch_ll1(&var[i]);
prefetch_ll1(&varNew[i]);
}
for (int i=0; i<NVAR; i++)
{
varMax = fmax(fabs(var[i]),fabs(varNew[i]));
scale = absTol[0]+ relTol[0]*varMax;
err += pow((double)varErr[i]/scale,2.0);
}
err = sqrt((double) err/NVAR);
}
return err;
}
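/* Note: ros_ErrorNorm returns the weighted RMS norm
 *   err = sqrt( (1/NVAR) * sum_i (varErr[i]/scale_i)^2 ),
 *   scale_i = absTol_i + relTol_i * max(|var[i]|, |varNew[i]|),
 * using per-species tolerances when vectorTol is set and absTol[0]/relTol[0]
 * for all species otherwise. */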
=#=#=#=#=#=#=#=#=#=#=kppSolve=#=#=#=#=#=#=#=#=#=#=
__device__ void ros_Solve(double * __restrict__ Ghimj, double * __restrict__ K, int &Nsol, const int istage, const int ros_S)
{
#pragma unroll 4
for (int i=0;i<LU_NONZERO-16;i+=16){
prefetch_ll1(&Ghimj[i]);
}
kppSolve(Ghimj, K, istage, ros_S);
Nsol++;
}
=#=#=#=#=#=#=#=#=#=#=kppDecomp=#=#=#=#=#=#=#=#=#=#=
__device__ void ros_Decomp(double * __restrict__ Ghimj, int &Ndec, int VL_GLO)
{
kppDecomp(Ghimj, VL_GLO);
Ndec++;
}
=#=#=#=#=#=#=#=#=#=#=ros_PrepareMatrix=#=#=#=#=#=#=#=#=#=#=
=#=#=#=#=#=#=#=#=#=#=Jac_sp=#=#=#=#=#=#=#=#=#=#=
=#=#=#=#=#=#=#=#=#=#=Fun=#=#=#=#=#=#=#=#=#=#=
__device__ void ros_FunTimeDerivative(const double T, double roundoff, double * __restrict__ var, const double * __restrict__ fix,
const double * __restrict__ rconst, double *dFdT, double *Fcn0, int &Nfun,
const double * __restrict__ khet_st, const double * __restrict__ khet_tr,
const double * __restrict__ jx,
const int VL_GLO)
{
int index = blockIdx.x*blockDim.x+threadIdx.x;
const double DELTAMIN = 1.0E-6;
double delta,one_over_delta;
delta = sqrt(roundoff)*fmax(DELTAMIN,fabs(T));
one_over_delta = 1.0/delta;
Fun(var, fix, rconst, dFdT, Nfun, VL_GLO);
for (int i=0; i < NVAR; i++){
dFdT(index,i) = (dFdT(index,i) - Fcn0(index,i)) * one_over_delta;
}
}
__device__ static int ros_Integrator(double * __restrict__ var, const double * __restrict__ fix, const double Tstart, const double Tend, double &T,
// Rosenbrock method coefficients
const int ros_S, const double * __restrict__ ros_M, const double * __restrict__ ros_E, const double * __restrict__ ros_A, const double * __restrict__ ros_C,
const double * __restrict__ ros_Alpha, const double * __restrict__ ros_Gamma, const double ros_ELO, const int * ros_NewF,
// Integration parameters
const int autonomous, const int vectorTol, const int Max_no_steps,
const double roundoff, const double Hmin, const double Hmax, const double Hstart, double &Hexit,
const double FacMin, const double FacMax, const double FacRej, const double FacSafe,
// Status parameters
int &Nfun, int &Njac, int &Nstp, int &Nacc, int &Nrej, int &Ndec, int &Nsol, int &Nsng,
// cuda global mem buffers
const double * __restrict__ rconst, const double * __restrict__ absTol, const double * __restrict__ relTol, double * __restrict__ varNew, double * __restrict__ Fcn0,
double * __restrict__ K, double * __restrict__ dFdT, double * __restrict__ jac0, double * __restrict__ Ghimj, double * __restrict__ varErr,
// for update_rconst
const double * __restrict__ khet_st, const double * __restrict__ khet_tr,
const double * __restrict__ jx,
// VL_GLO
const int VL_GLO)
{
int index = blockIdx.x*blockDim.x+threadIdx.x;
double H, Hnew, HC, HG, Fac; // Tau - not used
double Err; //*varErr;
int direction;
int rejectLastH, rejectMoreH;
const double DELTAMIN = 1.0E-5;
// ~~~> Initial preparations
T = Tstart;
Hexit = 0.0;
H = fmin(Hstart,Hmax);
if (fabs(H) <= 10.0*roundoff)
H = DELTAMIN;
if (Tend >= Tstart)
{
direction = + 1;
}
else
{
direction = - 1;
}
rejectLastH=0;
rejectMoreH=0;
// ~~~> Time loop begins below
// TimeLoop:
while( ((direction > 0) && ((T - Tend) + roundoff <= ZERO)) || ((direction < 0) && ((Tend - T) + roundoff <= ZERO)) )
{
if (Nstp > Max_no_steps) // Too many steps
return -6;
// Step size too small
if (H <= roundoff){ // Step size too small
//if (((T+ 0.1*H) == T) || (H <= roundoff)) {
return -7;
}
// ~~~> Limit H if necessary to avoid going beyond Tend
Hexit = H;
H = fmin(H,fabs(Tend-T));
// ~~~> Compute the function at current time
Fun(var, fix, rconst, Fcn0, Nfun, VL_GLO); /// VAR READ - Fcn0 Write
// ~~~> Compute the function derivative with respect to T
if (!autonomous)
ros_FunTimeDerivative(T, roundoff, var, fix, rconst, dFdT, Fcn0, Nfun, khet_st, khet_tr, jx, VL_GLO); /// VAR READ - fcn0 read
// ~~~> Compute the Jacobian at current time
Jac_sp(var, fix, rconst, jac0, Njac, VL_GLO); /// VAR READ
// ~~~> Repeat step calculation until current step accepted
// UntilAccepted:
while(1)
{
ros_PrepareMatrix(H, direction, ros_Gamma[0], jac0, Ghimj, Nsng, Ndec, VL_GLO);
// ~~~> Compute the stages
// Stage:
for (int istage=0; istage < ros_S; istage++)
{
// For the 1st istage the function has been computed previously
if (istage == 0)
{
for (int i=0; i<NVAR; i++){
varNew(index,i) = Fcn0(index,i); // FCN0 Read
}
}
else if(ros_NewF[istage])
{
for (int i=0; i<NVAR; i++){
varNew(index,i) = var(index,i);
}
for (int j=0; j < (istage); j++){
for (int i=0; i<NVAR; i++){
varNew(index,i) = K(index,j,i)*ros_A[(istage)*(istage-1)/2 + j] + varNew(index,i);
}
}
Fun(varNew, fix, rconst, varNew, Nfun,VL_GLO); // FCN <- varNew / not overlap
}
for (int i=0; i<NVAR; i++)
K(index,istage,i) = varNew(index,i);
for (int j=0; j<(istage); j++)
{
HC = ros_C[(istage)*(istage-1)/2 + j]/(direction*H);
for (int i=0; i<NVAR; i++){
double tmp = K(index,j,i);
K(index,istage,i) += tmp*HC;
}
}
if ((!autonomous) && (ros_Gamma[istage] ))
{
HG = direction*H*ros_Gamma[istage];
for (int i=0; i<NVAR; i++){
K(index,istage,i) += dFdT(index,i)*HG;
}
}
// R ,RW, RW, R, R
ros_Solve(Ghimj, K, Nsol, istage, ros_S);
} // Stage
// ~~~> Compute the new solution
for (int i=0; i<NVAR; i++){
double tmpNew = var(index,i); /// VAR READ
double tmpErr = ZERO;
for (int j=0; j<ros_S; j++){
double tmp = K(index,j,i);
#ifdef DEBUG
if (isnan(tmp)){
printf("Solver detected NAN!");
tmp = 0;
}
#endif
tmpNew += tmp*ros_M[j];
tmpErr += tmp*ros_E[j];
}
varNew(index,i) = tmpNew; // varNew is killed
varErr(index,i) = tmpErr;
}
Err = ros_ErrorNorm(var, varNew, varErr, absTol, relTol, vectorTol); /// VAR-varNew READ
// ~~~> New step size is bounded by FacMin <= Hnew/H <= FacMax
Fac = fmin(FacMax,fmax(FacMin,FacSafe/pow(Err,ONE/ros_ELO)));
Hnew = H*Fac;
// ~~~> Check the error magnitude and adjust step size
Nstp = Nstp+ 1;
if((Err <= ONE) || (H <= Hmin)) // ~~~> Accept step
{
Nacc = Nacc + 1;
for (int j=0; j<NVAR ; j++)
var(index,j) = fmax(varNew(index,j),ZERO); /////////// VAR WRITE - last VarNew read
T = T + direction*H;
Hnew = fmax(Hmin,fmin(Hnew,Hmax));
if (rejectLastH) // No step size increase after a rejected step
Hnew = fmin(Hnew,H);
rejectLastH = 0;
rejectMoreH = 0;
H = Hnew;
break; // EXIT THE LOOP: WHILE STEP NOT ACCEPTED
}
else // ~~~> Reject step
{
if (rejectMoreH)
Hnew = H*FacRej;
rejectMoreH = rejectLastH;
rejectLastH = 1;
H = Hnew;
if (Nacc >= 1)
Nrej += 1;
} // Err <= 1
} // UntilAccepted
} // TimeLoop
// ~~~> Successful exit
return 0; // ~~~> The integration was successful
}
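/* Note: ros_Integrator returns 0 on success, -6 when Max_no_steps is exceeded and
 * -7 when the step size underflows to the roundoff level; the remaining error
 * codes (-1..-5) are produced on the host from the icntrl/rcntrl validation in
 * kpp_integrate_cuda_(). */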
typedef struct {
double ros_A[15];
double ros_C[15];
int ros_NewF[8];
double ros_M[6];
double ros_E[6];
double ros_Alpha[6];
double ros_Gamma[6];
double ros_ELO;
int ros_S;
} ros_t;
/*
* Lookup tables for different ROS for branch elimination. It is much faster in GPU.
*/
__device__ __constant__ ros_t ros[5] = {
{
{.58578643762690495119831127579030,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, /* ros_A */
{-1.17157287525380990239662255158060,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, /* ros_C */
{1,1,0,0,0,0,0,0}, /* ros_NewF */
{.87867965644035742679746691368545,.29289321881345247559915563789515,0,0,0,0}, /* ros_M */
{.29289321881345247559915563789515,.29289321881345247559915563789515,0,0,0,0}, /* ros_E */
{0,1.0,0,0,0,0}, /* ros_Alpha */
{1.70710678118654752440084436210485,-1.70710678118654752440084436210485,0,0,0,0}, /* ros_Gamma */
2.0, /* ros_ELO */
2, /* ros_S*/
}, /* Ros2 */
{
{1.0,1.0,0,0,0,0,0,0,0,0,0,0,0,0,0}, /* ros_A */
{-0.10156171083877702091975600115545E+01, 0.40759956452537699824805835358067E+01,0.92076794298330791242156818474003E+01,0,0,0,0,0,0,0,0,0,0,0,0}, /* ros_C */
{1,1,0,0,0,0,0,0}, /* ros_NewF */
{0.1E+01,0.61697947043828245592553615689730E+01,-0.42772256543218573326238373806514E+00,0,0,0}, /* ros_M */
{0.5E+00,- 0.29079558716805469821718236208017E+01,0.22354069897811569627360909276199E+00,0,0,0}, /* ros_E */
{0.0E+00,0.43586652150845899941601945119356E+00,0.43586652150845899941601945119356E+00,0,0,0}, /* ros_Alpha */
{0.43586652150845899941601945119356E+00,0.24291996454816804366592249683314E+00,0.21851380027664058511513169485832E+01,0,0,0}, /* ros_Gamma */
3.0, /* ros_ELO */
3
}, /* Ros3 */
{
{0.2000000000000000E+01, 0.1867943637803922E+01, 0.2344449711399156E+00, 0.1867943637803922E+01, 0.2344449711399156E+00,0,0,0,0,0,0,0,0,0,0}, /* ros_A */
{-0.7137615036412310E+01,0.2580708087951457E+01,0.6515950076447975E+00, - 0.2137148994382534E+01, - 0.3214669691237626E+00, - 0.6949742501781779E+00 ,0,0,0,0,0,0,0,0,0}, /* ros_C */
{1,1,1,0,0,0,0,0}, /* ros_NewF */
{0.2255570073418735E+01, 0.2870493262186792E+00, 0.4353179431840180E+00, 0.1093502252409163E+01,0,0}, /* ros_M */
{ -0.2815431932141155E+00, -0.7276199124938920E-01, -0.1082196201495311E+00, -0.1093502252409163E+01, 0, 0}, /* ros_E */
{0.0, 0.1145640000000000E+01, 0.6552168638155900E+00, 0.6552168638155900E+00,0,0}, /* ros_Alpha */
{ 0.5728200000000000E+00, -0.1769193891319233E+01, 0.7592633437920482E+00, -0.1049021087100450E+00,0,0}, /* ros_Gamma */
4.0, /* ros_ELO */
4
}, /* Ros4 */
{
{ 0.0E+00, 2.0E+00, 0.0E+00, 2.0E+00, 0.0E+00, 1.0E+00, 0,0,0,0,0,0,0,0,0}, /* ros_A */
{ 4.0E+00, 1.0E+00, - 1.0E+00, 1.0E+00, - 1.0E+00, - 2.66666666666666666666666666666666, 0,0,0,0,0,0,0,0,0}, /* ros_C */
{1,0,1,1,0,0,0,0}, /* ros_NewF */
{2.0,0,1.0,1.0,0,0}, /* ros_M */
{0,0,0,1.0,0,0}, /* ros_E */
{0,0,1.0,1.0,0,0}, /* ros_Alpha */
{0.5,1.5,0,0,0,0}, /* ros_Gamma */
3.0, /* ros_ELO */
4
}, /* Rodas3 */
{
{
0.1544000000000000E+01, 0.9466785280815826E+00, 0.2557011698983284E+00, 0.3314825187068521E+01,
0.2896124015972201E+01, 0.9986419139977817E+00, 0.1221224509226641E+01, 0.6019134481288629E+01,
0.1253708332932087E+02, -0.6878860361058950E+00, 0.1221224509226641E+01, 0.6019134481288629E+01,
0.1253708332932087E+02, -0.6878860361058950E+00, 1.0E+00}, /* ros_A */
{
-0.5668800000000000E+01, -0.2430093356833875E+01, -0.2063599157091915E+00, -0.1073529058151375E+00,
-0.9594562251023355E+01, -0.2047028614809616E+02, 0.7496443313967647E+01, -0.1024680431464352E+02,
-0.3399990352819905E+02, 0.1170890893206160E+02, 0.8083246795921522E+01, -0.7981132988064893E+01,
-0.3152159432874371E+02, 0.1631930543123136E+02, -0.6058818238834054E+01}, /* ros_C */
{1,1,1,1,1,1,0,0}, /* ros_NewF */
{0.1221224509226641E+01,0.6019134481288629E+01,0.1253708332932087E+02,- 0.6878860361058950E+00,1,1}, /* ros_M */
{0,0,0,0,0,1.0}, /* ros_E */
{0.000, 0.386, 0.210, 0.630, 1.000, 1.000}, /* ros_Alpha */
{0.2500000000000000E+00, -0.1043000000000000E+00, 0.1035000000000000E+00, 0.3620000000000023E-01, 0, 0}, /* ros_Gamma */
4.0, /* ros_ELO */
6
} /* Rodas4 */
};
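/* Note: ros[0]=Ros2, ros[1]=Ros3, ros[2]=Ros4, ros[3]=Rodas3, ros[4]=Rodas4.
 * The host passes method in 1..5 and the kernel indexes ros[method-1], so the
 * icntrl[2]==0 default (method=4) selects Rodas3. */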
//__device__ double rconst_local[MAX_VL_GLO*NREACT];
/* Initialize rconst local */
//__device__ double * rconst_local;
__device__ double k_3rd(double temp, double cair, double k0_300K, double n, double kinf_300K, double m, double fc)
/*
*
* temp temperature [K]
* cair air concentration [molecules/cm3]
* k0_300K low pressure limit at 300 K
* n exponent for low pressure limit
* kinf_300K high pressure limit at 300 K
* m exponent for high pressure limit
* fc broadening factor (usually fc=0.6)
*
*/
{
double zt_help, k0_T, kinf_T, k_ratio, k_3rd_r;
zt_help = 300.0/temp;
k0_T = k0_300K *pow(zt_help,n) *cair;
kinf_T = kinf_300K *pow(zt_help,m);
k_ratio = k0_T/kinf_T;
k_3rd_r = k0_T/(1.0+ k_ratio)*pow(fc,1.0/(1.0+ pow(log10(k_ratio),2)));
return k_3rd_r;
}
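/* Note: this is the standard Troe fall-off expression,
 *   k = k0(T)*cair / (1 + k0(T)*cair/kinf(T)) * fc^( 1/(1 + log10(k0(T)*cair/kinf(T))^2) ),
 * with k0(T) = k0_300K*(300/T)^n and kinf(T) = kinf_300K*(300/T)^m. */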
__device__ double k_3rd_iupac(double temp, double cair, double k0_300K, double n, double kinf_300K, double m, double fc)
/*
*
* temp temperature [K]
* cair air concentration [molecules/cm3]
* k0_300K low pressure limit at 300 K
* n exponent for low pressure limit
* kinf_300K high pressure limit at 300 K
* m exponent for high pressure limit
* fc broadening factor (e.g. 0.45 or 0.6...)
* nu N
*
*/
{
double zt_help, k0_T, kinf_T, k_ratio, nu, k_3rd_iupac_r;
zt_help = 300.0/temp;
k0_T = k0_300K *pow(zt_help,n) *cair;
kinf_T = kinf_300K *pow(zt_help,m);
k_ratio = k0_T/kinf_T;
nu = 0.75- 1.27*log10(fc);
k_3rd_iupac_r = k0_T/(1.0+ k_ratio)*pow(fc,1.0/(1.0+ pow(log10(k_ratio)/nu,2)));
return k_3rd_iupac_r;
}
double * temp_gpu;
double * press_gpu;
double * cair_gpu;
=#=#=#=#=#=#=#=#=#=#=update_rconst=#=#=#=#=#=#=#=#=#=#=
__global__
void Rosenbrock(double * __restrict__ conc, const double Tstart, const double Tend, double * __restrict__ rstatus, int * __restrict__ istatus,
// values calculated from icntrl and rcntrl at host
const int autonomous, const int vectorTol, const int UplimTol, const int method, const int Max_no_steps,
double * __restrict__ d_jac0, double * __restrict__ d_Ghimj, double * __restrict__ d_varNew, double * __restrict__ d_K, double * __restrict__ d_varErr,double * __restrict__ d_dFdT ,double * __restrict__ d_Fcn0, double * __restrict__ d_var, double * __restrict__ d_fix, double * __restrict__ d_rconst,
const double Hmin, const double Hmax, const double Hstart, const double FacMin, const double FacMax, const double FacRej, const double FacSafe, const double roundoff,
// cuda global mem buffers
const double * __restrict__ absTol, const double * __restrict__ relTol,
// for update_rconst
const double * __restrict__ khet_st, const double * __restrict__ khet_tr,
const double * __restrict__ jx,
// global input
const double * __restrict__ temp_gpu,
const double * __restrict__ press_gpu,
const double * __restrict__ cair_gpu,
// extra
const int VL_GLO)
{
int index = blockIdx.x*blockDim.x+threadIdx.x;
/*
 * In theory these accesses could be aggregated, but because of the
 * algorithm each thread works on a different region of memory,
 * which makes the accesses hard to optimize.
*
*/
double *Ghimj = &d_Ghimj[index*LU_NONZERO];
double *K = &d_K[index*NVAR*6];
double *varNew = &d_varNew[index*NVAR];
double *Fcn0 = &d_Fcn0[index*NVAR];
double *dFdT = &d_dFdT[index*NVAR];
double *jac0 = &d_jac0[index*LU_NONZERO];
double *varErr = &d_varErr[index*NVAR];
double *var = &d_var[index*NSPEC];
double *fix = &d_fix[index*NFIX];
double *rconst = &d_rconst[index*NREACT];
if (index < VL_GLO)
{
int Nfun,Njac,Nstp,Nacc,Nrej,Ndec,Nsol,Nsng;
double Texit, Hexit;
Nfun = 0;
Njac = 0;
Nstp = 0;
Nacc = 0;
Nrej = 0;
Ndec = 0;
Nsol = 0;
Nsng = 0;
/* FIXME: add check for method */
const double *ros_A = &ros[method-1].ros_A[0];
const double *ros_C = &ros[method-1].ros_C[0];
const double *ros_M = &ros[method-1].ros_M[0];
const double *ros_E = &ros[method-1].ros_E[0];
const double *ros_Alpha = &ros[method-1].ros_Alpha[0];
const double *ros_Gamma = &ros[method-1].ros_Gamma[0];
const int *ros_NewF = &ros[method-1].ros_NewF[0];
const int ros_S = ros[method-1].ros_S;
const double ros_ELO = ros[method-1].ros_ELO;
/* Copy data from global memory to temporary array */
/*
* Optimization note: if we ever have enough constant
* memory, we could use it for storing the data.
* In current architectures if we use constant memory
* only a few threads will be able to run on the fly.
*
*/
for (int i=0; i<NSPEC; i++)
var(index,i) = conc(index,i);
for (int i=0; i<NFIX; i++)
fix(index,i) = conc(index,NVAR+i);
update_rconst(var, khet_st, khet_tr, jx, rconst, temp_gpu, press_gpu, cair_gpu, VL_GLO);
ros_Integrator(var, fix, Tstart, Tend, Texit,
// Rosenbrock method coefficients
ros_S, ros_M, ros_E, ros_A, ros_C,
ros_Alpha, ros_Gamma, ros_ELO, ros_NewF,
// Integration parameters
autonomous, vectorTol, Max_no_steps,
roundoff, Hmin, Hmax, Hstart, Hexit,
FacMin, FacMax, FacRej, FacSafe,
// Status parameters
Nfun, Njac, Nstp, Nacc, Nrej, Ndec, Nsol, Nsng,
// cuda global mem buffers
rconst, absTol, relTol, varNew, Fcn0,
K, dFdT, jac0, Ghimj, varErr,
// For update rconst
khet_st, khet_tr, jx,
VL_GLO
);
for (int i=0; i<NVAR; i++)
conc(index,i) = var(index,i);
/* Statistics */
istatus(index,ifun) = Nfun;
istatus(index,ijac) = Njac;
istatus(index,istp) = Nstp;
istatus(index,iacc) = Nacc;
istatus(index,irej) = Nrej;
istatus(index,idec) = Ndec;
istatus(index,isol) = Nsol;
istatus(index,isng) = Nsng;
// Last T and H
rstatus(index,itexit) = Texit;
rstatus(index,ihexit) = Hexit;
}
}
=#=#=#=#=#=#=#=#=#=#=special_ros=#=#=#=#=#=#=#=#=#=#=
// no int8 in CUDA :(
__global__ void reduce_istatus_1(int *istatus, int4 *tmp_out_1, int4 *tmp_out_2, int VL_GLO, int *xNacc, int *xNrej)
{
int index = blockIdx.x*blockDim.x+threadIdx.x;
int idx_1 = threadIdx.x;
int global_size = blockDim.x*gridDim.x;
int foo;
//no int8 in CUDA :(
int4 accumulator_1 = make_int4(0,0,0,0);
int4 accumulator_2 = make_int4(0,0,0,0);
while (index < VL_GLO)
{
accumulator_1.x += istatus(index,0);
accumulator_1.y += istatus(index,1);
accumulator_1.z += istatus(index,2);
//some dirty work on the side...
foo = istatus(index,3);
xNacc[index] = foo;
accumulator_1.w += foo;
foo = istatus(index,4);
xNrej[index] = foo;
accumulator_2.x += foo;
accumulator_2.y += istatus(index,5);
accumulator_2.z += istatus(index,6);
accumulator_2.w += istatus(index,7);
index += global_size;
}
//no int8 in CUDA :(
__shared__ int4 buffer_1[REDUCTION_SIZE_1];
__shared__ int4 buffer_2[REDUCTION_SIZE_1];
buffer_1[idx_1] = accumulator_1;
buffer_2[idx_1] = accumulator_2;
__syncthreads();
int idx_2, active_threads = blockDim.x;
int4 tmp_1, tmp_2;
while (active_threads != 1)
{
active_threads /= 2;
if (idx_1 < active_threads)
{
idx_2 = idx_1+active_threads;
tmp_1 = buffer_1[idx_1];
tmp_2 = buffer_1[idx_2];
tmp_1.x += tmp_2.x;
tmp_1.y += tmp_2.y;
tmp_1.z += tmp_2.z;
tmp_1.w += tmp_2.w;
buffer_1[idx_1] = tmp_1;
tmp_1 = buffer_2[idx_1];
tmp_2 = buffer_2[idx_2];
tmp_1.x += tmp_2.x;
tmp_1.y += tmp_2.y;
tmp_1.z += tmp_2.z;
tmp_1.w += tmp_2.w;
buffer_2[idx_1] = tmp_1;
}
__syncthreads();
}
if (idx_1 == 0)
{
tmp_out_1[blockIdx.x] = buffer_1[0];
tmp_out_2[blockIdx.x] = buffer_2[0];
}
}
__global__ void reduce_istatus_2(int4 *tmp_out_1, int4 *tmp_out_2, int *out)
{
int idx_1 = threadIdx.x;
//no int8 in CUDA :(
__shared__ int4 buffer_1[REDUCTION_SIZE_2];
__shared__ int4 buffer_2[REDUCTION_SIZE_2];
buffer_1[idx_1] = tmp_out_1[idx_1];
buffer_2[idx_1] = tmp_out_2[idx_1];
__syncthreads();
int idx_2, active_threads = blockDim.x;
int4 tmp_1, tmp_2;
while (active_threads != 1)
{
active_threads /= 2;
if (idx_1 < active_threads)
{
idx_2 = idx_1+active_threads;
tmp_1 = buffer_1[idx_1];
tmp_2 = buffer_1[idx_2];
tmp_1.x += tmp_2.x;
tmp_1.y += tmp_2.y;
tmp_1.z += tmp_2.z;
tmp_1.w += tmp_2.w;
buffer_1[idx_1] = tmp_1;
tmp_1 = buffer_2[idx_1];
tmp_2 = buffer_2[idx_2];
tmp_1.x += tmp_2.x;
tmp_1.y += tmp_2.y;
tmp_1.z += tmp_2.z;
tmp_1.w += tmp_2.w;
buffer_2[idx_1] = tmp_1;
}
__syncthreads();
}
if (idx_1 == 0)
{
tmp_1 = buffer_1[0];
tmp_2 = buffer_2[0];
out[0] = tmp_1.x;
out[1] = tmp_1.y;
out[2] = tmp_1.z;
out[3] = tmp_1.w;
out[4] = tmp_2.x;
out[5] = tmp_2.y;
out[6] = tmp_2.z;
out[7] = tmp_2.w;
}
}
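/* Note: reduce_istatus_1 strides over all VL_GLO cells, packs the eight per-cell
 * counters into two int4 accumulators, tree-reduces them in shared memory and
 * writes one partial sum per block; reduce_istatus_2 is then launched as a single
 * block to fold those partials into out[0..7]. Both tree reductions assume a
 * power-of-two blockDim.x. */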
/* Assuming different processes */
enum { TRUE=1, FALSE=0 } ;
double *d_conc, *d_temp, *d_press, *d_cair, *d_khet_st, *d_khet_tr, *d_jx, *d_jac0, *d_Ghimj, *d_varNew, *d_K, *d_varErr, *d_dFdT, *d_Fcn0, *d_var, *d_fix, *d_rconst;
int initialized = FALSE;
/* Device pointers pointing to GPU */
double *d_rstatus, *d_absTol, *d_relTol;
int *d_istatus, *d_istatus_rd, *d_xNacc, *d_xNrej;
int4 *d_tmp_out_1, *d_tmp_out_2;
/* Allocate arrays on device for Rosenbrock */
__host__ void init_first_time(int pe, int VL_GLO, int size_khet_st, int size_khet_tr, int size_jx ){
/* Select the proper GPU CARD */
int deviceCount, device;
gpuErrchk( hipGetDeviceCount(&deviceCount) );
device = pe % deviceCount;
gpuErrchk( hipSetDevice(device) );
printf("PE[%d]: selected %d of total %d\n",pe,device,deviceCount);
hipDeviceSetCacheConfig(hipFuncCachePreferL1);
gpuErrchk( hipMalloc ((void **) &d_conc , sizeof(double)*VL_GLO*(NSPEC)) );
gpuErrchk( hipMalloc ((void **) &d_khet_st, sizeof(double)*VL_GLO*size_khet_st) );
gpuErrchk( hipMalloc ((void **) &d_khet_tr, sizeof(double)*VL_GLO*size_khet_tr) );
gpuErrchk( hipMalloc ((void **) &d_jx , sizeof(double)*VL_GLO*size_jx) );
gpuErrchk( hipMalloc ((void **) &d_rstatus , sizeof(double)*VL_GLO*2) );
gpuErrchk( hipMalloc ((void **) &d_istatus , sizeof(int)*VL_GLO*8) );
gpuErrchk( hipMalloc ((void **) &d_absTol , sizeof(double)*NVAR) );
gpuErrchk( hipMalloc ((void **) &d_relTol , sizeof(double)*NVAR) );
/* Allocate input arrays */
gpuErrchk( hipMalloc ((void **) &temp_gpu , sizeof(double)*VL_GLO) );
gpuErrchk( hipMalloc ((void **) &press_gpu , sizeof(double)*VL_GLO) );
gpuErrchk( hipMalloc ((void **) &cair_gpu , sizeof(double)*VL_GLO) );
/* Allocate arrays on device for reducing metrics */
gpuErrchk( hipMalloc ((void **) &d_istatus_rd , sizeof(int)*8));
gpuErrchk( hipMalloc ((void **) &d_tmp_out_1 , sizeof(int4)*64));
gpuErrchk( hipMalloc ((void **) &d_tmp_out_2 , sizeof(int4)*64));
gpuErrchk( hipMalloc ((void **) &d_xNacc , sizeof(int)*VL_GLO));
gpuErrchk( hipMalloc ((void **) &d_xNrej , sizeof(int)*VL_GLO));
/* Allocate arrays for solvers on device global memory to reduce the stack usage */
gpuErrchk( hipMalloc ((void **) &d_jac0, sizeof(double)*VL_GLO*LU_NONZERO) );
gpuErrchk( hipMalloc ((void **) &d_Ghimj, sizeof(double)*VL_GLO*LU_NONZERO) );
gpuErrchk( hipMalloc ((void **) &d_varNew, sizeof(double)*VL_GLO*NVAR) );
gpuErrchk( hipMalloc ((void **) &d_Fcn0, sizeof(double)*VL_GLO*NVAR) );
gpuErrchk( hipMalloc ((void **) &d_dFdT, sizeof(double)*VL_GLO*NVAR) );
gpuErrchk( hipMalloc ((void **) &d_K, sizeof(double)*VL_GLO*NVAR*6) ); // TODO: Change size according to solver steps
gpuErrchk( hipMalloc ((void **) &d_varErr, sizeof(double)*VL_GLO*NVAR) );
gpuErrchk( hipMalloc ((void **) &d_var, sizeof(double)*VL_GLO*NSPEC) );
gpuErrchk( hipMalloc ((void **) &d_fix, sizeof(double)*VL_GLO*NFIX) );
gpuErrchk( hipMalloc ((void **) &d_rconst, sizeof(double)*VL_GLO*NREACT) );
initialized = TRUE;
}
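/* Note: every per-cell work buffer above is sized VL_GLO times its per-thread
 * slice (NVAR, NSPEC, NFIX, NREACT or LU_NONZERO); the Rosenbrock kernel later
 * carves out each thread's private slice with pointer arithmetic such as
 * &d_K[index*NVAR*6]. */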
/*
 * TODO: We should call this at some point to release the device memory.
*/
extern "C" void finalize_cuda(){
/* Free memory on the device */
gpuErrchk( hipFree(d_conc ) );
gpuErrchk( hipFree(d_temp ) );
gpuErrchk( hipFree(d_press ) );
gpuErrchk( hipFree(d_cair ) );
gpuErrchk( hipFree(d_khet_st ) );
gpuErrchk( hipFree(d_khet_tr ) );
gpuErrchk( hipFree(d_jx ) );
gpuErrchk( hipFree(d_rstatus ) );
gpuErrchk( hipFree(d_istatus ) );
gpuErrchk( hipFree(d_absTol ) );
gpuErrchk( hipFree(d_relTol ) );
gpuErrchk( hipFree(d_istatus_rd ) );
gpuErrchk( hipFree(d_tmp_out_1 ) );
gpuErrchk( hipFree(d_tmp_out_2 ) );
gpuErrchk( hipFree(d_xNacc ) );
gpuErrchk( hipFree(d_xNrej ) );
gpuErrchk( hipFree(temp_gpu ) );
gpuErrchk( hipFree(press_gpu ) );
gpuErrchk( hipFree(cair_gpu ) );
gpuErrchk( hipFree(d_jac0 ) );
gpuErrchk( hipFree(d_Ghimj ) );
gpuErrchk( hipFree(d_varNew ) );
gpuErrchk( hipFree(d_Fcn0 ) );
gpuErrchk( hipFree(d_dFdT ) );
gpuErrchk( hipFree(d_K ) );
gpuErrchk( hipFree(d_varErr ) );
gpuErrchk( hipFree(d_var ) );
gpuErrchk( hipFree(d_fix ) );
gpuErrchk( hipFree(d_rconst ) );
}
extern "C" void kpp_integrate_cuda_( int *pe_p, int *sizes, double *time_step_len_p, double *conc, double *temp, double *press, double *cair,
double *khet_st, double *khet_tr, double *jx, double *absTol, double *relTol, int *ierr, int *istatus,
int *xNacc, int *xNrej, double *rndoff, int *icntrl=NULL, double *rcntrl=NULL
)
/* // TODO
* Parameters:
* pe_p: scalar int - processor element
* VL_GLO: scalar int - size of the system
* NSPEC: scalar int - number of species
* NREACT: scalar int - number of reactions
* NVAR: scalar int -
*
* Input data:
* conc: 2D array of doubles - size: vl_glo x number of species
* temp: 1D array of doubles - size: vl_glo
* press: 1D array of doubles - size: vl_glo
* cair: 1D array of doubles - size: vl_glo
* khet_st: 2D array of doubles - size: vl_glo x number of species
* khet_tr: 2D array of doubles - size: vl_glo x number of species
* jx: 2D array of doubles - size: vl_glo x number of species
* absTol: 1D array of doubles - size: number of species
* relTol: 1D array of doubles - size: number of species
* Control:
* icntrl: 1D array of ints - size: 4
* sizes: 1D array of ints - size: 4
* rcntrl: 1D array of doubles - size: 7
*
*
*/
{
const double DELTAMIN = 1.0E-5;
int VL_GLO = sizes[0];
int size_khet_st = sizes[1];
int size_khet_tr = sizes[2];
int size_jx = sizes[3];
double roundoff = *rndoff;
double Tstart,Tend;
Tstart = ZERO;
Tend = *time_step_len_p;
int pe = *pe_p;
// variables from rcntrl and icntrl
int autonomous, vectorTol, UplimTol, method, Max_no_steps;
double Hmin, Hmax, Hstart, FacMin, FacMax, FacRej, FacSafe;
//int rcntrl_bool = 0, icntrl_bool=0;
if (rcntrl == NULL)
{
rcntrl = new double[7];
for (int i=0; i < 7; i++)
rcntrl[i] = 0.0;
}
if (icntrl == NULL)
{
icntrl = new int[4];
for (int i=0; i < 4; i++)
icntrl[i] = 0;
}
/* Allocate arrays on device for update_rconst kernel*/
if (initialized == FALSE) init_first_time(pe, VL_GLO, size_khet_st, size_khet_tr, size_jx);
/* Copy data from host memory to device memory */
gpuErrchk( hipMemcpy(d_conc , conc , sizeof(double)*VL_GLO*NSPEC , hipMemcpyHostToDevice) );
gpuErrchk( hipMemcpy(temp_gpu , temp , sizeof(double)*VL_GLO , hipMemcpyHostToDevice) );
gpuErrchk( hipMemcpy(press_gpu , press , sizeof(double)*VL_GLO , hipMemcpyHostToDevice) );
gpuErrchk( hipMemcpy(cair_gpu , cair , sizeof(double)*VL_GLO , hipMemcpyHostToDevice) );
gpuErrchk( hipMemcpy(d_khet_st, khet_st , sizeof(double)*VL_GLO*size_khet_st , hipMemcpyHostToDevice) );
gpuErrchk( hipMemcpy(d_khet_tr, khet_tr , sizeof(double)*VL_GLO*size_khet_tr , hipMemcpyHostToDevice) );
gpuErrchk( hipMemcpy(d_jx , jx , sizeof(double)*VL_GLO*size_jx , hipMemcpyHostToDevice) );
/* Copy arrays from host memory to device memory for Rosenbrock */
gpuErrchk( hipMemcpy(d_absTol, absTol, sizeof(double)*NVAR, hipMemcpyHostToDevice) );
gpuErrchk( hipMemcpy(d_relTol, relTol, sizeof(double)*NVAR, hipMemcpyHostToDevice) );
/* Compute execution configuration for update_rconst */
int block_size, grid_size;
block_size = BLOCKSIZE;
grid_size = (VL_GLO + block_size - 1)/block_size;
dim3 dimBlock(block_size);
dim3 dimGrid(grid_size);
/* Execute the kernel */
//update_rconst<<<dimGrid,dimBlock>>>(d_conc, d_khet_st, d_khet_tr, d_jx, VL_GLO);
GPU_DEBUG();
// *------------------------------------------------------*
// | Default values vs input settings (icntrl, rcntrl) |
// *------------------------------------------------------*
int ierr_tmp=0;
{
// autonomous or time dependent ODE. Default is time dependent.
autonomous = !(icntrl[0] == 0);
// For Scalar tolerances (icntrl[1].NE.0) the code uses absTol(0) and relTol(0)
// For Vector tolerances (icntrl[1] == 0) the code uses absTol(0:NVAR) and relTol(0:NVAR)
if (icntrl[1] == 0)
{
vectorTol = 1; //bool
UplimTol = NVAR;
}
else
{
vectorTol = 0;
UplimTol = 1;
}
// The particular Rosenbrock method chosen
if (icntrl[2] == 0)
{
method = 4;
}
else if ((icntrl[2] >= 1) && (icntrl[2] <= 5))
{
method = icntrl[2];
}
else
{
printf("User-selected Rosenbrock method: icntrl[2]=%d\n",method);
ierr_tmp = -2;
}
// The maximum number of steps admitted
if (icntrl[3] == 0)
{
Max_no_steps = 100000;
}
else if (icntrl[3] > 0)
{
Max_no_steps=icntrl[3];
}
else
{
printf("User-selected max no. of steps: icntrl[3]=%d\n",icntrl[3]);
ierr_tmp = -1;
}
// Unit roundoff (1+ roundoff>1)
roundoff = machine_eps_flt();
// Lower bound on the step size: (positive value)
if (rcntrl[0] == ZERO)
{
Hmin = ZERO;
}
else if (rcntrl[0] > ZERO)
{
Hmin = rcntrl[0];
}
else
{
printf("User-selected Hmin: rcntrl[0]=%f\n",rcntrl[0]);
ierr_tmp = -3;
}
// Upper bound on the step size: (positive value)
if (rcntrl[1] == ZERO)
{
Hmax = fabs(Tend-Tstart);
}
else if (rcntrl[1] > ZERO)
{
Hmax = fmin(fabs(rcntrl[1]),fabs(Tend-Tstart));
}
else
{
printf("User-selected Hmax: rcntrl[1]=%f\n",rcntrl[1]);
ierr_tmp = -3;
}
// Starting step size: (positive value)
if (rcntrl[2] == ZERO)
{
Hstart = fmax(Hmin,DELTAMIN);
}
else if (rcntrl[2] > ZERO)
{
Hstart = fmin(fabs(rcntrl[2]),fabs(Tend-Tstart));
}
else
{
printf("User-selected Hstart: rcntrl[2]=%f\n",rcntrl[2]);
ierr_tmp = -3;
}
// Step size can be changed s.t. FacMin < Hnew/Hexit < FacMax
if (rcntrl[3] == ZERO)
{
FacMin = 0.2;
}
else if (rcntrl[3] > ZERO)
{
FacMin = rcntrl[3];
}
else
{
printf("User-selected FacMin: rcntrl[3]=%f\n",rcntrl[3]);
ierr_tmp = -4;
}
if (rcntrl[4] == ZERO)
{
FacMax = 6.0;
}
else if (rcntrl[4] > ZERO)
{
FacMax = rcntrl[4];
}
else
{
printf("User-selected FacMax: rcntrl[4]=%f\n",rcntrl[4]);
ierr_tmp = -4;
}
// FacRej: Factor to decrease step after 2 successive rejections
if (rcntrl[5] == ZERO)
{
FacRej = 0.1;
}
else if (rcntrl[5] > ZERO)
{
FacRej = rcntrl[5];
}
else
{
printf("User-selected FacRej: rcntrl[5]=%f\n",rcntrl[5]);
ierr_tmp = -4;
}
// FacSafe: Safety Factor in the computation of new step size
if (rcntrl[6] == ZERO)
{
FacSafe = 0.9;
}
else if (rcntrl[6] > ZERO)
{
FacSafe = rcntrl[6];
}
else
{
printf("User-selected FacSafe: rcntrl[6]=%f\n",rcntrl[6]);
ierr_tmp = -4;
}
// Check if tolerances are reasonable
for (int i=0; i < UplimTol; i++)
{
if ((absTol[i] <= ZERO) || (relTol[i] <= 10.0*roundoff) || (relTol[i] >= 1.0))
{
printf("CCC absTol(%d) = %f \n",i,absTol[i]);
printf("CCC relTol(%d) = %f \n",i,relTol[i]);
ierr_tmp = -5;
}
}
}
=#=#=#=#=#=#=#=#=#=#=call_kernel=#=#=#=#=#=#=#=#=#=#=
GPU_DEBUG();
hipLaunchKernelGGL(( reduce_istatus_1), dim3(REDUCTION_SIZE_2),dim3(REDUCTION_SIZE_1), 0, 0, d_istatus, d_tmp_out_1, d_tmp_out_2, VL_GLO, d_xNacc, d_xNrej);
GPU_DEBUG();
hipLaunchKernelGGL(( reduce_istatus_2), dim3(1),dim3(REDUCTION_SIZE_2), 0, 0, d_tmp_out_1, d_tmp_out_2, d_istatus_rd);
GPU_DEBUG();
/* Copy the result back */
gpuErrchk( hipMemcpy( conc , d_conc , sizeof(double)*VL_GLO*NVAR, hipMemcpyDeviceToHost) );
gpuErrchk( hipMemcpy( xNacc , d_xNacc , sizeof(int)*VL_GLO , hipMemcpyDeviceToHost) );
gpuErrchk( hipMemcpy( xNrej , d_xNrej , sizeof(int)*VL_GLO , hipMemcpyDeviceToHost) );
return;
}
| 10fee7fba07e92f8715225bfd34a6e2b98ca3d78.cu | /*************************************************************
*
* kpp_integrate_cuda_prototype.cu
* Prototype file for kpp CUDA kernel
*
* Copyright 2016 The Cyprus Institute
*
* Developers: Michail Alvanos - [email protected]
* Giannis Ashiotis
* Theodoros Christoudias - [email protected]
*
********************************************************************/
#include <stdio.h>
#include <unistd.h>
#include "cuda.h"
=#=#=#=#=#=#=#=#=#=#=defines_vars_2=#=#=#=#=#=#=#=#=#=#=
#define BLOCKSIZE 64
//#define MAX_VL_GLO 12288 /* elements that will pass in each call */
#define REDUCTION_SIZE_1 64
#define REDUCTION_SIZE_2 32
=#=#=#=#=#=#=#=#=#=#=defines_vars_1=#=#=#=#=#=#=#=#=#=#=
=#=#=#=#=#=#=#=#=#=#=defines_ind_1=#=#=#=#=#=#=#=#=#=#=
=#=#=#=#=#=#=#=#=#=#=defines_ind_2=#=#=#=#=#=#=#=#=#=#=
=#=#=#=#=#=#=#=#=#=#=defines_ind_3=#=#=#=#=#=#=#=#=#=#=
=#=#=#=#=#=#=#=#=#=#=defines_ind_4=#=#=#=#=#=#=#=#=#=#=
=#=#=#=#=#=#=#=#=#=#=defines_ind_5=#=#=#=#=#=#=#=#=#=#=
#define ifun 0
#define ijac 1
#define istp 2
#define iacc 3
#define irej 4
#define idec 5
#define isol 6
#define isng 7
#define itexit 0
#define ihexit 1
#define ZERO 0.0
#define ONE 1.0
#define HALF 0.5
/*
* Fortran to C macros
 * GPU-friendly array definition
* i:VL_GLO, j:NVAR
*
*/
#define conc(i,j) conc[(j)*VL_GLO+(i)]
#define khet_st(i,j) khet_st[(j)*VL_GLO+(i)]
#define khet_tr(i,j) khet_tr[(j)*VL_GLO+(i)]
#define jx(i,j) jx[j*VL_GLO+i]
#define istatus(i,j) istatus[(j)*(VL_GLO)+(i)]
#define rstatus(i,j) rstatus[(j)*(VL_GLO)+(i)]
#define ROUND128(X) (X + (128 - 1)) & ~(128 - 1)
#define rconst(i,j) rconst[(j)]
/* Temporary arrays allocated in stack */
#define var(i,j) var[(j)]
#define fix(i,j) fix[(j)]
#define jcb(i,j) jcb[(j)]
#define varDot(i,j) varDot[j]
#define varNew(i,j) varNew[(j)]
#define Fcn0(i,j) Fcn0[(j)]
#define Fcn(i,j) Fcn[(j)]
#define dFdT(i,j) dFdT[(j)]
#define varErr(i,j) varErr[(j)]
#define K(i,j,k) K[(j)*(NVAR)+(k)]
#define jac0(i,j) jac0[(j)]
#define Ghimj(i,j) Ghimj[(j)]
/* Enable debug flags for GPU */
//#define DEBUG
#ifdef DEBUG
#define GPU_DEBUG()\
gpuErrchk( cudaPeekAtLastError() ); \
gpuErrchk( cudaDeviceSynchronize() );
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
static inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
#else
/* If debug flags are disabled */
#define GPU_DEBUG()
#define gpuErrchk(ans) ans
#endif
/** prefetches into L1 cache */
__device__ inline void prefetch_gl1(const void *p) {
#if __CUDA_ARCH__ <= 300
asm("prefetch.global.L1 [%0];": :"l"(p));
#endif
}
__device__ inline void prefetch_ll1(const void *p) {
#if __CUDA_ARCH__ <= 300
asm("prefetch.local.L1 [%0];": :"l"(p));
#endif
}
/** prefetches into L2 cache */
__device__ inline void prefetch_gl2(const void *p) {
#if __CUDA_ARCH__ <= 300
asm("prefetch.global.L2 [%0];": :"l"(p));
#endif
}
__device__ inline void prefetch_ll2(const void *p) {
#if __CUDA_ARCH__ <= 300
asm("prefetch.local.L2 [%0];": :"l"(p));
#endif
}
__device__ void update_rconst(const double * __restrict__ var,
const double * __restrict__ khet_st, const double * __restrict__ khet_tr,
const double * __restrict__ jx, double * __restrict__ rconst,
const double * __restrict__ temp_gpu,
const double * __restrict__ press_gpu,
const double * __restrict__ cair_gpu,
const int VL_GLO);
/* This runs on CPU */
double machine_eps_flt()
{
double machEps = 1.0f;
do
{
machEps /= 2.0f;
// If next epsilon yields 1, then break, because current
// epsilon is the machine epsilon.
}
while ((double)(1.0 + (machEps/2.0)) != 1.0);
return machEps;
}
/* This runs on GPU */
__device__ double machine_eps_flt_cuda()
{
typedef union
{
long i64;
double f64;
} flt_64;
flt_64 s;
s.f64 = 1.;
s.i64++;
return (s.f64 - 1.);
}
__device__ static double alpha_AN(const int n, const int ro2type, const double temp, const double cair){
double alpha=2.E-22, beta=1.0, Yinf_298K=0.43, F=0.41, m0=0., minf=8.0;
double Y0_298K, Y0_298K_tp, Yinf_298K_t, zeta, k_ratio, alpha_a;
/* IF (ro2type = 1) THEN m = 0.4 ! primary RO2
ELSE IF (ro2type = 2) THEN m = 1. ! secondary RO2
ELSE IF (ro2type = 3) THEN m = 0.3 ! tertiary RO2
ELSE m = 1.
*/
double m = 1.;
Y0_298K = alpha*exp(beta*n);
Y0_298K_tp = Y0_298K *cair *pow((temp/298.),(- m0));
Yinf_298K_t = Yinf_298K * pow((temp/298.),(- minf));
zeta = 1/(1+ pow(log10(Y0_298K_tp/Yinf_298K_t),2));
k_ratio = (Y0_298K_tp/(1+ Y0_298K_tp/Yinf_298K_t))*pow(F,zeta);
alpha_a = k_ratio/(1+ k_ratio) *m;
return alpha_a;
}
__device__ static double alpha_AN(const int n, const int ro2type, const int bcarb, const int gcarb, const int abic, const double temp, const double cair){
double alpha=2.E-22, beta=1.0, Yinf_298K=0.43, F=0.41, m0=0., minf=8.0;
double Y0_298K, Y0_298K_tp, Yinf_298K_t, zeta, k_ratio, alpha_a;
double bcf=1., gcf=1., abf=1.;
double m = 1.; //According to Teng, ref3189
if (bcarb == 1) { bcf = 0.19; }// derived from Praske, ref3190: alpha_AN = 0.03 for the secondary HMKO2 relative to alpha_AN for 6C RO2 (0.16)
if (gcarb == 1) {gcf = 0.44; }// derived from Praske, ref3190: alpha_AN = 0.07 for the primary HMKO2 relative to alpha_AN for 6C RO2 (0.16)
if (abic == 1) { abf = 0.24; }// derived from the ratio of AN- yield for toluene from Elrod et al. (ref3180), 5.5% &
// 200 torr, and this SAR for linear alkyl RO2 with 9 heavy atoms, 23.3%
Y0_298K = alpha*exp(beta*n);
Y0_298K_tp = Y0_298K *cair *pow((temp/298.),(- m0));
Yinf_298K_t = Yinf_298K * pow((temp/298.),(- minf));
zeta = 1/(1+ pow(log10(Y0_298K_tp/Yinf_298K_t),2));
k_ratio = (Y0_298K_tp/(1+ Y0_298K_tp/Yinf_298K_t))*pow(F,zeta);
alpha_a = k_ratio/(1+ k_ratio) *m*bcf*gcf*abf;
return alpha_a;
}
__device__ static double k_RO2_HO2(const double temp, const int nC){
return 2.91e-13*exp(1300./temp)*(1.-exp(-0.245*nC)); // ref1630
}
__device__ double ros_ErrorNorm(double * __restrict__ var, double * __restrict__ varNew, double * __restrict__ varErr,
const double * __restrict__ absTol, const double * __restrict__ relTol,
const int vectorTol )
{
double err, scale, varMax;
err = ZERO;
if (vectorTol){
for (int i=0;i<NVAR - 16;i+=16){
prefetch_ll1(&varErr[i]);
prefetch_ll1(&absTol[i]);
prefetch_ll1(&relTol[i]);
prefetch_ll1(&var[i]);
prefetch_ll1(&varNew[i]);
}
for (int i=0; i<NVAR; i++)
{
varMax = fmax(fabs(var[i]),fabs(varNew[i]));
scale = absTol[i]+ relTol[i]*varMax;
err += pow((double)varErr[i]/scale,2.0);
}
err = sqrt((double) err/NVAR);
}else{
for (int i=0;i<NVAR - 16;i+=16){
prefetch_ll1(&varErr[i]);
prefetch_ll1(&var[i]);
prefetch_ll1(&varNew[i]);
}
for (int i=0; i<NVAR; i++)
{
varMax = fmax(fabs(var[i]),fabs(varNew[i]));
scale = absTol[0]+ relTol[0]*varMax;
err += pow((double)varErr[i]/scale,2.0);
}
err = sqrt((double) err/NVAR);
}
return err;
}
=#=#=#=#=#=#=#=#=#=#=kppSolve=#=#=#=#=#=#=#=#=#=#=
__device__ void ros_Solve(double * __restrict__ Ghimj, double * __restrict__ K, int &Nsol, const int istage, const int ros_S)
{
#pragma unroll 4
for (int i=0;i<LU_NONZERO-16;i+=16){
prefetch_ll1(&Ghimj[i]);
}
kppSolve(Ghimj, K, istage, ros_S);
Nsol++;
}
=#=#=#=#=#=#=#=#=#=#=kppDecomp=#=#=#=#=#=#=#=#=#=#=
__device__ void ros_Decomp(double * __restrict__ Ghimj, int &Ndec, int VL_GLO)
{
kppDecomp(Ghimj, VL_GLO);
Ndec++;
}
=#=#=#=#=#=#=#=#=#=#=ros_PrepareMatrix=#=#=#=#=#=#=#=#=#=#=
=#=#=#=#=#=#=#=#=#=#=Jac_sp=#=#=#=#=#=#=#=#=#=#=
=#=#=#=#=#=#=#=#=#=#=Fun=#=#=#=#=#=#=#=#=#=#=
__device__ void ros_FunTimeDerivative(const double T, double roundoff, double * __restrict__ var, const double * __restrict__ fix,
const double * __restrict__ rconst, double *dFdT, double *Fcn0, int &Nfun,
const double * __restrict__ khet_st, const double * __restrict__ khet_tr,
const double * __restrict__ jx,
const int VL_GLO)
{
int index = blockIdx.x*blockDim.x+threadIdx.x;
const double DELTAMIN = 1.0E-6;
double delta,one_over_delta;
delta = sqrt(roundoff)*fmax(DELTAMIN,fabs(T));
one_over_delta = 1.0/delta;
Fun(var, fix, rconst, dFdT, Nfun, VL_GLO);
for (int i=0; i < NVAR; i++){
dFdT(index,i) = (dFdT(index,i) - Fcn0(index,i)) * one_over_delta;
}
}
__device__ static int ros_Integrator(double * __restrict__ var, const double * __restrict__ fix, const double Tstart, const double Tend, double &T,
// Rosenbrock method coefficients
const int ros_S, const double * __restrict__ ros_M, const double * __restrict__ ros_E, const double * __restrict__ ros_A, const double * __restrict__ ros_C,
const double * __restrict__ ros_Alpha, const double * __restrict__ ros_Gamma, const double ros_ELO, const int * ros_NewF,
// Integration parameters
const int autonomous, const int vectorTol, const int Max_no_steps,
const double roundoff, const double Hmin, const double Hmax, const double Hstart, double &Hexit,
const double FacMin, const double FacMax, const double FacRej, const double FacSafe,
// Status parameters
int &Nfun, int &Njac, int &Nstp, int &Nacc, int &Nrej, int &Ndec, int &Nsol, int &Nsng,
// cuda global mem buffers
const double * __restrict__ rconst, const double * __restrict__ absTol, const double * __restrict__ relTol, double * __restrict__ varNew, double * __restrict__ Fcn0,
double * __restrict__ K, double * __restrict__ dFdT, double * __restrict__ jac0, double * __restrict__ Ghimj, double * __restrict__ varErr,
// for update_rconst
const double * __restrict__ khet_st, const double * __restrict__ khet_tr,
const double * __restrict__ jx,
// VL_GLO
const int VL_GLO)
{
int index = blockIdx.x*blockDim.x+threadIdx.x;
double H, Hnew, HC, HG, Fac; // Tau - not used
double Err; //*varErr;
int direction;
int rejectLastH, rejectMoreH;
const double DELTAMIN = 1.0E-5;
// ~~~> Initial preparations
T = Tstart;
Hexit = 0.0;
H = fmin(Hstart,Hmax);
if (fabs(H) <= 10.0*roundoff)
H = DELTAMIN;
if (Tend >= Tstart)
{
direction = + 1;
}
else
{
direction = - 1;
}
rejectLastH=0;
rejectMoreH=0;
// ~~~> Time loop begins below
// TimeLoop:
while( ((direction > 0) && ((T - Tend) + roundoff <= ZERO)) || ((direction < 0) && ((Tend - T) + roundoff <= ZERO)) )
{
if (Nstp > Max_no_steps) // Too many steps
return -6;
// Step size too small
if (H <= roundoff){ // Step size too small
//if (((T+ 0.1*H) == T) || (H <= roundoff)) {
return -7;
}
// ~~~> Limit H if necessary to avoid going beyond Tend
Hexit = H;
H = fmin(H,fabs(Tend-T));
// ~~~> Compute the function at current time
Fun(var, fix, rconst, Fcn0, Nfun, VL_GLO); /// VAR READ - Fcn0 Write
// ~~~> Compute the function derivative with respect to T
if (!autonomous)
ros_FunTimeDerivative(T, roundoff, var, fix, rconst, dFdT, Fcn0, Nfun, khet_st, khet_tr, jx, VL_GLO); /// VAR READ - fcn0 read
// ~~~> Compute the Jacobian at current time
Jac_sp(var, fix, rconst, jac0, Njac, VL_GLO); /// VAR READ
// ~~~> Repeat step calculation until current step accepted
// UntilAccepted:
while(1)
{
ros_PrepareMatrix(H, direction, ros_Gamma[0], jac0, Ghimj, Nsng, Ndec, VL_GLO);
// ~~~> Compute the stages
// Stage:
for (int istage=0; istage < ros_S; istage++)
{
// For the 1st istage the function has been computed previously
if (istage == 0)
{
for (int i=0; i<NVAR; i++){
varNew(index,i) = Fcn0(index,i); // FCN0 Read
}
}
else if(ros_NewF[istage])
{
for (int i=0; i<NVAR; i++){
varNew(index,i) = var(index,i);
}
for (int j=0; j < (istage); j++){
for (int i=0; i<NVAR; i++){
varNew(index,i) = K(index,j,i)*ros_A[(istage)*(istage-1)/2 + j] + varNew(index,i);
}
}
Fun(varNew, fix, rconst, varNew, Nfun,VL_GLO); // FCN <- varNew / not overlap
}
for (int i=0; i<NVAR; i++)
K(index,istage,i) = varNew(index,i);
for (int j=0; j<(istage); j++)
{
HC = ros_C[(istage)*(istage-1)/2 + j]/(direction*H);
for (int i=0; i<NVAR; i++){
double tmp = K(index,j,i);
K(index,istage,i) += tmp*HC;
}
}
if ((!autonomous) && (ros_Gamma[istage] ))
{
HG = direction*H*ros_Gamma[istage];
for (int i=0; i<NVAR; i++){
K(index,istage,i) += dFdT(index,i)*HG;
}
}
// R ,RW, RW, R, R
ros_Solve(Ghimj, K, Nsol, istage, ros_S);
} // Stage
// ~~~> Compute the new solution
for (int i=0; i<NVAR; i++){
double tmpNew = var(index,i); /// VAR READ
double tmpErr = ZERO;
for (int j=0; j<ros_S; j++){
double tmp = K(index,j,i);
#ifdef DEBUG
if (isnan(tmp)){
printf("Solver detected NAN!");
tmp = 0;
}
#endif
tmpNew += tmp*ros_M[j];
tmpErr += tmp*ros_E[j];
}
varNew(index,i) = tmpNew; // varNew is killed
varErr(index,i) = tmpErr;
}
Err = ros_ErrorNorm(var, varNew, varErr, absTol, relTol, vectorTol); /// VAR-varNew READ
// ~~~> New step size is bounded by FacMin <= Hnew/H <= FacMax
Fac = fmin(FacMax,fmax(FacMin,FacSafe/pow(Err,ONE/ros_ELO)));
Hnew = H*Fac;
// ~~~> Check the error magnitude and adjust step size
Nstp = Nstp+ 1;
if((Err <= ONE) || (H <= Hmin)) // ~~~> Accept step
{
Nacc = Nacc + 1;
for (int j=0; j<NVAR ; j++)
var(index,j) = fmax(varNew(index,j),ZERO); /////////// VAR WRITE - last VarNew read
T = T + direction*H;
Hnew = fmax(Hmin,fmin(Hnew,Hmax));
if (rejectLastH) // No step size increase after a rejected step
Hnew = fmin(Hnew,H);
rejectLastH = 0;
rejectMoreH = 0;
H = Hnew;
break; // EXIT THE LOOP: WHILE STEP NOT ACCEPTED
}
else // ~~~> Reject step
{
if (rejectMoreH)
Hnew = H*FacRej;
rejectMoreH = rejectLastH;
rejectLastH = 1;
H = Hnew;
if (Nacc >= 1)
Nrej += 1;
} // Err <= 1
} // UntilAccepted
} // TimeLoop
// ~~~> Successful exit
return 0; // ~~~> The integration was successful
}
typedef struct {
double ros_A[15];
double ros_C[15];
int ros_NewF[8];
double ros_M[6];
double ros_E[6];
double ros_Alpha[6];
double ros_Gamma[6];
double ros_ELO;
int ros_S;
} ros_t;
/*
* Lookup tables for different ROS for branch elimination. It is much faster in GPU.
*/
__device__ __constant__ ros_t ros[5] = {
{
{.58578643762690495119831127579030,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, /* ros_A */
{-1.17157287525380990239662255158060,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, /* ros_C */
{1,1,0,0,0,0,0,0}, /* ros_NewF */
{.87867965644035742679746691368545,.29289321881345247559915563789515,0,0,0,0}, /* ros_M */
{.29289321881345247559915563789515,.29289321881345247559915563789515,0,0,0,0}, /* ros_E */
{0,1.0,0,0,0,0}, /* ros_Alpha */
{1.70710678118654752440084436210485,-1.70710678118654752440084436210485,0,0,0,0}, /* ros_Gamma */
2.0, /* ros_ELO */
2, /* ros_S*/
}, /* Ros2 */
{
{1.0,1.0,0,0,0,0,0,0,0,0,0,0,0,0,0}, /* ros_A */
{-0.10156171083877702091975600115545E+01, 0.40759956452537699824805835358067E+01,0.92076794298330791242156818474003E+01,0,0,0,0,0,0,0,0,0,0,0,0}, /* ros_C */
{1,1,0,0,0,0,0,0}, /* ros_NewF */
{0.1E+01,0.61697947043828245592553615689730E+01,-0.42772256543218573326238373806514E+00,0,0,0}, /* ros_M */
{0.5E+00,- 0.29079558716805469821718236208017E+01,0.22354069897811569627360909276199E+00,0,0,0}, /* ros_E */
{0.0E+00,0.43586652150845899941601945119356E+00,0.43586652150845899941601945119356E+00,0,0,0}, /* ros_Alpha */
{0.43586652150845899941601945119356E+00,0.24291996454816804366592249683314E+00,0.21851380027664058511513169485832E+01,0,0,0}, /* ros_Gamma */
3.0, /* ros_ELO */
3
}, /* Ros3 */
{
{0.2000000000000000E+01, 0.1867943637803922E+01, 0.2344449711399156E+00, 0.1867943637803922E+01, 0.2344449711399156E+00,0,0,0,0,0,0,0,0,0,0}, /* ros_A */
{-0.7137615036412310E+01,0.2580708087951457E+01,0.6515950076447975E+00, - 0.2137148994382534E+01, - 0.3214669691237626E+00, - 0.6949742501781779E+00 ,0,0,0,0,0,0,0,0,0}, /* ros_C */
{1,1,1,0,0,0,0,0}, /* ros_NewF */
{0.2255570073418735E+01, 0.2870493262186792E+00, 0.4353179431840180E+00, 0.1093502252409163E+01,0,0}, /* ros_M */
{ -0.2815431932141155E+00, -0.7276199124938920E-01, -0.1082196201495311E+00, -0.1093502252409163E+01, 0, 0}, /* ros_E */
{0.0, 0.1145640000000000E+01, 0.6552168638155900E+00, 0.6552168638155900E+00,0,0}, /* ros_Alpha */
{ 0.5728200000000000E+00, -0.1769193891319233E+01, 0.7592633437920482E+00, -0.1049021087100450E+00,0,0}, /* ros_Gamma */
4.0, /* ros_ELO */
4
}, /* Ros4 */
{
{ 0.0E+00, 2.0E+00, 0.0E+00, 2.0E+00, 0.0E+00, 1.0E+00, 0,0,0,0,0,0,0,0,0}, /* ros_A */
{ 4.0E+00, 1.0E+00, - 1.0E+00, 1.0E+00, - 1.0E+00, - 2.66666666666666666666666666666666, 0,0,0,0,0,0,0,0,0}, /* ros_C */
{1,0,1,1,0,0,0,0}, /* ros_NewF */
{2.0,0,1.0,1.0,0,0}, /* ros_M */
{0,0,0,1.0,0,0}, /* ros_E */
{0,0,1.0,1.0,0,0}, /* ros_Alpha */
{0.5,1.5,0,0,0,0}, /* ros_Gamma */
3.0, /* ros_ELO */
4
}, /* Rodas3 */
{
{
0.1544000000000000E+01, 0.9466785280815826E+00, 0.2557011698983284E+00, 0.3314825187068521E+01,
0.2896124015972201E+01, 0.9986419139977817E+00, 0.1221224509226641E+01, 0.6019134481288629E+01,
0.1253708332932087E+02, -0.6878860361058950E+00, 0.1221224509226641E+01, 0.6019134481288629E+01,
0.1253708332932087E+02, -0.6878860361058950E+00, 1.0E+00}, /* ros_A */
{
-0.5668800000000000E+01, -0.2430093356833875E+01, -0.2063599157091915E+00, -0.1073529058151375E+00,
-0.9594562251023355E+01, -0.2047028614809616E+02, 0.7496443313967647E+01, -0.1024680431464352E+02,
-0.3399990352819905E+02, 0.1170890893206160E+02, 0.8083246795921522E+01, -0.7981132988064893E+01,
-0.3152159432874371E+02, 0.1631930543123136E+02, -0.6058818238834054E+01}, /* ros_C */
{1,1,1,1,1,1,0,0}, /* ros_NewF */
{0.1221224509226641E+01,0.6019134481288629E+01,0.1253708332932087E+02,- 0.6878860361058950E+00,1,1}, /* ros_M */
{0,0,0,0,0,1.0}, /* ros_E */
{0.000, 0.386, 0.210, 0.630, 1.000, 1.000}, /* ros_Alpha */
{0.2500000000000000E+00, -0.1043000000000000E+00, 0.1035000000000000E+00, 0.3620000000000023E-01, 0, 0}, /* ros_Gamma */
4.0, /* ros_ELO */
6
} /* Rodas4 */
};
//__device__ double rconst_local[MAX_VL_GLO*NREACT];
/* Initialize rconst local */
//__device__ double * rconst_local;
__device__ double k_3rd(double temp, double cair, double k0_300K, double n, double kinf_300K, double m, double fc)
/*
*
* temp temperature [K]
* cair air concentration [molecules/cm3]
* k0_300K low pressure limit at 300 K
* n exponent for low pressure limit
* kinf_300K high pressure limit at 300 K
* m exponent for high pressure limit
* fc broadening factor (usually fc=0.6)
*
*/
{
double zt_help, k0_T, kinf_T, k_ratio, k_3rd_r;
zt_help = 300.0/temp;
k0_T = k0_300K *pow(zt_help,n) *cair;
kinf_T = kinf_300K *pow(zt_help,m);
k_ratio = k0_T/kinf_T;
k_3rd_r = k0_T/(1.0+ k_ratio)*pow(fc,1.0/(1.0+ pow(log10(k_ratio),2)));
return k_3rd_r;
}
__device__ double k_3rd_iupac(double temp, double cair, double k0_300K, double n, double kinf_300K, double m, double fc)
/*
*
* temp temperature [K]
* cair air concentration [molecules/cm3]
* k0_300K low pressure limit at 300 K
* n exponent for low pressure limit
* kinf_300K high pressure limit at 300 K
* m exponent for high pressure limit
* fc broadening factor (e.g. 0.45 or 0.6...)
* nu N
*
*/
{
double zt_help, k0_T, kinf_T, k_ratio, nu, k_3rd_iupac_r;
zt_help = 300.0/temp;
k0_T = k0_300K *pow(zt_help,n) *cair;
kinf_T = kinf_300K *pow(zt_help,m);
k_ratio = k0_T/kinf_T;
nu = 0.75- 1.27*log10(fc);
k_3rd_iupac_r = k0_T/(1.0+ k_ratio)*pow(fc,1.0/(1.0+ pow(log10(k_ratio)/nu,2)));
return k_3rd_iupac_r;
}
double * temp_gpu;
double * press_gpu;
double * cair_gpu;
=#=#=#=#=#=#=#=#=#=#=update_rconst=#=#=#=#=#=#=#=#=#=#=
__global__
void Rosenbrock(double * __restrict__ conc, const double Tstart, const double Tend, double * __restrict__ rstatus, int * __restrict__ istatus,
// values calculated from icntrl and rcntrl at host
const int autonomous, const int vectorTol, const int UplimTol, const int method, const int Max_no_steps,
double * __restrict__ d_jac0, double * __restrict__ d_Ghimj, double * __restrict__ d_varNew, double * __restrict__ d_K, double * __restrict__ d_varErr,double * __restrict__ d_dFdT ,double * __restrict__ d_Fcn0, double * __restrict__ d_var, double * __restrict__ d_fix, double * __restrict__ d_rconst,
const double Hmin, const double Hmax, const double Hstart, const double FacMin, const double FacMax, const double FacRej, const double FacSafe, const double roundoff,
// cuda global mem buffers
const double * __restrict__ absTol, const double * __restrict__ relTol,
// for update_rconst
const double * __restrict__ khet_st, const double * __restrict__ khet_tr,
const double * __restrict__ jx,
// global input
const double * __restrict__ temp_gpu,
const double * __restrict__ press_gpu,
const double * __restrict__ cair_gpu,
// extra
const int VL_GLO)
{
int index = blockIdx.x*blockDim.x+threadIdx.x;
/*
 * In theory these accesses could be aggregated, but because of the
 * algorithm each thread works on a different region of memory,
 * which makes the accesses hard to optimize.
*
*/
double *Ghimj = &d_Ghimj[index*LU_NONZERO];
double *K = &d_K[index*NVAR*6];
double *varNew = &d_varNew[index*NVAR];
double *Fcn0 = &d_Fcn0[index*NVAR];
double *dFdT = &d_dFdT[index*NVAR];
double *jac0 = &d_jac0[index*LU_NONZERO];
double *varErr = &d_varErr[index*NVAR];
double *var = &d_var[index*NSPEC];
double *fix = &d_fix[index*NFIX];
double *rconst = &d_rconst[index*NREACT];
if (index < VL_GLO)
{
int Nfun,Njac,Nstp,Nacc,Nrej,Ndec,Nsol,Nsng;
double Texit, Hexit;
Nfun = 0;
Njac = 0;
Nstp = 0;
Nacc = 0;
Nrej = 0;
Ndec = 0;
Nsol = 0;
Nsng = 0;
/* FIXME: add check for method */
const double *ros_A = &ros[method-1].ros_A[0];
const double *ros_C = &ros[method-1].ros_C[0];
const double *ros_M = &ros[method-1].ros_M[0];
const double *ros_E = &ros[method-1].ros_E[0];
const double *ros_Alpha = &ros[method-1].ros_Alpha[0];
const double *ros_Gamma = &ros[method-1].ros_Gamma[0];
const int *ros_NewF = &ros[method-1].ros_NewF[0];
const int ros_S = ros[method-1].ros_S;
const double ros_ELO = ros[method-1].ros_ELO;
/* Copy data from global memory to temporary array */
/*
* Optimization note: if we ever have enough constant
* memory, we could use it for storing the data.
* In current architectures if we use constant memory
* only a few threads will be able to run on the fly.
*
*/
for (int i=0; i<NSPEC; i++)
var(index,i) = conc(index,i);
for (int i=0; i<NFIX; i++)
fix(index,i) = conc(index,NVAR+i);
update_rconst(var, khet_st, khet_tr, jx, rconst, temp_gpu, press_gpu, cair_gpu, VL_GLO);
ros_Integrator(var, fix, Tstart, Tend, Texit,
// Rosenbrock method coefficients
ros_S, ros_M, ros_E, ros_A, ros_C,
ros_Alpha, ros_Gamma, ros_ELO, ros_NewF,
// Integration parameters
autonomous, vectorTol, Max_no_steps,
roundoff, Hmin, Hmax, Hstart, Hexit,
FacMin, FacMax, FacRej, FacSafe,
// Status parameters
Nfun, Njac, Nstp, Nacc, Nrej, Ndec, Nsol, Nsng,
// cuda global mem buffers
rconst, absTol, relTol, varNew, Fcn0,
K, dFdT, jac0, Ghimj, varErr,
// For update rconst
khet_st, khet_tr, jx,
VL_GLO
);
for (int i=0; i<NVAR; i++)
conc(index,i) = var(index,i);
/* Statistics */
istatus(index,ifun) = Nfun;
istatus(index,ijac) = Njac;
istatus(index,istp) = Nstp;
istatus(index,iacc) = Nacc;
istatus(index,irej) = Nrej;
istatus(index,idec) = Ndec;
istatus(index,isol) = Nsol;
istatus(index,isng) = Nsng;
// Last T and H
rstatus(index,itexit) = Texit;
rstatus(index,ihexit) = Hexit;
}
}
=#=#=#=#=#=#=#=#=#=#=special_ros=#=#=#=#=#=#=#=#=#=#=
// no int8 in CUDA :(
__global__ void reduce_istatus_1(int *istatus, int4 *tmp_out_1, int4 *tmp_out_2, int VL_GLO, int *xNacc, int *xNrej)
{
int index = blockIdx.x*blockDim.x+threadIdx.x;
int idx_1 = threadIdx.x;
int global_size = blockDim.x*gridDim.x;
int foo;
//no int8 in CUDA :(
int4 accumulator_1 = make_int4(0,0,0,0);
int4 accumulator_2 = make_int4(0,0,0,0);
while (index < VL_GLO)
{
accumulator_1.x += istatus(index,0);
accumulator_1.y += istatus(index,1);
accumulator_1.z += istatus(index,2);
//some dirty work on the side...
foo = istatus(index,3);
xNacc[index] = foo;
accumulator_1.w += foo;
foo = istatus(index,4);
xNrej[index] = foo;
accumulator_2.x += foo;
accumulator_2.y += istatus(index,5);
accumulator_2.z += istatus(index,6);
accumulator_2.w += istatus(index,7);
index += global_size;
}
//no int8 in CUDA :(
__shared__ int4 buffer_1[REDUCTION_SIZE_1];
__shared__ int4 buffer_2[REDUCTION_SIZE_1];
buffer_1[idx_1] = accumulator_1;
buffer_2[idx_1] = accumulator_2;
__syncthreads();
int idx_2, active_threads = blockDim.x;
int4 tmp_1, tmp_2;
while (active_threads != 1)
{
active_threads /= 2;
if (idx_1 < active_threads)
{
idx_2 = idx_1+active_threads;
tmp_1 = buffer_1[idx_1];
tmp_2 = buffer_1[idx_2];
tmp_1.x += tmp_2.x;
tmp_1.y += tmp_2.y;
tmp_1.z += tmp_2.z;
tmp_1.w += tmp_2.w;
buffer_1[idx_1] = tmp_1;
tmp_1 = buffer_2[idx_1];
tmp_2 = buffer_2[idx_2];
tmp_1.x += tmp_2.x;
tmp_1.y += tmp_2.y;
tmp_1.z += tmp_2.z;
tmp_1.w += tmp_2.w;
buffer_2[idx_1] = tmp_1;
}
__syncthreads();
}
if (idx_1 == 0)
{
tmp_out_1[blockIdx.x] = buffer_1[0];
tmp_out_2[blockIdx.x] = buffer_2[0];
}
}
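/*
 * Two-stage reduction of the per-cell statistics: reduce_istatus_1 is
 * launched with REDUCTION_SIZE_2 blocks of REDUCTION_SIZE_1 threads;
 * each block tree-reduces its int4 accumulators in shared memory and
 * writes one partial sum per block into tmp_out_1/tmp_out_2.
 * reduce_istatus_2 below then runs as a single block of
 * REDUCTION_SIZE_2 threads and folds those partials into the final
 * eight counters. Both tree reductions assume power-of-two block sizes.
 */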
__global__ void reduce_istatus_2(int4 *tmp_out_1, int4 *tmp_out_2, int *out)
{
int idx_1 = threadIdx.x;
//no int8 in CUDA :(
__shared__ int4 buffer_1[REDUCTION_SIZE_2];
__shared__ int4 buffer_2[REDUCTION_SIZE_2];
buffer_1[idx_1] = tmp_out_1[idx_1];
buffer_2[idx_1] = tmp_out_2[idx_1];
__syncthreads();
int idx_2, active_threads = blockDim.x;
int4 tmp_1, tmp_2;
while (active_threads != 1)
{
active_threads /= 2;
if (idx_1 < active_threads)
{
idx_2 = idx_1+active_threads;
tmp_1 = buffer_1[idx_1];
tmp_2 = buffer_1[idx_2];
tmp_1.x += tmp_2.x;
tmp_1.y += tmp_2.y;
tmp_1.z += tmp_2.z;
tmp_1.w += tmp_2.w;
buffer_1[idx_1] = tmp_1;
tmp_1 = buffer_2[idx_1];
tmp_2 = buffer_2[idx_2];
tmp_1.x += tmp_2.x;
tmp_1.y += tmp_2.y;
tmp_1.z += tmp_2.z;
tmp_1.w += tmp_2.w;
buffer_2[idx_1] = tmp_1;
}
__syncthreads();
}
if (idx_1 == 0)
{
tmp_1 = buffer_1[0];
tmp_2 = buffer_2[0];
out[0] = tmp_1.x;
out[1] = tmp_1.y;
out[2] = tmp_1.z;
out[3] = tmp_1.w;
out[4] = tmp_2.x;
out[5] = tmp_2.y;
out[6] = tmp_2.z;
out[7] = tmp_2.w;
}
}
/* Assuming different processes */
enum { TRUE=1, FALSE=0 } ;
double *d_conc, *d_temp, *d_press, *d_cair, *d_khet_st, *d_khet_tr, *d_jx, *d_jac0, *d_Ghimj, *d_varNew, *d_K, *d_varErr, *d_dFdT, *d_Fcn0, *d_var, *d_fix, *d_rconst;
int initialized = FALSE;
/* Device pointers pointing to GPU */
double *d_rstatus, *d_absTol, *d_relTol;
int *d_istatus, *d_istatus_rd, *d_xNacc, *d_xNrej;
int4 *d_tmp_out_1, *d_tmp_out_2;
/* Allocate arrays on device for Rosenbrock */
__host__ void init_first_time(int pe, int VL_GLO, int size_khet_st, int size_khet_tr, int size_jx ){
/* Select the proper GPU CARD */
int deviceCount, device;
gpuErrchk( cudaGetDeviceCount(&deviceCount) );
device = pe % deviceCount;
gpuErrchk( cudaSetDevice(device) );
printf("PE[%d]: selected %d of total %d\n",pe,device,deviceCount);
cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
gpuErrchk( cudaMalloc ((void **) &d_conc , sizeof(double)*VL_GLO*(NSPEC)) );
gpuErrchk( cudaMalloc ((void **) &d_khet_st, sizeof(double)*VL_GLO*size_khet_st) );
gpuErrchk( cudaMalloc ((void **) &d_khet_tr, sizeof(double)*VL_GLO*size_khet_tr) );
gpuErrchk( cudaMalloc ((void **) &d_jx , sizeof(double)*VL_GLO*size_jx) );
gpuErrchk( cudaMalloc ((void **) &d_rstatus , sizeof(double)*VL_GLO*2) );
gpuErrchk( cudaMalloc ((void **) &d_istatus , sizeof(int)*VL_GLO*8) );
gpuErrchk( cudaMalloc ((void **) &d_absTol , sizeof(double)*NVAR) );
gpuErrchk( cudaMalloc ((void **) &d_relTol , sizeof(double)*NVAR) );
/* Allocate input arrays */
gpuErrchk( cudaMalloc ((void **) &temp_gpu , sizeof(double)*VL_GLO) );
gpuErrchk( cudaMalloc ((void **) &press_gpu , sizeof(double)*VL_GLO) );
gpuErrchk( cudaMalloc ((void **) &cair_gpu , sizeof(double)*VL_GLO) );
/* Allocate arrays on device for reducing metrics */
gpuErrchk( cudaMalloc ((void **) &d_istatus_rd , sizeof(int)*8));
gpuErrchk( cudaMalloc ((void **) &d_tmp_out_1 , sizeof(int4)*64));
gpuErrchk( cudaMalloc ((void **) &d_tmp_out_2 , sizeof(int4)*64));
gpuErrchk( cudaMalloc ((void **) &d_xNacc , sizeof(int)*VL_GLO));
gpuErrchk( cudaMalloc ((void **) &d_xNrej , sizeof(int)*VL_GLO));
/* Allocate arrays for solvers on device global memory to reduce the stack usage */
gpuErrchk( cudaMalloc ((void **) &d_jac0, sizeof(double)*VL_GLO*LU_NONZERO) );
gpuErrchk( cudaMalloc ((void **) &d_Ghimj, sizeof(double)*VL_GLO*LU_NONZERO) );
gpuErrchk( cudaMalloc ((void **) &d_varNew, sizeof(double)*VL_GLO*NVAR) );
gpuErrchk( cudaMalloc ((void **) &d_Fcn0, sizeof(double)*VL_GLO*NVAR) );
gpuErrchk( cudaMalloc ((void **) &d_dFdT, sizeof(double)*VL_GLO*NVAR) );
gpuErrchk( cudaMalloc ((void **) &d_K, sizeof(double)*VL_GLO*NVAR*6) ); // TODO: Change size according to solver steps
gpuErrchk( cudaMalloc ((void **) &d_varErr, sizeof(double)*VL_GLO*NVAR) );
gpuErrchk( cudaMalloc ((void **) &d_var, sizeof(double)*VL_GLO*NSPEC) );
gpuErrchk( cudaMalloc ((void **) &d_fix, sizeof(double)*VL_GLO*NFIX) );
gpuErrchk( cudaMalloc ((void **) &d_rconst, sizeof(double)*VL_GLO*NREACT) );
initialized = TRUE;
}
/*
 * TODO: This should be called at some point (e.g. at model shutdown)
 * to release the device memory allocated in init_first_time().
 */
extern "C" void finalize_cuda(){
/* Free memory on the device */
gpuErrchk( cudaFree(d_conc ) );
gpuErrchk( cudaFree(d_temp ) );
gpuErrchk( cudaFree(d_press ) );
gpuErrchk( cudaFree(d_cair ) );
gpuErrchk( cudaFree(d_khet_st ) );
gpuErrchk( cudaFree(d_khet_tr ) );
gpuErrchk( cudaFree(d_jx ) );
gpuErrchk( cudaFree(d_rstatus ) );
gpuErrchk( cudaFree(d_istatus ) );
gpuErrchk( cudaFree(d_absTol ) );
gpuErrchk( cudaFree(d_relTol ) );
gpuErrchk( cudaFree(d_istatus_rd ) );
gpuErrchk( cudaFree(d_tmp_out_1 ) );
gpuErrchk( cudaFree(d_tmp_out_2 ) );
gpuErrchk( cudaFree(d_xNacc ) );
gpuErrchk( cudaFree(d_xNrej ) );
gpuErrchk( cudaFree(temp_gpu ) );
gpuErrchk( cudaFree(press_gpu ) );
gpuErrchk( cudaFree(cair_gpu ) );
gpuErrchk( cudaFree(d_jac0 ) );
gpuErrchk( cudaFree(d_Ghimj ) );
gpuErrchk( cudaFree(d_varNew ) );
gpuErrchk( cudaFree(d_Fcn0 ) );
gpuErrchk( cudaFree(d_dFdT ) );
gpuErrchk( cudaFree(d_K ) );
gpuErrchk( cudaFree(d_varErr ) );
gpuErrchk( cudaFree(d_var ) );
gpuErrchk( cudaFree(d_fix ) );
gpuErrchk( cudaFree(d_rconst ) );
}
extern "C" void kpp_integrate_cuda_( int *pe_p, int *sizes, double *time_step_len_p, double *conc, double *temp, double *press, double *cair,
double *khet_st, double *khet_tr, double *jx, double *absTol, double *relTol, int *ierr, int *istatus,
int *xNacc, int *xNrej, double *rndoff, int *icntrl=NULL, double *rcntrl=NULL
)
/* // TODO
* Parameters:
* pe_p: scalar int - processor element
* VL_GLO: scalar int - size of the system
* NSPEC: scalar int - number of species
* NREACT: scalar int - number of reactions
* NVAR: scalar int - number of variable (non-fixed) species
*
* Input data:
* conc: 2D array of doubles - size: vl_glo x number of species
* temp: 1D array of doubles - size: vl_glo
* press: 1D array of doubles - size: vl_glo
* cair: 1D array of doubles - size: vl_glo
* khet_st: 2D array of doubles - size: vl_glo x number of species
* khet_tr: 2D array of doubles - size: vl_glo x number of species
* jx: 2D array of doubles - size: vl_glo x number of species
* absTol: 1D array of doubles - size: number of species
* relTol: 1D array of doubles - size: number of species
* Control:
* icntrl: 1D array of ints - size: 4
* sizes: 1D array of ints - size: 4
* rcntrl: 1D array of doubles - size: 7
*
*
*/
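/*
 * A minimal (hypothetical) host-side call sketch, assuming the argument
 * shapes documented above and a grid of vl_glo cells; icntrl/rcntrl can
 * be omitted (they default to NULL, i.e. solver defaults are used):
 *
 *   int sizes[4] = { vl_glo, size_khet_st, size_khet_tr, size_jx };
 *   kpp_integrate_cuda_(&pe, sizes, &dt, conc, temp, press, cair,
 *                       khet_st, khet_tr, jx, absTol, relTol,
 *                       &ierr, istatus, xNacc, xNrej, &roundoff);
 */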
{
const double DELTAMIN = 1.0E-5;
int VL_GLO = sizes[0];
int size_khet_st = sizes[1];
int size_khet_tr = sizes[2];
int size_jx = sizes[3];
double roundoff = *rndoff;
double Tstart,Tend;
Tstart = ZERO;
Tend = *time_step_len_p;
int pe = *pe_p;
// variables from rcntrl and icntrl
int autonomous, vectorTol, UplimTol, method, Max_no_steps;
double Hmin, Hmax, Hstart, FacMin, FacMax, FacRej, FacSafe;
//int rcntrl_bool = 0, icntrl_bool=0;
if (rcntrl == NULL)
{
rcntrl = new double[7];
for (int i=0; i < 7; i++)
rcntrl[i] = 0.0;
}
if (icntrl == NULL)
{
icntrl = new int[4];
for (int i=0; i < 4; i++)
icntrl[i] = 0;
}
/* Allocate arrays on device for update_rconst kernel*/
if (initialized == FALSE) init_first_time(pe, VL_GLO, size_khet_st, size_khet_tr, size_jx);
/* Copy data from host memory to device memory */
gpuErrchk( cudaMemcpy(d_conc , conc , sizeof(double)*VL_GLO*NSPEC , cudaMemcpyHostToDevice) );
gpuErrchk( cudaMemcpy(temp_gpu , temp , sizeof(double)*VL_GLO , cudaMemcpyHostToDevice) );
gpuErrchk( cudaMemcpy(press_gpu , press , sizeof(double)*VL_GLO , cudaMemcpyHostToDevice) );
gpuErrchk( cudaMemcpy(cair_gpu , cair , sizeof(double)*VL_GLO , cudaMemcpyHostToDevice) );
gpuErrchk( cudaMemcpy(d_khet_st, khet_st , sizeof(double)*VL_GLO*size_khet_st , cudaMemcpyHostToDevice) );
gpuErrchk( cudaMemcpy(d_khet_tr, khet_tr , sizeof(double)*VL_GLO*size_khet_tr , cudaMemcpyHostToDevice) );
gpuErrchk( cudaMemcpy(d_jx , jx , sizeof(double)*VL_GLO*size_jx , cudaMemcpyHostToDevice) );
/* Copy arrays from host memory to device memory for Rosenbrock */
gpuErrchk( cudaMemcpy(d_absTol, absTol, sizeof(double)*NVAR, cudaMemcpyHostToDevice) );
gpuErrchk( cudaMemcpy(d_relTol, relTol, sizeof(double)*NVAR, cudaMemcpyHostToDevice) );
/* Compute execution configuration for update_rconst */
int block_size, grid_size;
block_size = BLOCKSIZE;
grid_size = (VL_GLO + block_size - 1)/block_size;
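// e.g. with BLOCKSIZE = 256 and VL_GLO = 10000 cells this yields
// grid_size = (10000 + 255)/256 = 40 blocks, one thread per grid cell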
dim3 dimBlock(block_size);
dim3 dimGrid(grid_size);
/* Execute the kernel */
//update_rconst<<<dimGrid,dimBlock>>>(d_conc, d_khet_st, d_khet_tr, d_jx, VL_GLO);
GPU_DEBUG();
// *------------------------------------------------------*
// | Default values vs input settings (icntrl, rcntrl) |
// *------------------------------------------------------*
int ierr_tmp=0;
{
// autonomous or time dependent ODE. Default is time dependent.
autonomous = !(icntrl[0] == 0);
// For scalar tolerances (icntrl[1] != 0) the code uses absTol[0] and relTol[0]
// For vector tolerances (icntrl[1] == 0) the code uses absTol[0..NVAR-1] and relTol[0..NVAR-1]
if (icntrl[1] == 0)
{
vectorTol = 1; //bool
UplimTol = NVAR;
}
else
{
vectorTol = 0;
UplimTol = 1;
}
// The particular Rosenbrock method chosen
if (icntrl[2] == 0)
{
method = 4;
}
else if ((icntrl[2] >= 1) && (icntrl[2] <= 5))
{
method = icntrl[2];
}
else
{
printf("User-selected Rosenbrock method: icntrl[2]=%d\n",icntrl[2]);
ierr_tmp = -2;
}
// The maximum number of steps admitted
if (icntrl[3] == 0)
{
Max_no_steps = 100000;
}
else if (icntrl[3] > 0)
{
Max_no_steps=icntrl[3];
}
else
{
printf("User-selected max no. of steps: icntrl[3]=%d\n",icntrl[3]);
ierr_tmp = -1;
}
// Unit roundoff (1+ roundoff>1)
roundoff = machine_eps_flt();
// Lower bound on the step size: (positive value)
if (rcntrl[0] == ZERO)
{
Hmin = ZERO;
}
else if (rcntrl[0] > ZERO)
{
Hmin = rcntrl[0];
}
else
{
printf("User-selected Hmin: rcntrl[0]=%f\n",rcntrl[0]);
ierr_tmp = -3;
}
// Upper bound on the step size: (positive value)
if (rcntrl[1] == ZERO)
{
Hmax = fabs(Tend-Tstart);
}
else if (rcntrl[1] > ZERO)
{
Hmax = fmin(fabs(rcntrl[1]),fabs(Tend-Tstart));
}
else
{
printf("User-selected Hmax: rcntrl[1]=%f\n",rcntrl[1]);
ierr_tmp = -3;
}
// Starting step size: (positive value)
if (rcntrl[2] == ZERO)
{
Hstart = fmax(Hmin,DELTAMIN);
}
else if (rcntrl[2] > ZERO)
{
Hstart = fmin(fabs(rcntrl[2]),fabs(Tend-Tstart));
}
else
{
printf("User-selected Hstart: rcntrl[2]=%f\n",rcntrl[2]);
ierr_tmp = -3;
}
// Step size can be changed s.t. FacMin < Hnew/Hexit < FacMax
if (rcntrl[3] == ZERO)
{
FacMin = 0.2;
}
else if (rcntrl[3] > ZERO)
{
FacMin = rcntrl[3];
}
else
{
printf("User-selected FacMin: rcntrl[3]=%f\n",rcntrl[3]);
ierr_tmp = -4;
}
if (rcntrl[4] == ZERO)
{
FacMax = 6.0;
}
else if (rcntrl[4] > ZERO)
{
FacMax = rcntrl[4];
}
else
{
printf("User-selected FacMax: rcntrl[4]=%f\n",rcntrl[4]);
ierr_tmp = -4;
}
// FacRej: Factor to decrease step after 2 successive rejections
if (rcntrl[5] == ZERO)
{
FacRej = 0.1;
}
else if (rcntrl[5] > ZERO)
{
FacRej = rcntrl[5];
}
else
{
printf("User-selected FacRej: rcntrl[5]=%f\n",rcntrl[5]);
ierr_tmp = -4;
}
// FacSafe: Safety Factor in the computation of new step size
if (rcntrl[6] == ZERO)
{
FacSafe = 0.9;
}
else if (rcntrl[6] > ZERO)
{
FacSafe = rcntrl[6];
}
else
{
printf("User-selected FacSafe: rcntrl[6]=%f\n",rcntrl[6]);
ierr_tmp = -4;
}
// Check if tolerances are reasonable
for (int i=0; i < UplimTol; i++)
{
if ((absTol[i] <= ZERO) || (relTol[i] <= 10.0*roundoff) || (relTol[i] >= 1.0))
{
printf("CCC absTol(%d) = %f \n",i,absTol[i]);
printf("CCC relTol(%d) = %f \n",i,relTol[i]);
ierr_tmp = -5;
}
}
}
=#=#=#=#=#=#=#=#=#=#=call_kernel=#=#=#=#=#=#=#=#=#=#=
GPU_DEBUG();
reduce_istatus_1<<<REDUCTION_SIZE_2,REDUCTION_SIZE_1>>>(d_istatus, d_tmp_out_1, d_tmp_out_2, VL_GLO, d_xNacc, d_xNrej);
GPU_DEBUG();
reduce_istatus_2<<<1,REDUCTION_SIZE_2>>>(d_tmp_out_1, d_tmp_out_2, d_istatus_rd);
GPU_DEBUG();
/* Copy the result back */
gpuErrchk( cudaMemcpy( conc , d_conc , sizeof(double)*VL_GLO*NVAR, cudaMemcpyDeviceToHost) );
gpuErrchk( cudaMemcpy( xNacc , d_xNacc , sizeof(int)*VL_GLO , cudaMemcpyDeviceToHost) );
gpuErrchk( cudaMemcpy( xNrej , d_xNrej , sizeof(int)*VL_GLO , cudaMemcpyDeviceToHost) );
return;
}
|
039d4c74f038c270848cf4f589c16e4cdd4fdda9.hip | // !!! This is a file automatically generated by hipify!!!
#include "tracer.hpp"
#include "rays.hpp"
#include <SDL.h>
#include <hip/hip_runtime.h>
Tracer::Tracer(App *app) {
buffer = new int[SCREEN_WIDTH * SCREEN_HEIGHT];
vertices = std::vector<vec3f>();
triangles = std::vector<vec3i>();
normals = std::vector<vec3f>();
this->frame = 0;
this->app = app;
// TODO: Fix to support window resizing
this->fovy = PI/2;
this->fovx = PI/2;
loadObjFile(triangles, vertices, normals);
this->ang = 45;
this->origin = {0, -200, -200};
this->click = 0;
vBuf = NULL;
nBuf = NULL;
tBuf = NULL;
gBuf = NULL;
CHECK(hipMalloc((void**)&vBuf, vertices.size() * sizeof(vec3f)));
CHECK(hipMalloc((void**)&nBuf, normals.size() * sizeof(vec3f)));
CHECK(hipMalloc((void**)&tBuf, 3 * triangles.size() * sizeof(int)));
CHECK(hipMalloc((void**)&gBuf, SCREEN_HEIGHT * SCREEN_WIDTH * sizeof(int)));
CHECK(hipMemcpy(nBuf, normals.data(), normals.size() * sizeof(vec3f), hipMemcpyHostToDevice));
CHECK(hipMemcpy(vBuf, vertices.data(), vertices.size() * sizeof(vec3f), hipMemcpyHostToDevice));
CHECK(hipMemcpy(tBuf, triangles.data(), 3 * triangles.size() * sizeof(int), hipMemcpyHostToDevice));
}
Tracer::~Tracer() {
delete[] buffer;
CHECK(hipFree(nBuf));
CHECK(hipFree(vBuf));
CHECK(hipFree(tBuf));
CHECK(hipFree(gBuf));
}
void Tracer::draw() const {
dim3 blocks(SCREEN_WIDTH/8, SCREEN_HEIGHT/8);
dim3 threads(8, 8);
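// NOTE: this configuration covers the screen only if SCREEN_WIDTH and
// SCREEN_HEIGHT are multiples of 8; otherwise the rightmost/bottom
// pixels are never written by drawRay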
CHECK(hipMemset(gBuf, 0, SCREEN_WIDTH * SCREEN_HEIGHT * sizeof(int)));
hipLaunchKernelGGL(( drawRay), dim3(blocks), dim3(threads), 0, 0, origin, ang*PI/180, vBuf, nBuf, triangles.size(), tBuf, triangles.size(), fovx, fovy, gBuf);
hipDeviceSynchronize();
CHECK(hipMemcpy(buffer, gBuf, SCREEN_WIDTH * SCREEN_HEIGHT * sizeof(int), hipMemcpyDeviceToHost));
}
void Tracer::run() {
while(!click) {
CHECK(hipMemcpy(vBuf, vertices.data(), vertices.size() * sizeof(vec3f), hipMemcpyHostToDevice));
CHECK(hipMemcpy(tBuf, triangles.data(), 3 * triangles.size() * sizeof(int), hipMemcpyHostToDevice));
auto start = CURRENT_TIME;
draw();
auto end = CURRENT_TIME;
int ms = std::chrono::duration_cast<std::chrono::milliseconds>(end-start).count();
printf("%dms x: %f, y: %f, z: %f, ang: %d\n", ms, origin.x, origin.y, origin.z, ang);
app->LDS_prepareScene(frame, buffer);
getInput();
app->LDS_presentScene();
frame += 1;
}
}
void Tracer::getInput() {
SDL_Event event;
while (SDL_PollEvent(&event)) {
switch (event.type) {
case SDL_QUIT:
exit(0);
break;
case SDL_MOUSEBUTTONDOWN:
sy1 = event.button.y;
sx1 = event.button.x;
break;
case SDL_MOUSEWHEEL:
if (event.wheel.y > 0)
origin.y += 0.1;
if (event.wheel.y < 0)
origin.y -= 0.1;
break;
case SDL_KEYDOWN:
switch(event.key.keysym.sym) {
case SDLK_ESCAPE:
exit(0);
break;
case 'a':
case SDLK_LEFT:
origin.x -= 1.0;
break;
case 'd':
case SDLK_RIGHT:
origin.x += 1.0;
break;
case 'w':
case SDLK_UP:
origin.z += 1.0;
break;
case 's':
case SDLK_DOWN:
origin.z -= 1.0;
break;
case 'q':
ang += 5;
break;
case 'e':
ang -= 5;
break;
}
break;
default:
break;
}
}
}
| 039d4c74f038c270848cf4f589c16e4cdd4fdda9.cu | #include "tracer.hpp"
#include "rays.hpp"
#include <SDL.h>
#include <cuda_runtime.h>
Tracer::Tracer(App *app) {
buffer = new int[SCREEN_WIDTH * SCREEN_HEIGHT];
vertices = std::vector<vec3f>();
triangles = std::vector<vec3i>();
normals = std::vector<vec3f>();
this->frame = 0;
this->app = app;
// TODO: Fix to support window resizing
this->fovy = PI/2;
this->fovx = PI/2;
loadObjFile(triangles, vertices, normals);
this->ang = 45;
this->origin = {0, -200, -200};
this->click = 0;
vBuf = NULL;
nBuf = NULL;
tBuf = NULL;
gBuf = NULL;
CHECK(cudaMalloc((void**)&vBuf, vertices.size() * sizeof(vec3f)));
CHECK(cudaMalloc((void**)&nBuf, normals.size() * sizeof(vec3f)));
CHECK(cudaMalloc((void**)&tBuf, 3 * triangles.size() * sizeof(int)));
CHECK(cudaMalloc((void**)&gBuf, SCREEN_HEIGHT * SCREEN_WIDTH * sizeof(int)));
CHECK(cudaMemcpy(nBuf, normals.data(), normals.size() * sizeof(vec3f), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(vBuf, vertices.data(), vertices.size() * sizeof(vec3f), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(tBuf, triangles.data(), 3 * triangles.size() * sizeof(int), cudaMemcpyHostToDevice));
}
Tracer::~Tracer() {
delete[] buffer;
CHECK(cudaFree(nBuf));
CHECK(cudaFree(vBuf));
CHECK(cudaFree(tBuf));
CHECK(cudaFree(gBuf));
}
void Tracer::draw() const {
dim3 blocks(SCREEN_WIDTH/8, SCREEN_HEIGHT/8);
dim3 threads(8, 8);
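// NOTE: this configuration covers the screen only if SCREEN_WIDTH and
// SCREEN_HEIGHT are multiples of 8; otherwise the rightmost/bottom
// pixels are never written by drawRay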
CHECK(cudaMemset(gBuf, 0, SCREEN_WIDTH * SCREEN_HEIGHT * sizeof(int)));
drawRay<<<blocks, threads>>>(origin, ang*PI/180, vBuf, nBuf, triangles.size(), tBuf, triangles.size(), fovx, fovy, gBuf);
cudaDeviceSynchronize();
CHECK(cudaMemcpy(buffer, gBuf, SCREEN_WIDTH * SCREEN_HEIGHT * sizeof(int), cudaMemcpyDeviceToHost));
}
void Tracer::run() {
while(!click) {
CHECK(cudaMemcpy(vBuf, vertices.data(), vertices.size() * sizeof(vec3f), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(tBuf, triangles.data(), 3 * triangles.size() * sizeof(int), cudaMemcpyHostToDevice));
auto start = CURRENT_TIME;
draw();
auto end = CURRENT_TIME;
int ms = std::chrono::duration_cast<std::chrono::milliseconds>(end-start).count();
printf("%dms x: %f, y: %f, z: %f, ang: %d\n", ms, origin.x, origin.y, origin.z, ang);
app->LDS_prepareScene(frame, buffer);
getInput();
app->LDS_presentScene();
frame += 1;
}
}
void Tracer::getInput() {
SDL_Event event;
while (SDL_PollEvent(&event)) {
switch (event.type) {
case SDL_QUIT:
exit(0);
break;
case SDL_MOUSEBUTTONDOWN:
sy1 = event.button.y;
sx1 = event.button.x;
break;
case SDL_MOUSEWHEEL:
if (event.wheel.y > 0)
origin.y += 0.1;
if (event.wheel.y < 0)
origin.y -= 0.1;
break;
case SDL_KEYDOWN:
switch(event.key.keysym.sym) {
case SDLK_ESCAPE:
exit(0);
break;
case 'a':
case SDLK_LEFT:
origin.x -= 1.0;
break;
case 'd':
case SDLK_RIGHT:
origin.x += 1.0;
break;
case 'w':
case SDLK_UP:
origin.z += 1.0;
break;
case 's':
case SDLK_DOWN:
origin.z -= 1.0;
break;
case 'q':
ang += 5;
break;
case 'e':
ang -= 5;
break;
}
break;
default:
break;
}
}
}
|
70bf54aece54b81642b9b8585cca623e5f466658.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2014, Julian Straub <[email protected]>
* Licensed under the MIT license. See the license file LICENSE.
*/
/*
* compute the Jacobian of robust squared cost function
*/
__global__ void robustSquaredAngleCostFctJacobian(float *J, float *x,
unsigned short *z, float *mu, float sigma_sq, int w, int h)//, float *dbg)
{
const int DIM = 3;
__shared__ float mui[DIM*6];
// one J per column; BLOCK_SIZE columns; per column first 3 first col of J,
// second 3 columns second cols of J
__shared__ float J_shared[BLOCK_SIZE*3*3];
const int tid = threadIdx.x + blockDim.x*threadIdx.y;
const int idx = threadIdx.x + blockDim.x * blockIdx.x;
const int idy = threadIdx.y + blockDim.y * blockIdx.y;
const int id = idx + idy*w;
// caching
if(tid < DIM*6) mui[tid] = mu[tid];
#pragma unroll
for(int s=0; s<3*3; ++s) {
J_shared[tid+BLOCK_SIZE*s] = 0.0f;
}
__syncthreads(); // make sure that ys have been cached
if ((idx<w) && (idy<h))
{
float xi[3];
xi[0] = x[id*X_STEP+X_OFFSET+0];
xi[1] = x[id*X_STEP+X_OFFSET+1];
xi[2] = x[id*X_STEP+X_OFFSET+2];
unsigned short k = z[id]; // which MF axis does it belong to
if (k<6)// && k!=4 && k!=5)
{
int j = k/2; // which of the rotation columns does this belong to
float sign = (- float(k%2) +0.5f)*2.0f; // sign of the axis
float xiTy = xi[0]*mui[k] + xi[1]*mui[k+6]
+ xi[2]*mui[k+12];
xiTy = max(-1.0f,min(1.0f,xiTy));
float J_ =0.0f;
if (xiTy > 1.0f-1e-10)
{
// limit according to mathematica
J_ = -2.0f/sigma_sq;
}else{
float err = acosf(xiTy);
float err_sq = err*err;
float a = sqrtf(1.0f - xiTy*xiTy);
float b = (sigma_sq + err_sq);
// obtained using Mathematica
J_ = 2.0f*( (err*err_sq/(a*b*b)) - (err/(a*b)) );
// TODO could be simplified: see writeup!
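// (With err = acos(xiTy), a = sqrt(1-xiTy^2), b = sigma_sq + err^2 this
//  is d/d(xiTy) of err^2/b and simplifies to -2*sigma_sq*err/(a*b*b);
//  its limit as xiTy -> 1 is -2/sigma_sq, the value used above.)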
}
//dbg[id] = J_;
J_shared[tid+(j*3+0)*BLOCK_SIZE] = sign*J_*xi[0];
J_shared[tid+(j*3+1)*BLOCK_SIZE] = sign*J_*xi[1];
J_shared[tid+(j*3+2)*BLOCK_SIZE] = sign*J_*xi[2];
}else{
//dbg[id] = 9999.0f;
}
}
//reduction.....
__syncthreads(); //sync the threads
#pragma unroll
for(int s=(BLOCK_SIZE)/2; s>0; s>>=1) {
if(tid < s)
#pragma unroll
for( int k=0; k<3*3; ++k) {
int tidk = k*BLOCK_SIZE+tid;
J_shared[tidk] += J_shared[tidk + s];
}
__syncthreads();
}
#pragma unroll
for( int k=0; k<3*3; ++k) {
if(tid==k && J_shared[k*BLOCK_SIZE]!=0 ) {
atomicAdd(&J[k],J_shared[k*BLOCK_SIZE]);
}
}
// //reduction.....
//#pragma unroll
// for( int k=0; k<3*3; ++k) {
// int tidk = k*BLOCK_SIZE+tid;
// __syncthreads(); //sync the threads
//#pragma unroll
// for(int s=(BLOCK_SIZE)/2; s>0; s>>=1) {
// if(tid < s)
// J_shared[tidk] += J_shared[tidk + s];
// __syncthreads();
// }
//
// if(tid==0 && J_shared[k*BLOCK_SIZE]!=0 ) {
// atomicAdd(&J[k],J_shared[k*BLOCK_SIZE]);
// }
// }
}
/*
* compute normal assignments as well as the costfunction value under that
* assignment. Normal assignments are computed according based on nearest
* distance in the arclength sense.
*/
__global__ void squaredAngleCostFctAssignment(float *cost, uint32_t* N,
float *x, unsigned short *z, float* errs, float *mu, float sigma_sq,
int w, int h)
{
const int DIM = 3;
//__shared__ float xi[BLOCK_SIZE*3];
__shared__ float mui[DIM*6];
__shared__ float rho[BLOCK_SIZE];
__shared__ uint32_t Ni[BLOCK_SIZE];
const int tid = threadIdx.x + blockDim.x * threadIdx.y;
const int idx = threadIdx.x + blockDim.x * blockIdx.x;
const int idy = threadIdx.y + blockDim.y * blockIdx.y;
const int id = idx + idy*w;
// caching
if(tid < DIM*6) mui[tid] = mu[tid];
rho[tid] = 0.0f;
Ni[tid] = 0;
__syncthreads(); // make sure that ys have been cached
if ((idx<w) && (idy<h))
{
float xi[3];
xi[0] = x[id*X_STEP+X_OFFSET+0];
xi[1] = x[id*X_STEP+X_OFFSET+1];
xi[2] = x[id*X_STEP+X_OFFSET+2];
float err_min = 9999999.0f;
unsigned short k_min = 6;
if((xi[0]!=xi[0] || xi[1]!=xi[1] || xi[2]!=xi[2])
|| xi[0]*xi[0]+xi[1]*xi[1]+xi[2]*xi[2] < 0.9f )
{
// if nan
k_min = 6;
err_min = .1f;
//if(X_STEP == 8) x[id*X_STEP+4] = 6.0f;
}else{
#pragma unroll
for (unsigned short k=0; k<6; ++k)
{
float xiTy = xi[0]*mui[k] + xi[1]*mui[k+6] + xi[2]*mui[k+12];
float err = acosf(max(-1.0f,min(1.0f,xiTy)));
if(err_min > err)
{
err_min = err;
k_min = k;
}
}
rho[tid] = (err_min*err_min)/sigma_sq;
Ni[tid] = 1;
}
z[id] = k_min;
errs[id] = err_min;
if(X_STEP == 8)
{
x[id*X_STEP+X_OFFSET+4] = c_rgbForMFaxes[k_min];//float(k_min);
x[id*X_STEP+X_OFFSET+5] = float(k_min);//xi[0]; //float(k_min);
x[id*X_STEP+X_OFFSET+6] = err_min; //rgb;//xi[1]; //err_min;
// x[id*X_STEP+X_OFFSET+7] = 0.0f;//err_min; //err_min;
}
}
//reduction.....
// TODO: make it faster!
__syncthreads(); //sync the threads
#pragma unroll
for(int s=(BLOCK_SIZE)/2; s>0; s>>=1) {
if(tid < s)
{
rho[tid] += rho[tid + s];
Ni[tid] += Ni[tid + s];
}
__syncthreads();
}
if(tid==0 && rho[0]!=0.0f) {
atomicAdd(&cost[0],rho[0]);
}
if(tid==1 && Ni[0]!=0 ) {
atomicAdd(N,Ni[0]);
}
// __syncthreads(); //sync the threads
//#pragma unroll
// for(int s=(BLOCK_SIZE)/2; s>0; s>>=1) {
// if(tid < s)
// Ni[tid] += Ni[tid + s];
// __syncthreads();
// }
//
// if(tid==0 && Ni[0]!=0 ) {
// atomicAdd(N,Ni[0]);
// }
}
| 70bf54aece54b81642b9b8585cca623e5f466658.cu | /* Copyright (c) 2014, Julian Straub <[email protected]>
* Licensed under the MIT license. See the license file LICENSE.
*/
/*
* compute the Jacobian of robust squared cost function
*/
__global__ void robustSquaredAngleCostFctJacobian(float *J, float *x,
unsigned short *z, float *mu, float sigma_sq, int w, int h)//, float *dbg)
{
const int DIM = 3;
__shared__ float mui[DIM*6];
// one J per column; BLOCK_SIZE columns; per column first 3 first col of J,
// second 3 columns second cols of J
__shared__ float J_shared[BLOCK_SIZE*3*3];
const int tid = threadIdx.x + blockDim.x*threadIdx.y;
const int idx = threadIdx.x + blockDim.x * blockIdx.x;
const int idy = threadIdx.y + blockDim.y * blockIdx.y;
const int id = idx + idy*w;
// caching
if(tid < DIM*6) mui[tid] = mu[tid];
#pragma unroll
for(int s=0; s<3*3; ++s) {
J_shared[tid+BLOCK_SIZE*s] = 0.0f;
}
__syncthreads(); // make sure that ys have been cached
if ((idx<w) && (idy<h))
{
float xi[3];
xi[0] = x[id*X_STEP+X_OFFSET+0];
xi[1] = x[id*X_STEP+X_OFFSET+1];
xi[2] = x[id*X_STEP+X_OFFSET+2];
unsigned short k = z[id]; // which MF axis does it belong to
if (k<6)// && k!=4 && k!=5)
{
int j = k/2; // which of the rotation columns does this belong to
float sign = (- float(k%2) +0.5f)*2.0f; // sign of the axis
float xiTy = xi[0]*mui[k] + xi[1]*mui[k+6]
+ xi[2]*mui[k+12];
xiTy = max(-1.0f,min(1.0f,xiTy));
float J_ =0.0f;
if (xiTy > 1.0f-1e-10)
{
// limit according to mathematica
J_ = -2.0f/sigma_sq;
}else{
float err = acosf(xiTy);
float err_sq = err*err;
float a = sqrtf(1.0f - xiTy*xiTy);
float b = (sigma_sq + err_sq);
// obtained using Mathematica
J_ = 2.0f*( (err*err_sq/(a*b*b)) - (err/(a*b)) );
// TODO could be simplified: see writeup!
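// (With err = acos(xiTy), a = sqrt(1-xiTy^2), b = sigma_sq + err^2 this
//  is d/d(xiTy) of err^2/b and simplifies to -2*sigma_sq*err/(a*b*b);
//  its limit as xiTy -> 1 is -2/sigma_sq, the value used above.)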
}
//dbg[id] = J_;
J_shared[tid+(j*3+0)*BLOCK_SIZE] = sign*J_*xi[0];
J_shared[tid+(j*3+1)*BLOCK_SIZE] = sign*J_*xi[1];
J_shared[tid+(j*3+2)*BLOCK_SIZE] = sign*J_*xi[2];
}else{
//dbg[id] = 9999.0f;
}
}
//reduction.....
__syncthreads(); //sync the threads
#pragma unroll
for(int s=(BLOCK_SIZE)/2; s>0; s>>=1) {
if(tid < s)
#pragma unroll
for( int k=0; k<3*3; ++k) {
int tidk = k*BLOCK_SIZE+tid;
J_shared[tidk] += J_shared[tidk + s];
}
__syncthreads();
}
#pragma unroll
for( int k=0; k<3*3; ++k) {
if(tid==k && J_shared[k*BLOCK_SIZE]!=0 ) {
atomicAdd(&J[k],J_shared[k*BLOCK_SIZE]);
}
}
// //reduction.....
//#pragma unroll
// for( int k=0; k<3*3; ++k) {
// int tidk = k*BLOCK_SIZE+tid;
// __syncthreads(); //sync the threads
//#pragma unroll
// for(int s=(BLOCK_SIZE)/2; s>0; s>>=1) {
// if(tid < s)
// J_shared[tidk] += J_shared[tidk + s];
// __syncthreads();
// }
//
// if(tid==0 && J_shared[k*BLOCK_SIZE]!=0 ) {
// atomicAdd(&J[k],J_shared[k*BLOCK_SIZE]);
// }
// }
}
/*
* compute normal assignments as well as the costfunction value under that
* assignment. Normal assignments are computed according based on nearest
* distance in the arclength sense.
*/
__global__ void squaredAngleCostFctAssignment(float *cost, uint32_t* N,
float *x, unsigned short *z, float* errs, float *mu, float sigma_sq,
int w, int h)
{
const int DIM = 3;
//__shared__ float xi[BLOCK_SIZE*3];
__shared__ float mui[DIM*6];
__shared__ float rho[BLOCK_SIZE];
__shared__ uint32_t Ni[BLOCK_SIZE];
const int tid = threadIdx.x + blockDim.x * threadIdx.y;
const int idx = threadIdx.x + blockDim.x * blockIdx.x;
const int idy = threadIdx.y + blockDim.y * blockIdx.y;
const int id = idx + idy*w;
// caching
if(tid < DIM*6) mui[tid] = mu[tid];
rho[tid] = 0.0f;
Ni[tid] = 0;
__syncthreads(); // make sure that ys have been cached
if ((idx<w) && (idy<h))
{
float xi[3];
xi[0] = x[id*X_STEP+X_OFFSET+0];
xi[1] = x[id*X_STEP+X_OFFSET+1];
xi[2] = x[id*X_STEP+X_OFFSET+2];
float err_min = 9999999.0f;
unsigned short k_min = 6;
if((xi[0]!=xi[0] || xi[1]!=xi[1] || xi[2]!=xi[2])
|| xi[0]*xi[0]+xi[1]*xi[1]+xi[2]*xi[2] < 0.9f )
{
// if nan
k_min = 6;
err_min = .1f;
//if(X_STEP == 8) x[id*X_STEP+4] = 6.0f;
}else{
#pragma unroll
for (unsigned short k=0; k<6; ++k)
{
float xiTy = xi[0]*mui[k] + xi[1]*mui[k+6] + xi[2]*mui[k+12];
float err = acosf(max(-1.0f,min(1.0f,xiTy)));
if(err_min > err)
{
err_min = err;
k_min = k;
}
}
rho[tid] = (err_min*err_min)/sigma_sq;
Ni[tid] = 1;
}
z[id] = k_min;
errs[id] = err_min;
if(X_STEP == 8)
{
x[id*X_STEP+X_OFFSET+4] = c_rgbForMFaxes[k_min];//float(k_min);
x[id*X_STEP+X_OFFSET+5] = float(k_min);//xi[0]; //float(k_min);
x[id*X_STEP+X_OFFSET+6] = err_min; //rgb;//xi[1]; //err_min;
// x[id*X_STEP+X_OFFSET+7] = 0.0f;//err_min; //err_min;
}
}
//reduction.....
// TODO: make it faster!
__syncthreads(); //sync the threads
#pragma unroll
for(int s=(BLOCK_SIZE)/2; s>0; s>>=1) {
if(tid < s)
{
rho[tid] += rho[tid + s];
Ni[tid] += Ni[tid + s];
}
__syncthreads();
}
if(tid==0 && rho[0]!=0.0f) {
atomicAdd(&cost[0],rho[0]);
}
if(tid==1 && Ni[0]!=0 ) {
atomicAdd(N,Ni[0]);
}
// __syncthreads(); //sync the threads
//#pragma unroll
// for(int s=(BLOCK_SIZE)/2; s>0; s>>=1) {
// if(tid < s)
// Ni[tid] += Ni[tid + s];
// __syncthreads();
// }
//
// if(tid==0 && Ni[0]!=0 ) {
// atomicAdd(N,Ni[0]);
// }
}
|
4510bb2f54ce8fd17334c4769f66dc2442ac028d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/index_add_kernel.h"
#include "glog/logging.h"
#include "paddle/phi/backends/gpu/gpu_info.h"
#include "paddle/phi/backends/gpu/gpu_launch_config.h"
#include "paddle/phi/backends/gpu/gpu_primitives.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/utils/data_type.h"
#include "paddle/utils/flags.h"
PD_DECLARE_bool(cudnn_deterministic);
namespace phi {
using phi::PADDLE_CUDA_NUM_THREADS;
template <typename T, typename IndexT>
__global__ void index_add_cuda_kernel(const T* input,
const IndexT* index,
const T* add_value,
int64_t N,
int64_t stride,
int64_t size,
int64_t delta,
T* output) {
CUDA_KERNEL_LOOP_TYPE(idx, N, int64_t) {
int64_t pre_idx = idx / (stride * size);
int64_t dim_idx = idx % (stride * size) / stride;
IndexT src_dim_idx = index[dim_idx];
int64_t input_idx =
idx + (delta * pre_idx + src_dim_idx - dim_idx) * stride;
phi::CudaAtomicAdd(&output[input_idx], add_value[idx]);
}
}
template <typename T, typename Context>
void IndexAddKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& index,
const DenseTensor& add_value,
int axis,
DenseTensor* output) {
auto input_dim = x.dims();
auto output_dim = output->dims();
auto add_value_dim = add_value.dims();
const auto& index_type = index.dtype();
int dim = axis;
dim = dim >= 0 ? dim : dim + input_dim.size();
auto stride_dim = phi::stride(input_dim);
int64_t stride = stride_dim[dim];
int64_t size = add_value_dim[dim];
int64_t delta = input_dim[dim] - size;
auto* in_data = x.data<T>();
T* out_data = ctx.template Alloc<T>(output);
auto* add_value_data = add_value.data<T>();
int64_t numel = add_value.numel();
if (numel == 0) {
return;
}
auto stream = ctx.stream();
unsigned int block_dim = PADDLE_CUDA_NUM_THREADS;
dim3 grid_dim = dim3((numel + block_dim - 1) / block_dim);
phi::backends::gpu::LimitGridDim(ctx, &grid_dim);
// copy input to output.
// todo(@limin29): inplace do not need copy.
phi::Copy(ctx, x, ctx.GetPlace(), false, output);
if (FLAGS_cudnn_deterministic) {
VLOG(2) << "Run grad kernel of index_add with single thread.";
block_dim = 1;
grid_dim.x = 1;
}
if (index_type == phi::DataType::INT64) {
const int64_t* index_data = index.data<int64_t>();
hipLaunchKernelGGL(( index_add_cuda_kernel<T, int64_t>)
, dim3(grid_dim), dim3(block_dim), 0, stream, in_data,
index_data,
add_value_data,
numel,
stride,
size,
delta,
out_data);
} else {
const int* index_data = index.data<int>();
hipLaunchKernelGGL(( index_add_cuda_kernel<T, int>)
, dim3(grid_dim), dim3(block_dim), 0, stream, in_data,
index_data,
add_value_data,
numel,
stride,
size,
delta,
out_data);
}
}
} // namespace phi
PD_REGISTER_KERNEL(index_add,
GPU,
ALL_LAYOUT,
phi::IndexAddKernel,
float,
double,
phi::dtype::float16,
phi::dtype::bfloat16,
int,
int64_t) {}
| 4510bb2f54ce8fd17334c4769f66dc2442ac028d.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/index_add_kernel.h"
#include "glog/logging.h"
#include "paddle/phi/backends/gpu/gpu_info.h"
#include "paddle/phi/backends/gpu/gpu_launch_config.h"
#include "paddle/phi/backends/gpu/gpu_primitives.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/utils/data_type.h"
#include "paddle/utils/flags.h"
PD_DECLARE_bool(cudnn_deterministic);
namespace phi {
using phi::PADDLE_CUDA_NUM_THREADS;
template <typename T, typename IndexT>
__global__ void index_add_cuda_kernel(const T* input,
const IndexT* index,
const T* add_value,
int64_t N,
int64_t stride,
int64_t size,
int64_t delta,
T* output) {
CUDA_KERNEL_LOOP_TYPE(idx, N, int64_t) {
int64_t pre_idx = idx / (stride * size);
int64_t dim_idx = idx % (stride * size) / stride;
IndexT src_dim_idx = index[dim_idx];
int64_t input_idx =
idx + (delta * pre_idx + src_dim_idx - dim_idx) * stride;
phi::CudaAtomicAdd(&output[input_idx], add_value[idx]);
}
}
template <typename T, typename Context>
void IndexAddKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& index,
const DenseTensor& add_value,
int axis,
DenseTensor* output) {
auto input_dim = x.dims();
auto output_dim = output->dims();
auto add_value_dim = add_value.dims();
const auto& index_type = index.dtype();
int dim = axis;
dim = dim >= 0 ? dim : dim + input_dim.size();
auto stride_dim = phi::stride(input_dim);
int64_t stride = stride_dim[dim];
int64_t size = add_value_dim[dim];
int64_t delta = input_dim[dim] - size;
auto* in_data = x.data<T>();
T* out_data = ctx.template Alloc<T>(output);
auto* add_value_data = add_value.data<T>();
int64_t numel = add_value.numel();
if (numel == 0) {
return;
}
auto stream = ctx.stream();
unsigned int block_dim = PADDLE_CUDA_NUM_THREADS;
dim3 grid_dim = dim3((numel + block_dim - 1) / block_dim);
phi::backends::gpu::LimitGridDim(ctx, &grid_dim);
// copy input to output.
// todo(@limin29): inplace do not need copy.
phi::Copy(ctx, x, ctx.GetPlace(), false, output);
if (FLAGS_cudnn_deterministic) {
VLOG(2) << "Run grad kernel of index_add with single thread.";
block_dim = 1;
grid_dim.x = 1;
}
if (index_type == phi::DataType::INT64) {
const int64_t* index_data = index.data<int64_t>();
index_add_cuda_kernel<T, int64_t>
<<<grid_dim, block_dim, 0, stream>>>(in_data,
index_data,
add_value_data,
numel,
stride,
size,
delta,
out_data);
} else {
const int* index_data = index.data<int>();
index_add_cuda_kernel<T, int>
<<<grid_dim, block_dim, 0, stream>>>(in_data,
index_data,
add_value_data,
numel,
stride,
size,
delta,
out_data);
}
}
} // namespace phi
PD_REGISTER_KERNEL(index_add,
GPU,
ALL_LAYOUT,
phi::IndexAddKernel,
float,
double,
phi::dtype::float16,
phi::dtype::bfloat16,
int,
int64_t) {}
|
663a935ec69c084273b40c381bc457c9c4aa77a6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@author Raffaele Solca
@author Mark Gates
@generated from zlaset_band.cu normal z -> c, Fri Jan 30 19:00:09 2015
*/
#include "common_magma.h"
#define NB 64
/* ////////////////////////////////////////////////////////////////////////////
-- GPU kernel for setting the k-1 super-diagonals to OFFDIAG
and the main diagonal to DIAG.
Divides matrix into min( ceil((m+k-1)/nb), ceil(n/nb) ) block-columns,
with k threads in each block.
Each thread iterates across one diagonal.
Thread k-1 does the main diagonal, thread k-2 the first super-diagonal, etc.
block 0 block 1
0 => skip above matrix
1 0 => skip above matrix
2 1 0 => skip above matrix
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 | 0 ]
[ 3 2 | 1 0 ]
[ 3 | 2 1 0 ]
[ | 3 2 1 0 ]
[ | 3 2 1 ]
| 3 2 => skip below matrix
3 => skip below matrix
Thread assignment for m=10, n=12, k=4, nb=8. Each column is done in parallel.
*/
__global__
void claset_band_upper(
int m, int n,
magmaFloatComplex offdiag, magmaFloatComplex diag,
magmaFloatComplex *A, int lda)
{
int k = blockDim.x;
int ibx = blockIdx.x * NB;
int ind = ibx + threadIdx.x - k + 1;
A += ind + ibx*lda;
magmaFloatComplex value = offdiag;
if (threadIdx.x == k-1)
value = diag;
#pragma unroll
for (int j=0; j < NB; j++) {
if (ibx + j < n && ind + j >= 0 && ind + j < m) {
A[j*(lda+1)] = value;
}
}
}
/* ////////////////////////////////////////////////////////////////////////////
-- GPU kernel for setting the k-1 sub-diagonals to OFFDIAG
and the main diagonal to DIAG.
Divides matrix into min( ceil(m/nb), ceil(n/nb) ) block-columns,
with k threads in each block.
Each thread iterates across one diagonal.
Thread 0 does the main diagonal, thread 1 the first sub-diagonal, etc.
block 0 block 1
[ 0 | ]
[ 1 0 | ]
[ 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 | 0 ]
[ 3 2 | 1 0 ]
[ 3 | 2 1 0 ]
[ 3 2 1 0 ]
[ 3 2 1 ]
3 2 => skip below matrix
3 => skip below matrix
Thread assignment for m=13, n=12, k=4, nb=8. Each column is done in parallel.
*/
__global__
void claset_band_lower(
int m, int n,
magmaFloatComplex offdiag, magmaFloatComplex diag,
magmaFloatComplex *A, int lda)
{
//int k = blockDim.x;
int ibx = blockIdx.x * NB;
int ind = ibx + threadIdx.x;
A += ind + ibx*lda;
magmaFloatComplex value = offdiag;
if (threadIdx.x == 0)
value = diag;
#pragma unroll
for (int j=0; j < NB; j++) {
if (ibx + j < n && ind + j < m) {
A[j*(lda+1)] = value;
}
}
}
/**
Purpose
-------
CLASET_BAND_STREAM initializes the main diagonal of dA to DIAG,
and the K-1 sub- or super-diagonals to OFFDIAG.
This is the same as CLASET_BAND, but adds queue argument.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA to be set.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
k INTEGER
The number of diagonals to set, including the main diagonal. K >= 0.
Currently, K <= 1024 due to CUDA restrictions (max. number of threads per block).
@param[in]
offdiag COMPLEX
Off-diagonal elements in the band are set to OFFDIAG.
@param[in]
diag COMPLEX
All the main diagonal elements are set to DIAG.
@param[in]
dA COMPLEX array, dimension (LDDA,N)
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
On exit, A(i,j) = ALPHA, 1 <= i <= m, 1 <= j <= n where i != j, abs(i-j) < k;
A(i,i) = BETA , 1 <= i <= min(m,n)
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
queue magma_queue_t
Stream to execute CLASET in.
@ingroup magma_caux2
********************************************************************/
extern "C" void
magmablas_claset_band_q(
magma_uplo_t uplo, magma_int_t m, magma_int_t n, magma_int_t k,
magmaFloatComplex offdiag, magmaFloatComplex diag,
magmaFloatComplex_ptr dA, magma_int_t ldda,
magma_queue_t queue)
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( k < 0 || k > 1024 )
info = -4;
else if ( ldda < max(1,m) )
info = -6;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if (uplo == MagmaUpper) {
dim3 threads( min(k,n) );
dim3 grid( (min(m+k-1,n) - 1)/NB + 1 );
hipLaunchKernelGGL(( claset_band_upper), dim3(grid), dim3(threads), 0, queue , m, n, offdiag, diag, dA, ldda);
}
else if (uplo == MagmaLower) {
dim3 threads( min(k,m) );
dim3 grid( (min(m,n) - 1)/NB + 1 );
hipLaunchKernelGGL(( claset_band_lower), dim3(grid), dim3(threads), 0, queue , m, n, offdiag, diag, dA, ldda);
}
}
/**
@see magmablas_claset_band_q
@ingroup magma_caux2
********************************************************************/
extern "C" void
magmablas_claset_band(
magma_uplo_t uplo, magma_int_t m, magma_int_t n, magma_int_t k,
magmaFloatComplex offdiag, magmaFloatComplex diag,
magmaFloatComplex_ptr dA, magma_int_t ldda)
{
magmablas_claset_band_q(uplo, m, n, k, offdiag, diag, dA, ldda, magma_stream);
}
| 663a935ec69c084273b40c381bc457c9c4aa77a6.cu | /*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@author Raffaele Solca
@author Mark Gates
@generated from zlaset_band.cu normal z -> c, Fri Jan 30 19:00:09 2015
*/
#include "common_magma.h"
#define NB 64
/* ////////////////////////////////////////////////////////////////////////////
-- GPU kernel for setting the k-1 super-diagonals to OFFDIAG
and the main diagonal to DIAG.
Divides matrix into min( ceil((m+k-1)/nb), ceil(n/nb) ) block-columns,
with k threads in each block.
Each thread iterates across one diagonal.
Thread k-1 does the main diagonal, thread k-2 the first super-diagonal, etc.
block 0 block 1
0 => skip above matrix
1 0 => skip above matrix
2 1 0 => skip above matrix
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 | 0 ]
[ 3 2 | 1 0 ]
[ 3 | 2 1 0 ]
[ | 3 2 1 0 ]
[ | 3 2 1 ]
| 3 2 => skip below matrix
3 => skip below matrix
Thread assignment for m=10, n=12, k=4, nb=8. Each column is done in parallel.
*/
__global__
void claset_band_upper(
int m, int n,
magmaFloatComplex offdiag, magmaFloatComplex diag,
magmaFloatComplex *A, int lda)
{
int k = blockDim.x;
int ibx = blockIdx.x * NB;
int ind = ibx + threadIdx.x - k + 1;
A += ind + ibx*lda;
magmaFloatComplex value = offdiag;
if (threadIdx.x == k-1)
value = diag;
#pragma unroll
for (int j=0; j < NB; j++) {
if (ibx + j < n && ind + j >= 0 && ind + j < m) {
A[j*(lda+1)] = value;
}
}
}
/* ////////////////////////////////////////////////////////////////////////////
-- GPU kernel for setting the k-1 sub-diagonals to OFFDIAG
and the main diagonal to DIAG.
Divides matrix into min( ceil(m/nb), ceil(n/nb) ) block-columns,
with k threads in each block.
Each thread iterates across one diagonal.
Thread 0 does the main diagonal, thread 1 the first sub-diagonal, etc.
block 0 block 1
[ 0 | ]
[ 1 0 | ]
[ 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 | 0 ]
[ 3 2 | 1 0 ]
[ 3 | 2 1 0 ]
[ 3 2 1 0 ]
[ 3 2 1 ]
3 2 => skip below matrix
3 => skip below matrix
Thread assignment for m=13, n=12, k=4, nb=8. Each column is done in parallel.
*/
__global__
void claset_band_lower(
int m, int n,
magmaFloatComplex offdiag, magmaFloatComplex diag,
magmaFloatComplex *A, int lda)
{
//int k = blockDim.x;
int ibx = blockIdx.x * NB;
int ind = ibx + threadIdx.x;
A += ind + ibx*lda;
magmaFloatComplex value = offdiag;
if (threadIdx.x == 0)
value = diag;
#pragma unroll
for (int j=0; j < NB; j++) {
if (ibx + j < n && ind + j < m) {
A[j*(lda+1)] = value;
}
}
}
/**
Purpose
-------
CLASET_BAND_STREAM initializes the main diagonal of dA to DIAG,
and the K-1 sub- or super-diagonals to OFFDIAG.
This is the same as CLASET_BAND, but adds queue argument.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA to be set.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
k INTEGER
The number of diagonals to set, including the main diagonal. K >= 0.
Currently, K <= 1024 due to CUDA restrictions (max. number of threads per block).
@param[in]
offdiag COMPLEX
Off-diagonal elements in the band are set to OFFDIAG.
@param[in]
diag COMPLEX
All the main diagonal elements are set to DIAG.
@param[in]
dA COMPLEX array, dimension (LDDA,N)
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
On exit, A(i,j) = ALPHA, 1 <= i <= m, 1 <= j <= n where i != j, abs(i-j) < k;
A(i,i) = BETA , 1 <= i <= min(m,n)
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
queue magma_queue_t
Stream to execute CLASET in.
@ingroup magma_caux2
********************************************************************/
extern "C" void
magmablas_claset_band_q(
magma_uplo_t uplo, magma_int_t m, magma_int_t n, magma_int_t k,
magmaFloatComplex offdiag, magmaFloatComplex diag,
magmaFloatComplex_ptr dA, magma_int_t ldda,
magma_queue_t queue)
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( k < 0 || k > 1024 )
info = -4;
else if ( ldda < max(1,m) )
info = -6;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if (uplo == MagmaUpper) {
dim3 threads( min(k,n) );
dim3 grid( (min(m+k-1,n) - 1)/NB + 1 );
claset_band_upper<<< grid, threads, 0, queue >>> (m, n, offdiag, diag, dA, ldda);
}
else if (uplo == MagmaLower) {
dim3 threads( min(k,m) );
dim3 grid( (min(m,n) - 1)/NB + 1 );
claset_band_lower<<< grid, threads, 0, queue >>> (m, n, offdiag, diag, dA, ldda);
}
}
/**
@see magmablas_claset_band_q
@ingroup magma_caux2
********************************************************************/
extern "C" void
magmablas_claset_band(
magma_uplo_t uplo, magma_int_t m, magma_int_t n, magma_int_t k,
magmaFloatComplex offdiag, magmaFloatComplex diag,
magmaFloatComplex_ptr dA, magma_int_t ldda)
{
magmablas_claset_band_q(uplo, m, n, k, offdiag, diag, dA, ldda, magma_stream);
}
|
6155692759dc1cf07ad7be5b61abb119be8a9de6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////////
//
// Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
/*
This example demonstrates how to use the Cuda OpenGL bindings to
dynamically modify a vertex buffer using a Cuda kernel.
The steps are:
1. Create an empty vertex buffer object (VBO)
2. Register the VBO with Cuda
3. Map the VBO for writing from Cuda
4. Run Cuda kernel to modify the vertex positions
5. Unmap the VBO
6. Render the results using OpenGL
Host code
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// CUDA helper functions
#include <helper_cuda.h> // helper functions for CUDA error check
#include <helper_math.h>
texture<unsigned char, 3, hipReadModeNormalizedFloat> tex; // 3D texture
hipArray *d_volumeArray = 0;
///////////////////////////////////////////////////////////////////////////////
//! Simple kernel that sets vertex heights by sampling the 3D volume texture
//! (the original sine-wave displacement is left commented out below)
//! @param pos  vertex buffer (float4 positions) in global memory
///////////////////////////////////////////////////////////////////////////////
__global__ void simple_vbo_kernel(float4 *pos, unsigned int width, unsigned int height, float time)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
// calculate uv coordinates
float u = x / (float)width;
float v = y / (float)height;
u = u*2.0f - 1.0f;
v = v*2.0f - 1.0f;
//// calculate simple sine wave pattern
//float freq = 4.0f;
//float w = sinf(u*freq + time) * cosf(v*freq + time) * 0.5f;
float voxel = tex3D(tex, u, v, 0.0f);
// write output vertex
pos[y*width + x] = make_float4(u, voxel, v, 1.0f);
}
extern "C"
void render_kernel(float4 *pos, unsigned int mesh_width,
unsigned int mesh_height, float time)
{
// execute the kernel
dim3 block(8, 8, 1);
dim3 grid(mesh_width / block.x, mesh_height / block.y, 1);
simple_vbo_kernel << < grid, block >> >(pos, mesh_width, mesh_height, time);
}
extern "C"
void initCuda(const unsigned char *h_volume, hipExtent volumeSize)
{
// create 3D array
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<unsigned char>();
checkCudaErrors(hipMalloc3DArray(&d_volumeArray, &channelDesc, volumeSize));
// copy data to 3D array
hipMemcpy3DParms copyParams = { 0 };
copyParams.srcPtr = make_hipPitchedPtr((void *)h_volume, volumeSize.width * sizeof(unsigned char), volumeSize.width, volumeSize.height);
copyParams.dstArray = d_volumeArray;
copyParams.extent = volumeSize;
copyParams.kind = hipMemcpyHostToDevice;
checkCudaErrors(hipMemcpy3D(©Params));
// set texture parameters
tex.normalized = true; // access with normalized texture coordinates
tex.filterMode = hipFilterModeLinear; // linear interpolation
tex.addressMode[0] = hipAddressModeWrap; // wrap texture coordinates
tex.addressMode[1] = hipAddressModeWrap;
tex.addressMode[2] = hipAddressModeWrap;
// bind array to 3D texture
checkCudaErrors(hipBindTextureToArray(tex, d_volumeArray, channelDesc));
}
| 6155692759dc1cf07ad7be5b61abb119be8a9de6.cu | ////////////////////////////////////////////////////////////////////////////
//
// Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
/*
This example demonstrates how to use the Cuda OpenGL bindings to
dynamically modify a vertex buffer using a Cuda kernel.
The steps are:
1. Create an empty vertex buffer object (VBO)
2. Register the VBO with Cuda
3. Map the VBO for writing from Cuda
4. Run Cuda kernel to modify the vertex positions
5. Unmap the VBO
6. Render the results using OpenGL
Host code
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// CUDA helper functions
#include <helper_cuda.h> // helper functions for CUDA error check
#include <helper_math.h>
texture<unsigned char, 3, cudaReadModeNormalizedFloat> tex; // 3D texture
cudaArray *d_volumeArray = 0;
///////////////////////////////////////////////////////////////////////////////
//! Simple kernel that sets vertex heights by sampling the 3D volume texture
//! (the original sine-wave displacement is left commented out below)
//! @param pos  vertex buffer (float4 positions) in global memory
///////////////////////////////////////////////////////////////////////////////
__global__ void simple_vbo_kernel(float4 *pos, unsigned int width, unsigned int height, float time)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
// calculate uv coordinates
float u = x / (float)width;
float v = y / (float)height;
u = u*2.0f - 1.0f;
v = v*2.0f - 1.0f;
//// calculate simple sine wave pattern
//float freq = 4.0f;
//float w = sinf(u*freq + time) * cosf(v*freq + time) * 0.5f;
float voxel = tex3D(tex, u, v, 0.0f);
// write output vertex
pos[y*width + x] = make_float4(u, voxel, v, 1.0f);
}
extern "C"
void render_kernel(float4 *pos, unsigned int mesh_width,
unsigned int mesh_height, float time)
{
// execute the kernel
dim3 block(8, 8, 1);
dim3 grid(mesh_width / block.x, mesh_height / block.y, 1);
simple_vbo_kernel << < grid, block >> >(pos, mesh_width, mesh_height, time);
}
extern "C"
void initCuda(const unsigned char *h_volume, cudaExtent volumeSize)
{
// create 3D array
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<unsigned char>();
checkCudaErrors(cudaMalloc3DArray(&d_volumeArray, &channelDesc, volumeSize));
// copy data to 3D array
cudaMemcpy3DParms copyParams = { 0 };
copyParams.srcPtr = make_cudaPitchedPtr((void *)h_volume, volumeSize.width * sizeof(unsigned char), volumeSize.width, volumeSize.height);
copyParams.dstArray = d_volumeArray;
copyParams.extent = volumeSize;
copyParams.kind = cudaMemcpyHostToDevice;
checkCudaErrors(cudaMemcpy3D(©Params));
// set texture parameters
tex.normalized = true; // access with normalized texture coordinates
tex.filterMode = cudaFilterModeLinear; // linear interpolation
tex.addressMode[0] = cudaAddressModeWrap; // wrap texture coordinates
tex.addressMode[1] = cudaAddressModeWrap;
tex.addressMode[2] = cudaAddressModeWrap;
// bind array to 3D texture
checkCudaErrors(cudaBindTextureToArray(tex, d_volumeArray, channelDesc));
}
|
001332e321833ab5275449e8962b886ce3a5c139.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime_api.h> // hipMalloc, hipMemcpy, etc.
#include <hipsparse.h> // hipsparseSpMM
#include <stdio.h> // printf
#include <stdlib.h> // EXIT_FAILURE
#define CHECK_CUDA(func) \
{ \
hipError_t status = (func); \
if (status != hipSuccess) { \
printf("CUDA API failed at line %d with error: %s (%d)\n", \
__LINE__, hipGetErrorString(status), status); \
return EXIT_FAILURE; \
} \
}
#define CHECK_CUSPARSE(func) \
{ \
hipsparseStatus_t status = (func); \
if (status != HIPSPARSE_STATUS_SUCCESS) { \
printf("CUSPARSE API failed at line %d with error: %s (%d)\n", \
__LINE__, hipsparseGetErrorString(status), status); \
return EXIT_FAILURE; \
} \
}
int generate_random_dense_matrix(int M, int N, float **outA, float density)
{
int i, j;
double rMax = (double)RAND_MAX;
float *A = (float *)malloc(sizeof(float) * M * N);
int totalNnz = 0;
for (j = 0; j < N; j++)
{
for (i = 0; i < M; i++)
{
int r = rand();
double dr = (double)r;
float *curr = A + (j * M + i);
if (dr / rMax > density)
{
*curr = 0.0f;
}
else
{
*curr = (dr / rMax) * 100.0;
}
if (*curr != 0.0f)
{
totalNnz++;
}
}
}
*outA = A;
return totalNnz;
}
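// Note: `density` is the probability that an entry is kept non-zero;
// kept entries get values in [0, 100*density] and the returned count of
// non-zeros is used below to size the CSR arrays for the sparsity mask S.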
int main(void) {
// K, V: [seq-len, head-size] = [512, 64]
// Q: [head-size, seq-len] = [64, 512]
// S = K * Q: [seq-len, seq-len] = [512, 512]
int SEQ_LEN = 512;
int HEAD_SIZE = 64;
float ATTN_DENSITY = 0.1;
int QKV_SIZE = SEQ_LEN * HEAD_SIZE;
int ldk = SEQ_LEN;
int ldq = HEAD_SIZE;
int ldv = SEQ_LEN;
int ldo = SEQ_LEN;
float alpha = 1.0f;
float beta = 0.0f;
float *dQ, *dV, *dK, *dS, *dCsrValS, *dO;
float *hQ, *hV, *hK, *hS;
int *dCsrRowPtrS, *dCsrColIndS, *dSNnzPerRow;
void *dBuffer1, *dBuffer2, *dBuffer3;
size_t bufferSize = 0;
hipsparseSpMatDescr_t Sdescr;
hipsparseDnMatDescr_t Kdescr, Qdescr, Vdescr, Odescr, SdescrDense;
hipsparseHandle_t handle;
CHECK_CUSPARSE(hipsparseCreate(&handle));
float time_kernel;
hipEvent_t start_event, stop_event;
CHECK_CUDA(hipEventCreateWithFlags(&start_event, hipEventBlockingSync));
CHECK_CUDA(hipEventCreateWithFlags(&stop_event, hipEventBlockingSync));
size_t N_REPEAT = 10000;
//// initialize dense Q, K, V, O
hQ = (float *)calloc(QKV_SIZE, sizeof(float));
hV = (float *)calloc(QKV_SIZE, sizeof(float));
hK = (float *)calloc(QKV_SIZE, sizeof(float));
CHECK_CUDA(hipMalloc((void **)&dQ, QKV_SIZE * sizeof(float)))
CHECK_CUDA(hipMalloc((void**) &dV, QKV_SIZE * sizeof(float)))
CHECK_CUDA(hipMalloc((void**) &dK, QKV_SIZE * sizeof(float)))
CHECK_CUDA(hipMalloc((void**) &dO, QKV_SIZE * sizeof(float)))
CHECK_CUDA(hipMemcpy(dQ, hQ, QKV_SIZE * sizeof(float), hipMemcpyHostToDevice))
CHECK_CUDA(hipMemcpy(dV, hV, QKV_SIZE * sizeof(float), hipMemcpyHostToDevice))
CHECK_CUDA(hipMemcpy(dK, hK, QKV_SIZE * sizeof(float), hipMemcpyHostToDevice))
CHECK_CUSPARSE(hipsparseCreateDnMat(&Kdescr, SEQ_LEN, HEAD_SIZE, ldk, dK, HIP_R_32F, HIPSPARSE_ORDER_COL))
CHECK_CUSPARSE(hipsparseCreateDnMat(&Qdescr, HEAD_SIZE, SEQ_LEN, ldq, dQ, HIP_R_32F, HIPSPARSE_ORDER_COL))
CHECK_CUSPARSE(hipsparseCreateDnMat(&Vdescr, SEQ_LEN, HEAD_SIZE, ldv, dV, HIP_R_32F, HIPSPARSE_ORDER_COL))
CHECK_CUSPARSE(hipsparseCreateDnMat(&Odescr, SEQ_LEN, HEAD_SIZE, ldo, dO, HIP_R_32F, HIPSPARSE_ORDER_COL))
//// initialize sparsity mask S in CSR format
int totalSNnz = generate_random_dense_matrix(SEQ_LEN, SEQ_LEN, &hS, ATTN_DENSITY);
CHECK_CUDA(hipMalloc((void **)&dS, sizeof(float) * SEQ_LEN * SEQ_LEN));
CHECK_CUDA(hipMalloc((void **)&dSNnzPerRow, sizeof(int) * SEQ_LEN));
CHECK_CUDA(hipMalloc((void **)&dCsrValS, sizeof(float) * totalSNnz));
CHECK_CUDA(hipMalloc((void **)&dCsrRowPtrS, sizeof(int) * (SEQ_LEN + 1)));
CHECK_CUDA(hipMalloc((void **)&dCsrColIndS, sizeof(int) * totalSNnz));
CHECK_CUDA(hipMemcpy(dS, hS, sizeof(float) * SEQ_LEN * SEQ_LEN, hipMemcpyHostToDevice));
CHECK_CUSPARSE(hipsparseCreateDnMat(&SdescrDense, SEQ_LEN, SEQ_LEN, SEQ_LEN, dS, HIP_R_32F, HIPSPARSE_ORDER_ROW))
CHECK_CUSPARSE(hipsparseCreateCsr(&Sdescr, SEQ_LEN, SEQ_LEN, 0,
dCsrRowPtrS, NULL, NULL,
HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I,
HIPSPARSE_INDEX_BASE_ZERO, HIP_R_32F))
CHECK_CUSPARSE(hipsparseDenseToSparse_bufferSize(handle, SdescrDense, Sdescr, HIPSPARSE_DENSETOSPARSE_ALG_DEFAULT, &bufferSize))
CHECK_CUDA(hipMalloc(&dBuffer3, bufferSize))
CHECK_CUSPARSE(hipsparseDenseToSparse_analysis(handle, SdescrDense, Sdescr, HIPSPARSE_DENSETOSPARSE_ALG_DEFAULT, dBuffer3))
CHECK_CUSPARSE(hipsparseCsrSetPointers(Sdescr, dCsrRowPtrS, dCsrColIndS, dCsrValS))
CHECK_CUSPARSE(hipsparseDenseToSparse_convert(handle, SdescrDense, Sdescr, HIPSPARSE_DENSETOSPARSE_ALG_DEFAULT, dBuffer3))
/// Allocate external buffers for SDDMM and SpMM
CHECK_CUSPARSE(cusparseConstrainedGeMM_bufferSize(
handle,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
&alpha, Kdescr, Qdescr, &beta, Sdescr,
HIP_R_32F, &bufferSize))
CHECK_CUDA(hipMalloc(&dBuffer1, bufferSize))
// allocate an external buffer if needed
CHECK_CUSPARSE(hipsparseSpMM_bufferSize(
handle,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
&alpha, Sdescr, Vdescr, &beta, Odescr, HIP_R_32F,
CUSPARSE_SPMM_ALG_DEFAULT, &bufferSize))
CHECK_CUDA(hipMalloc(&dBuffer2, bufferSize))
/// Run SDDMM and SpMM
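// Step 1 (SDDMM): compute attention scores S = K * Q, kept only at the positions of the CSR sparsity mask.
// Step 2 (SpMM): multiply the sparse scores by the values, O = S * V.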
// warmup
CHECK_CUSPARSE(cusparseConstrainedGeMM(handle,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
&alpha, Kdescr, Qdescr, &beta, Sdescr,
HIP_R_32F, dBuffer1))
CHECK_CUSPARSE(hipsparseSpMM(handle,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
&alpha, Sdescr, Vdescr, &beta, Odescr, HIP_R_32F,
CUSPARSE_SPMM_ALG_DEFAULT, dBuffer2))
CHECK_CUDA(hipEventRecord(start_event, 0));
for (size_t i = 0; i < N_REPEAT; i++) {
CHECK_CUSPARSE(cusparseConstrainedGeMM(handle,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
&alpha, Kdescr, Qdescr, &beta, Sdescr,
HIP_R_32F, dBuffer1))
CHECK_CUSPARSE(hipsparseSpMM(handle,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
&alpha, Sdescr, Vdescr, &beta, Odescr, HIP_R_32F,
CUSPARSE_SPMM_ALG_DEFAULT, dBuffer2))
}
CHECK_CUDA(hipEventRecord(stop_event, 0));
CHECK_CUDA(hipEventSynchronize(stop_event));
CHECK_CUDA(hipEventElapsedTime(&time_kernel, start_event, stop_event));
printf("kernel:\t\t%.4f ms\n", time_kernel / N_REPEAT);
/// cleanup
CHECK_CUSPARSE(hipsparseDestroySpMat(Sdescr))
CHECK_CUSPARSE(hipsparseDestroyDnMat(Kdescr))
CHECK_CUSPARSE(hipsparseDestroyDnMat(Qdescr))
CHECK_CUSPARSE(hipsparseDestroyDnMat(Vdescr))
CHECK_CUSPARSE(hipsparseDestroyDnMat(Odescr))
CHECK_CUSPARSE(hipsparseDestroyDnMat(SdescrDense))
CHECK_CUSPARSE(hipsparseDestroy(handle))
CHECK_CUDA(hipFree(dQ))
CHECK_CUDA(hipFree(dK))
CHECK_CUDA(hipFree(dV))
CHECK_CUDA(hipFree(dO))
CHECK_CUDA(hipFree(dS))
CHECK_CUDA(hipFree(dCsrColIndS))
CHECK_CUDA(hipFree(dCsrRowPtrS))
CHECK_CUDA(hipFree(dCsrValS))
CHECK_CUDA(hipFree(dSNnzPerRow))
CHECK_CUDA(hipFree(dBuffer1))
CHECK_CUDA(hipFree(dBuffer2))
return EXIT_SUCCESS;
}
| 001332e321833ab5275449e8962b886ce3a5c139.cu | #include <cuda_runtime_api.h> // cudaMalloc, cudaMemcpy, etc.
#include <cusparse.h> // cusparseSpMM
#include <stdio.h> // printf
#include <stdlib.h> // EXIT_FAILURE
#define CHECK_CUDA(func) \
{ \
cudaError_t status = (func); \
if (status != cudaSuccess) { \
printf("CUDA API failed at line %d with error: %s (%d)\n", \
__LINE__, cudaGetErrorString(status), status); \
return EXIT_FAILURE; \
} \
}
#define CHECK_CUSPARSE(func) \
{ \
cusparseStatus_t status = (func); \
if (status != CUSPARSE_STATUS_SUCCESS) { \
printf("CUSPARSE API failed at line %d with error: %s (%d)\n", \
__LINE__, cusparseGetErrorString(status), status); \
return EXIT_FAILURE; \
} \
}
int generate_random_dense_matrix(int M, int N, float **outA, float density)
{
int i, j;
double rMax = (double)RAND_MAX;
float *A = (float *)malloc(sizeof(float) * M * N);
int totalNnz = 0;
for (j = 0; j < N; j++)
{
for (i = 0; i < M; i++)
{
int r = rand();
double dr = (double)r;
float *curr = A + (j * M + i);
if (dr / rMax > density)
{
*curr = 0.0f;
}
else
{
*curr = (dr / rMax) * 100.0;
}
if (*curr != 0.0f)
{
totalNnz++;
}
}
}
*outA = A;
return totalNnz;
}
int main(void) {
// Q, V: [seq-len, head-size] = [512, 64]
// K: [head-size, seq-len] = [64, 512]
// S: [seq-len, seq-len] = [512, 512]
int SEQ_LEN = 512;
int HEAD_SIZE = 64;
float ATTN_DENSITY = 0.1;
int QKV_SIZE = SEQ_LEN * HEAD_SIZE;
int ldk = SEQ_LEN;
int ldq = HEAD_SIZE;
int ldv = SEQ_LEN;
int ldo = SEQ_LEN;
float alpha = 1.0f;
float beta = 0.0f;
float *dQ, *dV, *dK, *dS, *dCsrValS, *dO;
float *hQ, *hV, *hK, *hS;
int *dCsrRowPtrS, *dCsrColIndS, *dSNnzPerRow;
void *dBuffer1, *dBuffer2, *dBuffer3;
size_t bufferSize = 0;
cusparseSpMatDescr_t Sdescr;
cusparseDnMatDescr_t Kdescr, Qdescr, Vdescr, Odescr, SdescrDense;
cusparseHandle_t handle;
CHECK_CUSPARSE(cusparseCreate(&handle));
float time_kernel;
cudaEvent_t start_event, stop_event;
CHECK_CUDA(cudaEventCreateWithFlags(&start_event, cudaEventBlockingSync));
CHECK_CUDA(cudaEventCreateWithFlags(&stop_event, cudaEventBlockingSync));
size_t N_REPEAT = 10000;
//// initialize dense Q, K, V, O
hQ = (float *)calloc(QKV_SIZE, sizeof(float));
hV = (float *)calloc(QKV_SIZE, sizeof(float));
hK = (float *)calloc(QKV_SIZE, sizeof(float));
CHECK_CUDA(cudaMalloc((void **)&dQ, QKV_SIZE * sizeof(float)))
CHECK_CUDA(cudaMalloc((void**) &dV, QKV_SIZE * sizeof(float)))
CHECK_CUDA(cudaMalloc((void**) &dK, QKV_SIZE * sizeof(float)))
CHECK_CUDA(cudaMalloc((void**) &dO, QKV_SIZE * sizeof(float)))
CHECK_CUDA(cudaMemcpy(dQ, hQ, QKV_SIZE * sizeof(float), cudaMemcpyHostToDevice))
CHECK_CUDA(cudaMemcpy(dV, hV, QKV_SIZE * sizeof(float), cudaMemcpyHostToDevice))
CHECK_CUDA(cudaMemcpy(dK, hK, QKV_SIZE * sizeof(float), cudaMemcpyHostToDevice))
CHECK_CUSPARSE(cusparseCreateDnMat(&Kdescr, SEQ_LEN, HEAD_SIZE, ldk, dK, CUDA_R_32F, CUSPARSE_ORDER_COL))
CHECK_CUSPARSE(cusparseCreateDnMat(&Qdescr, HEAD_SIZE, SEQ_LEN, ldq, dQ, CUDA_R_32F, CUSPARSE_ORDER_COL))
CHECK_CUSPARSE(cusparseCreateDnMat(&Vdescr, SEQ_LEN, HEAD_SIZE, ldv, dV, CUDA_R_32F, CUSPARSE_ORDER_COL))
CHECK_CUSPARSE(cusparseCreateDnMat(&Odescr, SEQ_LEN, HEAD_SIZE, ldo, dO, CUDA_R_32F, CUSPARSE_ORDER_COL))
//// initialize sparsity mask S in CSR format
int totalSNnz = generate_random_dense_matrix(SEQ_LEN, SEQ_LEN, &hS, ATTN_DENSITY);
CHECK_CUDA(cudaMalloc((void **)&dS, sizeof(float) * SEQ_LEN * SEQ_LEN));
CHECK_CUDA(cudaMalloc((void **)&dSNnzPerRow, sizeof(int) * SEQ_LEN));
CHECK_CUDA(cudaMalloc((void **)&dCsrValS, sizeof(float) * totalSNnz));
CHECK_CUDA(cudaMalloc((void **)&dCsrRowPtrS, sizeof(int) * (SEQ_LEN + 1)));
CHECK_CUDA(cudaMalloc((void **)&dCsrColIndS, sizeof(int) * totalSNnz));
CHECK_CUDA(cudaMemcpy(dS, hS, sizeof(float) * SEQ_LEN * SEQ_LEN, cudaMemcpyHostToDevice));
CHECK_CUSPARSE(cusparseCreateDnMat(&SdescrDense, SEQ_LEN, SEQ_LEN, SEQ_LEN, dS, CUDA_R_32F, CUSPARSE_ORDER_ROW))
CHECK_CUSPARSE(cusparseCreateCsr(&Sdescr, SEQ_LEN, SEQ_LEN, 0,
dCsrRowPtrS, NULL, NULL,
CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I,
CUSPARSE_INDEX_BASE_ZERO, CUDA_R_32F))
CHECK_CUSPARSE(cusparseDenseToSparse_bufferSize(handle, SdescrDense, Sdescr, CUSPARSE_DENSETOSPARSE_ALG_DEFAULT, &bufferSize))
CHECK_CUDA(cudaMalloc(&dBuffer3, bufferSize))
CHECK_CUSPARSE(cusparseDenseToSparse_analysis(handle, SdescrDense, Sdescr, CUSPARSE_DENSETOSPARSE_ALG_DEFAULT, dBuffer3))
CHECK_CUSPARSE(cusparseCsrSetPointers(Sdescr, dCsrRowPtrS, dCsrColIndS, dCsrValS))
CHECK_CUSPARSE(cusparseDenseToSparse_convert(handle, SdescrDense, Sdescr, CUSPARSE_DENSETOSPARSE_ALG_DEFAULT, dBuffer3))
/// Allocate external buffers for SDDMM and SpMM
CHECK_CUSPARSE(cusparseConstrainedGeMM_bufferSize(
handle,
CUSPARSE_OPERATION_NON_TRANSPOSE,
CUSPARSE_OPERATION_NON_TRANSPOSE,
&alpha, Kdescr, Qdescr, &beta, Sdescr,
CUDA_R_32F, &bufferSize))
CHECK_CUDA(cudaMalloc(&dBuffer1, bufferSize))
// allocate an external buffer if needed
CHECK_CUSPARSE(cusparseSpMM_bufferSize(
handle,
CUSPARSE_OPERATION_NON_TRANSPOSE,
CUSPARSE_OPERATION_NON_TRANSPOSE,
&alpha, Sdescr, Vdescr, &beta, Odescr, CUDA_R_32F,
CUSPARSE_SPMM_ALG_DEFAULT, &bufferSize))
CHECK_CUDA(cudaMalloc(&dBuffer2, bufferSize))
/// Run SDDMM and SpMM
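// Step 1 (SDDMM): compute attention scores S = K * Q, kept only at the positions of the CSR sparsity mask.
// Step 2 (SpMM): multiply the sparse scores by the values, O = S * V.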
// warmup
CHECK_CUSPARSE(cusparseConstrainedGeMM(handle,
CUSPARSE_OPERATION_NON_TRANSPOSE,
CUSPARSE_OPERATION_NON_TRANSPOSE,
&alpha, Kdescr, Qdescr, &beta, Sdescr,
CUDA_R_32F, dBuffer1))
CHECK_CUSPARSE(cusparseSpMM(handle,
CUSPARSE_OPERATION_NON_TRANSPOSE,
CUSPARSE_OPERATION_NON_TRANSPOSE,
&alpha, Sdescr, Vdescr, &beta, Odescr, CUDA_R_32F,
CUSPARSE_SPMM_ALG_DEFAULT, dBuffer2))
CHECK_CUDA(cudaEventRecord(start_event, 0));
for (size_t i = 0; i < N_REPEAT; i++) {
CHECK_CUSPARSE(cusparseConstrainedGeMM(handle,
CUSPARSE_OPERATION_NON_TRANSPOSE,
CUSPARSE_OPERATION_NON_TRANSPOSE,
&alpha, Kdescr, Qdescr, &beta, Sdescr,
CUDA_R_32F, dBuffer1))
CHECK_CUSPARSE(cusparseSpMM(handle,
CUSPARSE_OPERATION_NON_TRANSPOSE,
CUSPARSE_OPERATION_NON_TRANSPOSE,
&alpha, Sdescr, Vdescr, &beta, Odescr, CUDA_R_32F,
CUSPARSE_SPMM_ALG_DEFAULT, dBuffer2))
}
CHECK_CUDA(cudaEventRecord(stop_event, 0));
CHECK_CUDA(cudaEventSynchronize(stop_event));
CHECK_CUDA(cudaEventElapsedTime(&time_kernel, start_event, stop_event));
printf("kernel:\t\t%.4f ms\n", time_kernel / N_REPEAT);
/// cleanup
CHECK_CUSPARSE(cusparseDestroySpMat(Sdescr))
CHECK_CUSPARSE(cusparseDestroyDnMat(Kdescr))
CHECK_CUSPARSE(cusparseDestroyDnMat(Qdescr))
CHECK_CUSPARSE(cusparseDestroyDnMat(Vdescr))
CHECK_CUSPARSE(cusparseDestroyDnMat(Odescr))
CHECK_CUSPARSE(cusparseDestroyDnMat(SdescrDense))
CHECK_CUSPARSE(cusparseDestroy(handle))
CHECK_CUDA(cudaFree(dQ))
CHECK_CUDA(cudaFree(dK))
CHECK_CUDA(cudaFree(dV))
CHECK_CUDA(cudaFree(dO))
CHECK_CUDA(cudaFree(dS))
CHECK_CUDA(cudaFree(dCsrColIndS))
CHECK_CUDA(cudaFree(dCsrRowPtrS))
CHECK_CUDA(cudaFree(dCsrValS))
CHECK_CUDA(cudaFree(dSNnzPerRow))
CHECK_CUDA(cudaFree(dBuffer1))
CHECK_CUDA(cudaFree(dBuffer2))
return EXIT_SUCCESS;
}
|
e584841c89f7018e624dbe5ceac84358f34c4768.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "head.h"
float *h_a;
float *h_b;
float *h_c;
float *d_a;
float *d_b;
float *d_c;
void CPU_malloc(){
size_t size = N*sizeof(float);
h_a = (float *)malloc(N*size);
h_b = (float *)malloc(size);
h_c = (float *)malloc(size);
}
void GPU_malloc(){
size_t size = N*sizeof(float);
hipError_t Error;
Error = hipMalloc((void**)&d_a,N*size);
printf("CUDA error(malloc d_a) = %s\n", hipGetErrorString(Error));
Error = hipMalloc((void**)&d_b,size);
printf("CUDA error(malloc d_b) = %s\n", hipGetErrorString(Error));
Error = hipMalloc((void**)&d_c,size);
printf("CUDA error(malloc d_c) = %s\n", hipGetErrorString(Error));
}
void Free(){
free(h_a);
free(h_b);
free(h_c);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
}
void Init(){
int i;
for(i=0;i<N*N;i++){
h_a[i] = i;
}
for(i=0;i<N;i++){
h_b[i] = 2*i;
}
}
void print(float *a){
int i;
for(i=0;i<N;i++){
printf("%f ", a[i]);
}
printf("\n");
}
void print_matrix(float *a){
int i, j;
for(i=0;i<N;i++){
for(j=0;j<N;j++){
printf("%f ", a[i*N+j]);
}
printf("\n");
}
}
void Sent_to_device(){
size_t size = N*sizeof(float);
hipError_t Error;
Error = hipMemcpy(d_a, h_a, N*size, hipMemcpyHostToDevice);
printf("CUDA error(copy h_a) = %s\n", hipGetErrorString(Error));
Error = hipMemcpy(d_b, h_b, size, hipMemcpyHostToDevice);
printf("CUDA error(copy h_b) = %s\n", hipGetErrorString(Error));
}
void Sent_to_host(){
size_t size = N*sizeof(float);
hipError_t Error;
Error = hipMemcpy(h_c, d_c, size, hipMemcpyDeviceToHost);
printf("CUDA error(copy d_c) = %s\n", hipGetErrorString(Error));
}
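// GPU matrix-vector product: each thread computes one entry of z by accumulating one row of the N x N matrix x against the vector y.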
__global__ void GPU_product(float *x, float *y, float *z){
int i = blockDim.x*blockIdx.x + threadIdx.x;
int j;
if(i<N){
z[i] = 0.0;
for(j=0;j<N;j++){
z[i] += x[i*N+j]*y[j];
}
}
}
void product1(){
int i, j;
for(i=0;i<N;i++){
h_c[i] = 0.0;
for(j=0;j<N;j++){
h_c[i] += h_a[i*N+j]*h_b[j];
}
}
}
void product2(){
int tpb = 256;
int bpg = (N+tpb-1)/tpb;
hipLaunchKernelGGL(( GPU_product), dim3(bpg), dim3(tpb), 0, 0, d_a, d_b, d_c);
}
| e584841c89f7018e624dbe5ceac84358f34c4768.cu | #include "head.h"
float *h_a;
float *h_b;
float *h_c;
float *d_a;
float *d_b;
float *d_c;
void CPU_malloc(){
size_t size = N*sizeof(float);
h_a = (float *)malloc(N*size);
h_b = (float *)malloc(size);
h_c = (float *)malloc(size);
}
void GPU_malloc(){
size_t size = N*sizeof(float);
cudaError_t Error;
Error = cudaMalloc((void**)&d_a,N*size);
printf("CUDA error(malloc d_a) = %s\n", cudaGetErrorString(Error));
Error = cudaMalloc((void**)&d_b,size);
printf("CUDA error(malloc d_b) = %s\n", cudaGetErrorString(Error));
Error = cudaMalloc((void**)&d_c,size);
printf("CUDA error(malloc d_c) = %s\n", cudaGetErrorString(Error));
}
void Free(){
free(h_a);
free(h_b);
free(h_c);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
}
void Init(){
int i;
for(i=0;i<N*N;i++){
h_a[i] = i;
}
for(i=0;i<N;i++){
h_b[i] = 2*i;
}
}
void print(float *a){
int i;
for(i=0;i<N;i++){
printf("%f ", a[i]);
}
printf("\n");
}
void print_matrix(float *a){
int i, j;
for(i=0;i<N;i++){
for(j=0;j<N;j++){
printf("%f ", a[i*N+j]);
}
printf("\n");
}
}
void Sent_to_device(){
size_t size = N*sizeof(float);
cudaError_t Error;
Error = cudaMemcpy(d_a, h_a, N*size, cudaMemcpyHostToDevice);
printf("CUDA error(copy h_a) = %s\n", cudaGetErrorString(Error));
Error = cudaMemcpy(d_b, h_b, size, cudaMemcpyHostToDevice);
printf("CUDA error(copy h_b) = %s\n", cudaGetErrorString(Error));
}
void Sent_to_host(){
size_t size = N*sizeof(float);
cudaError_t Error;
Error = cudaMemcpy(h_c, d_c, size, cudaMemcpyDeviceToHost);
printf("CUDA error(copy d_c) = %s\n", cudaGetErrorString(Error));
}
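// GPU matrix-vector product: each thread computes one entry of z by accumulating one row of the N x N matrix x against the vector y.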
__global__ void GPU_product(float *x, float *y, float *z){
int i = blockDim.x*blockIdx.x + threadIdx.x;
int j;
if(i<N){
z[i] = 0.0;
for(j=0;j<N;j++){
z[i] += x[i*N+j]*y[j];
}
}
}
void product1(){
int i, j;
for(i=0;i<N;i++){
h_c[i] = 0.0;
for(j=0;j<N;j++){
h_c[i] += h_a[i*N+j]*h_b[j];
}
}
}
void product2(){
int tpb = 256;
int bpg = (N+tpb-1)/tpb;
GPU_product<<<bpg, tpb>>>(d_a, d_b, d_c);
}
|
f428179f747f190f777d86aa1a7608346c54178b.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "decode.h"
#include "utils.h"
#include <algorithm>
#include <cstdint>
#include <thrust/device_ptr.h>
#include <thrust/sequence.h>
#include <thrust/execution_policy.h>
#include <thrust/gather.h>
#include <thrust/tabulate.h>
#include <thrust/count.h>
#include <thrust/find.h>
#include <thrust/system/hip/detail/hipcub/hipcub.hpp>
#include <thrust/system/hip/detail/cub/iterator/counting_input_iterator.cuh>
namespace retinanet {
namespace cuda {
int decode(int batch_size,
const void *const *inputs, void **outputs,
size_t height, size_t width, size_t scale,
size_t num_anchors, size_t num_classes,
const std::vector<float> &anchors, float score_thresh, int top_n,
void *workspace, size_t workspace_size, hipStream_t stream) {
int scores_size = num_anchors * num_classes * height * width;
if (!workspace || !workspace_size) {
// Return required scratch space size cub style
workspace_size = get_size_aligned<float>(anchors.size()); // anchors
workspace_size += get_size_aligned<bool>(scores_size); // flags
workspace_size += get_size_aligned<int>(scores_size); // indices
workspace_size += get_size_aligned<int>(scores_size); // indices_sorted
workspace_size += get_size_aligned<float>(scores_size); // scores
workspace_size += get_size_aligned<float>(scores_size); // scores_sorted
size_t temp_size_flag = 0;
thrust::cuda_cub::hipcub::DeviceSelect::Flagged((void *)nullptr, temp_size_flag,
thrust::cuda_cub::hipcub::CountingInputIterator<int>(scores_size),
(bool *)nullptr, (int *)nullptr, (int *)nullptr, scores_size);
size_t temp_size_sort = 0;
thrust::cuda_cub::hipcub::DeviceRadixSort::SortPairsDescending((void *)nullptr, temp_size_sort,
(float *)nullptr, (float *)nullptr, (int *)nullptr, (int *)nullptr, scores_size);
workspace_size += ::max(temp_size_flag, temp_size_sort);
return workspace_size;
}
auto anchors_d = get_next_ptr<float>(anchors.size(), workspace, workspace_size);
hipMemcpyAsync(anchors_d, anchors.data(), anchors.size() * sizeof *anchors_d, hipMemcpyHostToDevice, stream);
auto on_stream = thrust::hip::par.on(stream);
auto flags = get_next_ptr<bool>(scores_size, workspace, workspace_size);
auto indices = get_next_ptr<int>(scores_size, workspace, workspace_size);
auto indices_sorted = get_next_ptr<int>(scores_size, workspace, workspace_size);
auto scores = get_next_ptr<float>(scores_size, workspace, workspace_size);
auto scores_sorted = get_next_ptr<float>(scores_size, workspace, workspace_size);
for (int batch = 0; batch < batch_size; batch++) {
auto in_scores = static_cast<const float *>(inputs[0]) + batch * scores_size;
auto in_boxes = static_cast<const float *>(inputs[1]) + batch * (scores_size / num_classes) * 4;
auto out_scores = static_cast<float *>(outputs[0]) + batch * top_n;
auto out_boxes = static_cast<float4 *>(outputs[1]) + batch * top_n;
auto out_classes = static_cast<float *>(outputs[2]) + batch * top_n;
// Discard scores below threshold
thrust::transform(on_stream, in_scores, in_scores + scores_size,
flags, thrust::placeholders::_1 > score_thresh);
int *num_selected = reinterpret_cast<int *>(indices_sorted);
thrust::cuda_cub::hipcub::DeviceSelect::Flagged(workspace, workspace_size,
thrust::cuda_cub::hipcub::CountingInputIterator<int>(0),
flags, indices, num_selected, scores_size, stream);
hipStreamSynchronize(stream);
int num_detections = *thrust::device_pointer_cast(num_selected);
// Only keep top n scores
if (num_detections > top_n) {
thrust::gather(on_stream, indices, indices + num_detections,
in_scores, scores);
thrust::cuda_cub::hipcub::DeviceRadixSort::SortPairsDescending(workspace, workspace_size,
scores, scores_sorted, indices, indices_sorted, num_detections, 0, sizeof(*scores)*8, stream);
indices = indices_sorted;
num_detections = top_n;
}
// Gather boxes
bool has_anchors = !anchors.empty();
thrust::transform(on_stream, indices, indices + num_detections,
thrust::make_zip_iterator(thrust::make_tuple(out_scores, out_boxes, out_classes)),
[=] __device__ (int i) {
int x = i % width;
int y = (i / width) % height;
int a = (i / num_classes / height / width) % num_anchors;
int cls = (i / height / width) % num_classes;
float4 box = float4{
in_boxes[((a * 4 + 0) * height + y) * width + x],
in_boxes[((a * 4 + 1) * height + y) * width + x],
in_boxes[((a * 4 + 2) * height + y) * width + x],
in_boxes[((a * 4 + 3) * height + y) * width + x]
};
if (has_anchors) {
// Add anchors offsets to deltas
float x = (i % width) * scale;
float y = ((i / width) % height) * scale;
float *d = anchors_d + 4*a;
float x1 = x + d[0];
float y1 = y + d[1];
float x2 = x + d[2];
float y2 = y + d[3];
float w = x2 - x1 + 1.0f;
float h = y2 - y1 + 1.0f;
float pred_ctr_x = box.x * w + x1 + 0.5f * w;
float pred_ctr_y = box.y * h + y1 + 0.5f * h;
float pred_w = exp(box.z) * w;
float pred_h = exp(box.w) * h;
box = float4{
max(0.0f, pred_ctr_x - 0.5f * pred_w),
max(0.0f, pred_ctr_y - 0.5f * pred_h),
min(pred_ctr_x + 0.5f * pred_w - 1.0f, width * scale - 1.0f),
min(pred_ctr_y + 0.5f * pred_h - 1.0f, height * scale - 1.0f)
};
}
return thrust::make_tuple(in_scores[i], box, cls);
});
// Zero-out unused scores
if (num_detections < top_n) {
thrust::fill(on_stream, out_scores + num_detections,
out_scores + top_n, 0.0f);
thrust::fill(on_stream, out_classes + num_detections,
out_classes + top_n, 0.0f);
}
}
return 0;
}
}
}
| f428179f747f190f777d86aa1a7608346c54178b.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "decode.h"
#include "utils.h"
#include <algorithm>
#include <cstdint>
#include <thrust/device_ptr.h>
#include <thrust/sequence.h>
#include <thrust/execution_policy.h>
#include <thrust/gather.h>
#include <thrust/tabulate.h>
#include <thrust/count.h>
#include <thrust/find.h>
#include <thrust/system/cuda/detail/cub/device/device_radix_sort.cuh>
#include <thrust/system/cuda/detail/cub/iterator/counting_input_iterator.cuh>
namespace retinanet {
namespace cuda {
int decode(int batch_size,
const void *const *inputs, void **outputs,
size_t height, size_t width, size_t scale,
size_t num_anchors, size_t num_classes,
const std::vector<float> &anchors, float score_thresh, int top_n,
void *workspace, size_t workspace_size, cudaStream_t stream) {
int scores_size = num_anchors * num_classes * height * width;
if (!workspace || !workspace_size) {
// Return required scratch space size cub style
workspace_size = get_size_aligned<float>(anchors.size()); // anchors
workspace_size += get_size_aligned<bool>(scores_size); // flags
workspace_size += get_size_aligned<int>(scores_size); // indices
workspace_size += get_size_aligned<int>(scores_size); // indices_sorted
workspace_size += get_size_aligned<float>(scores_size); // scores
workspace_size += get_size_aligned<float>(scores_size); // scores_sorted
size_t temp_size_flag = 0;
thrust::cuda_cub::cub::DeviceSelect::Flagged((void *)nullptr, temp_size_flag,
thrust::cuda_cub::cub::CountingInputIterator<int>(scores_size),
(bool *)nullptr, (int *)nullptr, (int *)nullptr, scores_size);
size_t temp_size_sort = 0;
thrust::cuda_cub::cub::DeviceRadixSort::SortPairsDescending((void *)nullptr, temp_size_sort,
(float *)nullptr, (float *)nullptr, (int *)nullptr, (int *)nullptr, scores_size);
workspace_size += std::max(temp_size_flag, temp_size_sort);
return workspace_size;
}
auto anchors_d = get_next_ptr<float>(anchors.size(), workspace, workspace_size);
cudaMemcpyAsync(anchors_d, anchors.data(), anchors.size() * sizeof *anchors_d, cudaMemcpyHostToDevice, stream);
auto on_stream = thrust::cuda::par.on(stream);
auto flags = get_next_ptr<bool>(scores_size, workspace, workspace_size);
auto indices = get_next_ptr<int>(scores_size, workspace, workspace_size);
auto indices_sorted = get_next_ptr<int>(scores_size, workspace, workspace_size);
auto scores = get_next_ptr<float>(scores_size, workspace, workspace_size);
auto scores_sorted = get_next_ptr<float>(scores_size, workspace, workspace_size);
for (int batch = 0; batch < batch_size; batch++) {
auto in_scores = static_cast<const float *>(inputs[0]) + batch * scores_size;
auto in_boxes = static_cast<const float *>(inputs[1]) + batch * (scores_size / num_classes) * 4;
auto out_scores = static_cast<float *>(outputs[0]) + batch * top_n;
auto out_boxes = static_cast<float4 *>(outputs[1]) + batch * top_n;
auto out_classes = static_cast<float *>(outputs[2]) + batch * top_n;
// Discard scores below threshold
thrust::transform(on_stream, in_scores, in_scores + scores_size,
flags, thrust::placeholders::_1 > score_thresh);
int *num_selected = reinterpret_cast<int *>(indices_sorted);
thrust::cuda_cub::cub::DeviceSelect::Flagged(workspace, workspace_size,
thrust::cuda_cub::cub::CountingInputIterator<int>(0),
flags, indices, num_selected, scores_size, stream);
cudaStreamSynchronize(stream);
int num_detections = *thrust::device_pointer_cast(num_selected);
// Only keep top n scores
if (num_detections > top_n) {
thrust::gather(on_stream, indices, indices + num_detections,
in_scores, scores);
thrust::cuda_cub::cub::DeviceRadixSort::SortPairsDescending(workspace, workspace_size,
scores, scores_sorted, indices, indices_sorted, num_detections, 0, sizeof(*scores)*8, stream);
indices = indices_sorted;
num_detections = top_n;
}
// Gather boxes
bool has_anchors = !anchors.empty();
thrust::transform(on_stream, indices, indices + num_detections,
thrust::make_zip_iterator(thrust::make_tuple(out_scores, out_boxes, out_classes)),
[=] __device__ (int i) {
int x = i % width;
int y = (i / width) % height;
int a = (i / num_classes / height / width) % num_anchors;
int cls = (i / height / width) % num_classes;
float4 box = float4{
in_boxes[((a * 4 + 0) * height + y) * width + x],
in_boxes[((a * 4 + 1) * height + y) * width + x],
in_boxes[((a * 4 + 2) * height + y) * width + x],
in_boxes[((a * 4 + 3) * height + y) * width + x]
};
if (has_anchors) {
// Add anchors offsets to deltas
float x = (i % width) * scale;
float y = ((i / width) % height) * scale;
float *d = anchors_d + 4*a;
float x1 = x + d[0];
float y1 = y + d[1];
float x2 = x + d[2];
float y2 = y + d[3];
float w = x2 - x1 + 1.0f;
float h = y2 - y1 + 1.0f;
float pred_ctr_x = box.x * w + x1 + 0.5f * w;
float pred_ctr_y = box.y * h + y1 + 0.5f * h;
float pred_w = exp(box.z) * w;
float pred_h = exp(box.w) * h;
box = float4{
max(0.0f, pred_ctr_x - 0.5f * pred_w),
max(0.0f, pred_ctr_y - 0.5f * pred_h),
min(pred_ctr_x + 0.5f * pred_w - 1.0f, width * scale - 1.0f),
min(pred_ctr_y + 0.5f * pred_h - 1.0f, height * scale - 1.0f)
};
}
return thrust::make_tuple(in_scores[i], box, cls);
});
// Zero-out unused scores
if (num_detections < top_n) {
thrust::fill(on_stream, out_scores + num_detections,
out_scores + top_n, 0.0f);
thrust::fill(on_stream, out_classes + num_detections,
out_classes + top_n, 0.0f);
}
}
return 0;
}
}
}
|
ba4f6e0d4e37ca88acd0f9973c9324cee48e6af9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/hip/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k128_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 500
#if __CUDA_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k128_sm50` is for sm50-sm70, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k128_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k128_sm70` is for sm70-sm75, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k128_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k128_sm75` is for sm75-sm80, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
| ba4f6e0d4e37ca88acd0f9973c9324cee48e6af9.cu | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k128_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 500
#if __CUDA_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k128_sm50` is for sm50-sm70, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k128_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k128_sm70` is for sm70-sm75, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k128_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k128_sm75` is for sm75-sm80, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
|
b76a44c2a694062621ca56b35d0ba43c46879a62.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "add_.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *input = NULL;
hipMalloc(&input, XSIZE*YSIZE);
float value = 2;
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((add_), dim3(gridBlock), dim3(threadBlock), 0, 0, input, value, size);
hipDeviceSynchronize();
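// Warm-up launches: run the kernel a few times so the timed loop below excludes one-time startup overhead.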
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((add_), dim3(gridBlock), dim3(threadBlock), 0, 0, input, value, size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((add_), dim3(gridBlock), dim3(threadBlock), 0, 0, input, value, size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | b76a44c2a694062621ca56b35d0ba43c46879a62.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "add_.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *input = NULL;
cudaMalloc(&input, XSIZE*YSIZE);
float value = 2;
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
add_<<<gridBlock,threadBlock>>>(input,value,size);
cudaDeviceSynchronize();
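// Warm-up launches: run the kernel a few times so the timed loop below excludes one-time startup overhead.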
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
add_<<<gridBlock,threadBlock>>>(input,value,size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
add_<<<gridBlock,threadBlock>>>(input,value,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
c0847a43c25e7a82ebe7d59f5b40fabde1fe3067.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/math/sequence2batch.h"
namespace paddle {
namespace operators {
namespace math {
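// Copies width elements per row between src and dst according to index: rows are gathered from src when is_src_index is true, scattered into dst otherwise.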
template <typename T, int BlockDimX, int BlockDimY, int GridDimX>
__global__ void CopyMatrixRowsKernel(const T* src, T* dst, const size_t* index,
int64_t height, int64_t width,
bool is_src_index) {
int idx = threadIdx.x;
int idy = threadIdx.y;
int id = blockIdx.x + idy * GridDimX;
while (id < height) {
int src_idx = is_src_index ? index[id] : id;
int dst_idx = is_src_index ? id : index[id];
const T* src_data = src + src_idx * width;
T* dst_data = dst + dst_idx * width;
for (int i = idx; i < width; i += BlockDimX) {
dst_data[i] = src_data[i];
}
id += BlockDimY * GridDimX;
}
}
template <typename T>
class CopyMatrixRowsFunctor<platform::CUDADeviceContext, T> {
public:
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& src,
framework::Vector<size_t> index_lod, framework::Tensor* dst,
bool is_src_index) {
auto src_dims = src.dims();
auto dst_dims = dst->dims();
PADDLE_ENFORCE_EQ(src_dims.size(), 2,
platform::errors::InvalidArgument(
"The source tensor must be a matrix with rank 2, but "
"got the source tensor rank is %lu. "
"Please check the rank of the source tensor",
src_dims.size()));
PADDLE_ENFORCE_EQ(dst_dims.size(), 2,
platform::errors::InvalidArgument(
"The destination tensor must be a matrix with rank, "
"but got the destination tensor rank is %lu. "
"Please check the rank of the destination tensor",
dst_dims.size()));
PADDLE_ENFORCE_EQ(
src_dims[1], dst_dims[1],
platform::errors::InvalidArgument(
"The width of the source tensor and the destination tensor must be "
"same. But got %lu != %lu.Please check the rank of the source "
"tensor",
src_dims.size(), dst_dims.size()));
auto height = dst_dims[0];
auto width = dst_dims[1];
auto* src_data = src.data<T>();
auto* dst_data = dst->data<T>();
dim3 threads(128, 8);
dim3 grid(8, 1);
auto stream = context.stream();
hipLaunchKernelGGL(( CopyMatrixRowsKernel<T, 128, 8, 8>), dim3(grid), dim3(threads), 0, stream,
src_data, dst_data, index_lod.CUDAData(context.GetPlace()), height,
width, is_src_index);
}
};
template class CopyMatrixRowsFunctor<platform::CUDADeviceContext, float>;
template class CopyMatrixRowsFunctor<platform::CUDADeviceContext, double>;
template class LoDTensor2BatchFunctor<platform::CUDADeviceContext, float>;
template class LoDTensor2BatchFunctor<platform::CUDADeviceContext, double>;
template class Batch2LoDTensorFunctor<platform::CUDADeviceContext, float>;
template class Batch2LoDTensorFunctor<platform::CUDADeviceContext, double>;
} // namespace math
} // namespace operators
} // namespace paddle
| c0847a43c25e7a82ebe7d59f5b40fabde1fe3067.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/math/sequence2batch.h"
namespace paddle {
namespace operators {
namespace math {
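// Copies width elements per row between src and dst according to index: rows are gathered from src when is_src_index is true, scattered into dst otherwise.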
template <typename T, int BlockDimX, int BlockDimY, int GridDimX>
__global__ void CopyMatrixRowsKernel(const T* src, T* dst, const size_t* index,
int64_t height, int64_t width,
bool is_src_index) {
int idx = threadIdx.x;
int idy = threadIdx.y;
int id = blockIdx.x + idy * GridDimX;
while (id < height) {
int src_idx = is_src_index ? index[id] : id;
int dst_idx = is_src_index ? id : index[id];
const T* src_data = src + src_idx * width;
T* dst_data = dst + dst_idx * width;
for (int i = idx; i < width; i += BlockDimX) {
dst_data[i] = src_data[i];
}
id += BlockDimY * GridDimX;
}
}
template <typename T>
class CopyMatrixRowsFunctor<platform::CUDADeviceContext, T> {
public:
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& src,
framework::Vector<size_t> index_lod, framework::Tensor* dst,
bool is_src_index) {
auto src_dims = src.dims();
auto dst_dims = dst->dims();
PADDLE_ENFORCE_EQ(src_dims.size(), 2,
platform::errors::InvalidArgument(
"The source tensor must be a matrix with rank 2, but "
"got the source tensor rank is %lu. "
"Please check the rank of the source tensor",
src_dims.size()));
PADDLE_ENFORCE_EQ(dst_dims.size(), 2,
platform::errors::InvalidArgument(
"The destination tensor must be a matrix with rank, "
"but got the destination tensor rank is %lu. "
"Please check the rank of the destination tensor",
dst_dims.size()));
PADDLE_ENFORCE_EQ(
src_dims[1], dst_dims[1],
platform::errors::InvalidArgument(
"The width of the source tensor and the destination tensor must be "
"same. But got %lu != %lu.Please check the rank of the source "
"tensor",
src_dims.size(), dst_dims.size()));
auto height = dst_dims[0];
auto width = dst_dims[1];
auto* src_data = src.data<T>();
auto* dst_data = dst->data<T>();
dim3 threads(128, 8);
dim3 grid(8, 1);
auto stream = context.stream();
CopyMatrixRowsKernel<T, 128, 8, 8><<<grid, threads, 0, stream>>>(
src_data, dst_data, index_lod.CUDAData(context.GetPlace()), height,
width, is_src_index);
}
};
template class CopyMatrixRowsFunctor<platform::CUDADeviceContext, float>;
template class CopyMatrixRowsFunctor<platform::CUDADeviceContext, double>;
template class LoDTensor2BatchFunctor<platform::CUDADeviceContext, float>;
template class LoDTensor2BatchFunctor<platform::CUDADeviceContext, double>;
template class Batch2LoDTensorFunctor<platform::CUDADeviceContext, float>;
template class Batch2LoDTensorFunctor<platform::CUDADeviceContext, double>;
} // namespace math
} // namespace operators
} // namespace paddle
|
4498dc855a9ce0f27b4497e872d75863c6902940.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "AbstractAPI.h"
#include "interfaces/cuda/Internals.h"
#include <device.h>
#include <cassert>
namespace device {
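// kernel_streamBatchedData: each block copies one batch element from baseSrcPtr to baseDstPtr, with threads striding over its elementSize entries.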
__global__ void kernel_streamBatchedData(real **baseSrcPtr,
real **baseDstPtr,
unsigned elementSize) {
real *srcElement = baseSrcPtr[blockIdx.x];
real *dstElement = baseDstPtr[blockIdx.x];
for (int index = threadIdx.x; index < elementSize; index += blockDim.x) {
dstElement[index] = srcElement[index];
}
}
void Algorithms::streamBatchedData(real **baseSrcPtr,
real **baseDstPtr,
unsigned elementSize,
unsigned numElements,
void* streamPtr) {
dim3 block(internals::WARP_SIZE, 1, 1);
dim3 grid(numElements, 1, 1);
auto stream = reinterpret_cast<internals::deviceStreamT>(streamPtr);
hipLaunchKernelGGL(( kernel_streamBatchedData), dim3(grid), dim3(block), 0, stream, baseSrcPtr, baseDstPtr, elementSize); CHECK_ERR;
}
//--------------------------------------------------------------------------------------------------
__global__ void kernel_accumulateBatchedData(real **baseSrcPtr,
real **baseDstPtr,
unsigned elementSize) {
real *srcElement = baseSrcPtr[blockIdx.x];
real *dstElement = baseDstPtr[blockIdx.x];
for (int index = threadIdx.x; index < elementSize; index += blockDim.x) {
dstElement[index] += srcElement[index];
}
}
void Algorithms::accumulateBatchedData(real **baseSrcPtr,
real **baseDstPtr,
unsigned elementSize,
unsigned numElements,
void* streamPtr) {
dim3 block(internals::WARP_SIZE, 1, 1);
dim3 grid(numElements, 1, 1);
auto stream = reinterpret_cast<internals::deviceStreamT>(streamPtr);
hipLaunchKernelGGL(( kernel_accumulateBatchedData), dim3(grid), dim3(block), 0, stream, baseSrcPtr, baseDstPtr, elementSize); CHECK_ERR;
}
//--------------------------------------------------------------------------------------------------
__global__ void kernel_touchBatchedMemory(real **basePtr, unsigned elementSize, bool clean) {
real *element = basePtr[blockIdx.x];
int id = threadIdx.x;
while (id < elementSize) {
if (clean) {
element[id] = 0.0;
} else {
real value = element[id];
// Do something dummy here. We just need to check the pointers point to valid memory locations.
// Avoid compiler optimization. Possibly, implement a dummy code with asm.
value += 1.0;
value -= 1.0;
}
id += blockDim.x;
}
}
void Algorithms::touchBatchedMemory(real **basePtr,
unsigned elementSize,
unsigned numElements,
bool clean,
void* streamPtr) {
dim3 block(256, 1, 1);
dim3 grid(numElements, 1, 1);
auto stream = reinterpret_cast<internals::deviceStreamT>(streamPtr);
hipLaunchKernelGGL(( kernel_touchBatchedMemory), dim3(grid), dim3(block), 0, stream, basePtr, elementSize, clean); CHECK_ERR;
}
//--------------------------------------------------------------------------------------------------
__global__ void kernel_setToValue(real** out, real value, size_t elementSize, size_t numElements) {
const int elementId = blockIdx.x;
if (elementId < numElements) {
real *element = out[elementId];
const int tid = threadIdx.x;
for (int i = tid; i < elementSize; i += blockDim.x) {
element[i] = value;
}
}
}
void Algorithms::setToValue(real** out, real value, size_t elementSize, size_t numElements, void* streamPtr) {
dim3 block(256, 1, 1);
dim3 grid(numElements, 1, 1);
auto stream = reinterpret_cast<internals::deviceStreamT>(streamPtr);
hipLaunchKernelGGL(( kernel_setToValue), dim3(grid), dim3(block), 0, stream, out, value, elementSize, numElements);
CHECK_ERR;
}
//--------------------------------------------------------------------------------------------------
template<typename T>
__global__ void kernel_copyUniformToScatter(T *src, T **dst, size_t srcOffset, size_t copySize) {
T *srcElement = &src[blockIdx.x * srcOffset];
T *dstElement = dst[blockIdx.x];
for (int index = threadIdx.x; index < copySize; index += blockDim.x) {
dstElement[index] = srcElement[index];
}
}
template<typename T>
void Algorithms::copyUniformToScatter(T *src,
T **dst,
size_t srcOffset,
size_t copySize,
size_t numElements,
void* streamPtr) {
dim3 block(256, 1, 1);
dim3 grid(numElements, 1, 1);
auto stream = reinterpret_cast<internals::deviceStreamT>(streamPtr);
hipLaunchKernelGGL(( kernel_copyUniformToScatter), dim3(grid), dim3(block), 0, stream, src, dst, srcOffset, copySize); CHECK_ERR;
CHECK_ERR;
}
template void Algorithms::copyUniformToScatter(real *src,
real **dst,
size_t srcOffset,
size_t copySize,
size_t numElements,
void* streamPtr);
template void Algorithms::copyUniformToScatter(int *src,
int **dst,
size_t srcOffset,
size_t copySize,
size_t numElements,
void* streamPtr);
template void Algorithms::copyUniformToScatter(char *src,
char **dst,
size_t srcOffset,
size_t copySize,
size_t numElements,
void* streamPtr);
//--------------------------------------------------------------------------------------------------
template<typename T>
__global__ void kernel_copyScatterToUniform(T **src, T *dst, size_t dstOffset, size_t copySize) {
T *srcElement = src[blockIdx.x];
T *dstElement = &dst[blockIdx.x * dstOffset];
for (int index = threadIdx.x; index < copySize; index += blockDim.x) {
dstElement[index] = srcElement[index];
}
}
template<typename T>
void Algorithms::copyScatterToUniform(T **src,
T *dst,
size_t dstOffset,
size_t copySize,
size_t numElements,
void* streamPtr) {
dim3 block(256, 1, 1);
dim3 grid(numElements, 1, 1);
auto stream = reinterpret_cast<internals::deviceStreamT>(streamPtr);
hipLaunchKernelGGL(( kernel_copyScatterToUniform), dim3(grid), dim3(block), 0, stream, src, dst, dstOffset, copySize); CHECK_ERR;
CHECK_ERR;
}
template void Algorithms::copyScatterToUniform(real **src,
real *dst,
size_t dstOffset,
size_t copySize,
size_t numElements,
void* streamPtr);
template void Algorithms::copyScatterToUniform(int **src,
int *dst,
size_t dstOffset,
size_t copySize,
size_t numElements,
void* streamPtr);
template void Algorithms::copyScatterToUniform(char **src,
char *dst,
size_t dstOffset,
size_t copySize,
size_t numElements,
void* streamPtr);
} // namespace device
| 4498dc855a9ce0f27b4497e872d75863c6902940.cu | #include "AbstractAPI.h"
#include "interfaces/cuda/Internals.h"
#include <device.h>
#include <cassert>
namespace device {
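// kernel_streamBatchedData: each block copies one batch element from baseSrcPtr to baseDstPtr, with threads striding over its elementSize entries.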
__global__ void kernel_streamBatchedData(real **baseSrcPtr,
real **baseDstPtr,
unsigned elementSize) {
real *srcElement = baseSrcPtr[blockIdx.x];
real *dstElement = baseDstPtr[blockIdx.x];
for (int index = threadIdx.x; index < elementSize; index += blockDim.x) {
dstElement[index] = srcElement[index];
}
}
void Algorithms::streamBatchedData(real **baseSrcPtr,
real **baseDstPtr,
unsigned elementSize,
unsigned numElements,
void* streamPtr) {
dim3 block(internals::WARP_SIZE, 1, 1);
dim3 grid(numElements, 1, 1);
auto stream = reinterpret_cast<internals::deviceStreamT>(streamPtr);
kernel_streamBatchedData<<<grid, block, 0, stream>>>(baseSrcPtr, baseDstPtr, elementSize); CHECK_ERR;
}
//--------------------------------------------------------------------------------------------------
__global__ void kernel_accumulateBatchedData(real **baseSrcPtr,
real **baseDstPtr,
unsigned elementSize) {
real *srcElement = baseSrcPtr[blockIdx.x];
real *dstElement = baseDstPtr[blockIdx.x];
for (int index = threadIdx.x; index < elementSize; index += blockDim.x) {
dstElement[index] += srcElement[index];
}
}
void Algorithms::accumulateBatchedData(real **baseSrcPtr,
real **baseDstPtr,
unsigned elementSize,
unsigned numElements,
void* streamPtr) {
dim3 block(internals::WARP_SIZE, 1, 1);
dim3 grid(numElements, 1, 1);
auto stream = reinterpret_cast<internals::deviceStreamT>(streamPtr);
kernel_accumulateBatchedData<<<grid, block, 0, stream>>>(baseSrcPtr, baseDstPtr, elementSize); CHECK_ERR;
}
//--------------------------------------------------------------------------------------------------
__global__ void kernel_touchBatchedMemory(real **basePtr, unsigned elementSize, bool clean) {
real *element = basePtr[blockIdx.x];
int id = threadIdx.x;
while (id < elementSize) {
if (clean) {
element[id] = 0.0;
} else {
real value = element[id];
// Do something dummy here. We just need to check the pointers point to valid memory locations.
// Avoid compiler optimization. Possibly, implement a dummy code with asm.
value += 1.0;
value -= 1.0;
}
id += blockDim.x;
}
}
void Algorithms::touchBatchedMemory(real **basePtr,
unsigned elementSize,
unsigned numElements,
bool clean,
void* streamPtr) {
dim3 block(256, 1, 1);
dim3 grid(numElements, 1, 1);
auto stream = reinterpret_cast<internals::deviceStreamT>(streamPtr);
kernel_touchBatchedMemory<<<grid, block, 0, stream>>>(basePtr, elementSize, clean); CHECK_ERR;
}
//--------------------------------------------------------------------------------------------------
__global__ void kernel_setToValue(real** out, real value, size_t elementSize, size_t numElements) {
const int elementId = blockIdx.x;
if (elementId < numElements) {
real *element = out[elementId];
const int tid = threadIdx.x;
for (int i = tid; i < elementSize; i += blockDim.x) {
element[i] = value;
}
}
}
void Algorithms::setToValue(real** out, real value, size_t elementSize, size_t numElements, void* streamPtr) {
dim3 block(256, 1, 1);
dim3 grid(numElements, 1, 1);
auto stream = reinterpret_cast<internals::deviceStreamT>(streamPtr);
kernel_setToValue<<<grid, block, 0, stream>>>(out, value, elementSize, numElements);
CHECK_ERR;
}
//--------------------------------------------------------------------------------------------------
template<typename T>
__global__ void kernel_copyUniformToScatter(T *src, T **dst, size_t srcOffset, size_t copySize) {
T *srcElement = &src[blockIdx.x * srcOffset];
T *dstElement = dst[blockIdx.x];
for (int index = threadIdx.x; index < copySize; index += blockDim.x) {
dstElement[index] = srcElement[index];
}
}
template<typename T>
void Algorithms::copyUniformToScatter(T *src,
T **dst,
size_t srcOffset,
size_t copySize,
size_t numElements,
void* streamPtr) {
dim3 block(256, 1, 1);
dim3 grid(numElements, 1, 1);
auto stream = reinterpret_cast<internals::deviceStreamT>(streamPtr);
kernel_copyUniformToScatter<<<grid, block, 0, stream>>>(src, dst, srcOffset, copySize); CHECK_ERR;
CHECK_ERR;
}
template void Algorithms::copyUniformToScatter(real *src,
real **dst,
size_t srcOffset,
size_t copySize,
size_t numElements,
void* streamPtr);
template void Algorithms::copyUniformToScatter(int *src,
int **dst,
size_t srcOffset,
size_t copySize,
size_t numElements,
void* streamPtr);
template void Algorithms::copyUniformToScatter(char *src,
char **dst,
size_t srcOffset,
size_t copySize,
size_t numElements,
void* streamPtr);
//--------------------------------------------------------------------------------------------------
template<typename T>
__global__ void kernel_copyScatterToUniform(T **src, T *dst, size_t dstOffset, size_t copySize) {
T *srcElement = src[blockIdx.x];
T *dstElement = &dst[blockIdx.x * dstOffset];
for (int index = threadIdx.x; index < copySize; index += blockDim.x) {
dstElement[index] = srcElement[index];
}
}
template<typename T>
void Algorithms::copyScatterToUniform(T **src,
T *dst,
size_t dstOffset,
size_t copySize,
size_t numElements,
void* streamPtr) {
dim3 block(256, 1, 1);
dim3 grid(numElements, 1, 1);
auto stream = reinterpret_cast<internals::deviceStreamT>(streamPtr);
kernel_copyScatterToUniform<<<grid, block, 0, stream>>>(src, dst, dstOffset, copySize); CHECK_ERR;
CHECK_ERR;
}
template void Algorithms::copyScatterToUniform(real **src,
real *dst,
size_t dstOffset,
size_t copySize,
size_t numElements,
void* streamPtr);
template void Algorithms::copyScatterToUniform(int **src,
int *dst,
size_t dstOffset,
size_t copySize,
size_t numElements,
void* streamPtr);
template void Algorithms::copyScatterToUniform(char **src,
char *dst,
size_t dstOffset,
size_t copySize,
size_t numElements,
void* streamPtr);
} // namespace device
|
fe26367e92fd192f845242fb097f4a291bf7a514.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <cstdlib>
#include <tune_quda.h>
#include <quda_internal.h>
#include <gauge_field_order.h>
#include <quda_matrix.h>
#include <color_spinor.h>
#include <dslash_quda.h>
namespace quda {
#ifdef GPU_CLOVER_DIRAC
namespace { // anonymous
#include <texture.h>
}
// This is the maximum number of color spinors we can process in a single kernel
#if (TORCH_HIP_VERSION < 8000)
#define MAX_NVECTOR 1 // multi-vector code doesn't seem to work well with CUDA 7.x
#else
#define MAX_NVECTOR 9
#endif
template<typename Float, typename Output, typename InputA, typename InputB>
struct CloverSigmaOprodArg {
Output oprod;
InputA inA[MAX_NVECTOR];
InputB inB[MAX_NVECTOR];
Float coeff[MAX_NVECTOR][2];
unsigned int length;
int nvector;
CloverSigmaOprodArg(Output &oprod, InputA *inA_, InputB *inB_,
const std::vector<std::vector<double> > &coeff_,
const GaugeField &meta, int nvector)
: oprod(oprod), length(meta.VolumeCB()), nvector(nvector)
{
for (int i=0; i<nvector; i++) {
inA[i] = inA_[i];
inB[i] = inB_[i];
coeff[i][0] = coeff_[i][0];
coeff[i][1] = coeff_[i][1];
}
}
};
template <typename real, int nvector, int mu, int nu, int parity, typename Arg>
inline __device__ void sigmaOprod(Arg &arg, int idx) {
typedef complex<real> Complex;
Matrix<Complex,3> result;
#pragma unroll
for (int i=0; i<nvector; i++) {
ColorSpinor<real,3,4> A, B;
arg.inA[i].load(static_cast<Complex*>(A.data), idx, parity);
arg.inB[i].load(static_cast<Complex*>(B.data), idx, parity);
// multiply by sigma_mu_nu
ColorSpinor<real,3,4> C = A.sigma(nu,mu);
result += arg.coeff[i][parity] * outerProdSpinTrace(C,B);
}
result -= conj(result);
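      // Accumulate into the stored outer-product field: load the (mu,nu) component, add the new contribution, and store it back.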
Matrix<Complex,3> temp;
arg.oprod.load(reinterpret_cast<real*>(temp.data), idx, (mu-1)*mu/2 + nu, parity);
temp = result + temp;
arg.oprod.save(reinterpret_cast<real*>(temp.data), idx, (mu-1)*mu/2 + nu, parity);
}
template<int nvector, typename real, typename Output, typename InputA, typename InputB>
__global__ void sigmaOprodKernel(CloverSigmaOprodArg<real, Output, InputA, InputB> arg) {
typedef complex<real> Complex;
int idx = blockIdx.x*blockDim.x + threadIdx.x;
int parity = blockIdx.y*blockDim.y + threadIdx.y;
int mu_nu = blockIdx.z*blockDim.z + threadIdx.z;
if (idx >= arg.length) return;
if (mu_nu >= 6) return;
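    // mu_nu in [0,6) enumerates the six (mu,nu) pairs with mu > nu; dispatch to the templated worker for each parity/pair combination.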
switch(parity) {
case 0:
switch(mu_nu) {
case 0: sigmaOprod<real, nvector, 1, 0, 0>(arg, idx); break;
case 1: sigmaOprod<real, nvector, 2, 0, 0>(arg, idx); break;
case 2: sigmaOprod<real, nvector, 2, 1, 0>(arg, idx); break;
case 3: sigmaOprod<real, nvector, 3, 0, 0>(arg, idx); break;
case 4: sigmaOprod<real, nvector, 3, 1, 0>(arg, idx); break;
case 5: sigmaOprod<real, nvector, 3, 2, 0>(arg, idx); break;
}
break;
case 1:
switch(mu_nu) {
case 0: sigmaOprod<real, nvector, 1, 0, 1>(arg, idx); break;
case 1: sigmaOprod<real, nvector, 2, 0, 1>(arg, idx); break;
case 2: sigmaOprod<real, nvector, 2, 1, 1>(arg, idx); break;
case 3: sigmaOprod<real, nvector, 3, 0, 1>(arg, idx); break;
case 4: sigmaOprod<real, nvector, 3, 1, 1>(arg, idx); break;
case 5: sigmaOprod<real, nvector, 3, 2, 1>(arg, idx); break;
}
break;
}
return;
} // sigmaOprodKernel
template<typename Float, typename Output, typename InputA, typename InputB>
class CloverSigmaOprod : public TunableVectorYZ {
private:
CloverSigmaOprodArg<Float,Output,InputA,InputB> &arg;
const GaugeField &meta;
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam &) const { return 0; }
unsigned int minThreads() const { return arg.length; }
bool tuneGridDim() const { return false; }
public:
CloverSigmaOprod(CloverSigmaOprodArg<Float,Output,InputA,InputB> &arg, const GaugeField &meta)
: TunableVectorYZ(2,6), arg(arg), meta(meta) {
writeAuxString("prec=%lu,stride=%d,nvector=%d", sizeof(Float), arg.inA[0].Stride(), arg.nvector);
// this sets the communications pattern for the packing kernel
}
virtual ~CloverSigmaOprod() {}
void apply(const hipStream_t &stream){
if (meta.Location() == QUDA_CUDA_FIELD_LOCATION) {
TuneParam tp = tuneLaunch(*this,getTuning(),getVerbosity());
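        // nvector is passed as a template argument so the per-vector loop in sigmaOprod can be fully unrolled at compile time.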
switch(arg.nvector) {
case 1:hipLaunchKernelGGL(( sigmaOprodKernel< 1>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg); break;
case 2:hipLaunchKernelGGL(( sigmaOprodKernel< 2>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg); break;
case 3:hipLaunchKernelGGL(( sigmaOprodKernel< 3>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg); break;
case 4:hipLaunchKernelGGL(( sigmaOprodKernel< 4>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg); break;
case 5:hipLaunchKernelGGL(( sigmaOprodKernel< 5>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg); break;
case 6:hipLaunchKernelGGL(( sigmaOprodKernel< 6>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg); break;
case 7:hipLaunchKernelGGL(( sigmaOprodKernel< 7>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg); break;
case 8:hipLaunchKernelGGL(( sigmaOprodKernel< 8>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg); break;
case 9:hipLaunchKernelGGL(( sigmaOprodKernel< 9>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg); break;
}
} else { // run the CPU code
errorQuda("No CPU support for staggered outer-product calculation\n");
}
} // apply
void preTune() { this->arg.oprod.save(); }
void postTune() { this->arg.oprod.load(); }
long long flops() const {
return (2*(long long)arg.length)*6*((0 + 144 + 18)*arg.nvector + 18); // spin_mu_nu + spin trace + multiply-add
}
long long bytes() const {
return (2*(long long)arg.length)*6*((arg.inA[0].Bytes() + arg.inB[0].Bytes())*arg.nvector + 2*arg.oprod.Bytes());
}
TuneKey tuneKey() const {
return TuneKey(meta.VolString(), "CloverSigmaOprod", aux);
}
}; // CloverSigmaOprod
template<typename Float, typename Output, typename InputA, typename InputB>
void computeCloverSigmaOprod(Output oprod, const GaugeField& out, InputA *inA, InputB *inB,
std::vector<std::vector<double> > &coeff, int nvector) {
// Create the arguments
CloverSigmaOprodArg<Float,Output,InputA,InputB> arg(oprod, inA, inB, coeff, out, nvector);
CloverSigmaOprod<Float,Output,InputA,InputB> sigma_oprod(arg, out);
sigma_oprod.apply(0);
} // computeCloverSigmaOprod
#endif // GPU_CLOVER_DIRAC
void computeCloverSigmaOprod(GaugeField& oprod,
std::vector<ColorSpinorField*> &x,
std::vector<ColorSpinorField*> &p,
std::vector<std::vector<double> > &coeff)
{
#ifdef GPU_CLOVER_DIRAC
if (x.size() > MAX_NVECTOR) {
// divide and conquer
std::vector<ColorSpinorField*> x0(x.begin(), x.begin()+x.size()/2);
std::vector<ColorSpinorField*> p0(p.begin(), p.begin()+p.size()/2);
std::vector<std::vector<double> > coeff0(coeff.begin(), coeff.begin()+coeff.size()/2);
for (unsigned int i=0; i<coeff0.size(); i++) {
coeff0[i].reserve(2); coeff0[i][0] = coeff[i][0]; coeff0[i][1] = coeff[i][1];
}
computeCloverSigmaOprod(oprod, x0, p0, coeff0);
std::vector<ColorSpinorField*> x1(x.begin()+x.size()/2, x.end());
std::vector<ColorSpinorField*> p1(p.begin()+p.size()/2, p.end());
std::vector<std::vector<double> > coeff1(coeff.begin()+coeff.size()/2, coeff.end());
for (unsigned int i=0; i<coeff1.size(); i++) {
coeff1[i].reserve(2); coeff1[i][0] = coeff[coeff.size()/2 + i][0]; coeff1[i][1] = coeff[coeff.size()/2 + i][1];
}
computeCloverSigmaOprod(oprod, x1, p1, coeff1);
return;
}
if(oprod.Order() != QUDA_FLOAT2_GAUGE_ORDER)
errorQuda("Unsupported output ordering: %d\n", oprod.Order());
if(x[0]->Precision() != oprod.Precision())
errorQuda("Mixed precision not supported: %d %d\n", x[0]->Precision(), oprod.Precision());
if(oprod.Precision() == QUDA_DOUBLE_PRECISION){
Spinor<double2, double2, 12, 0, 0> spinorA[MAX_NVECTOR];
Spinor<double2, double2, 12, 0, 1> spinorB[MAX_NVECTOR];
for (unsigned int i=0; i<x.size(); i++) {
spinorA[i].set(*dynamic_cast<cudaColorSpinorField*>(x[i]));
spinorB[i].set(*dynamic_cast<cudaColorSpinorField*>(p[i]));
}
computeCloverSigmaOprod<double>(gauge::FloatNOrder<double, 18, 2, 18>(oprod),
oprod, spinorA, spinorB, coeff, x.size());
} else {
errorQuda("Unsupported precision: %d\n", oprod.Precision());
}
#else // GPU_CLOVER_DIRAC not defined
errorQuda("Clover Dirac operator has not been built!");
#endif
checkCudaError();
return;
} // computeCloverForce
} // namespace quda
| fe26367e92fd192f845242fb097f4a291bf7a514.cu | #include <cstdio>
#include <cstdlib>
#include <tune_quda.h>
#include <quda_internal.h>
#include <gauge_field_order.h>
#include <quda_matrix.h>
#include <color_spinor.h>
#include <dslash_quda.h>
namespace quda {
#ifdef GPU_CLOVER_DIRAC
namespace { // anonymous
#include <texture.h>
}
// This is the maximum number of color spinors we can process in a single kernel
#if (CUDA_VERSION < 8000)
#define MAX_NVECTOR 1 // multi-vector code doesn't seem to work well with CUDA 7.x
#else
#define MAX_NVECTOR 9
#endif
template<typename Float, typename Output, typename InputA, typename InputB>
struct CloverSigmaOprodArg {
Output oprod;
InputA inA[MAX_NVECTOR];
InputB inB[MAX_NVECTOR];
Float coeff[MAX_NVECTOR][2];
unsigned int length;
int nvector;
CloverSigmaOprodArg(Output &oprod, InputA *inA_, InputB *inB_,
const std::vector<std::vector<double> > &coeff_,
const GaugeField &meta, int nvector)
: oprod(oprod), length(meta.VolumeCB()), nvector(nvector)
{
for (int i=0; i<nvector; i++) {
inA[i] = inA_[i];
inB[i] = inB_[i];
coeff[i][0] = coeff_[i][0];
coeff[i][1] = coeff_[i][1];
}
}
};
template <typename real, int nvector, int mu, int nu, int parity, typename Arg>
inline __device__ void sigmaOprod(Arg &arg, int idx) {
typedef complex<real> Complex;
Matrix<Complex,3> result;
#pragma unroll
for (int i=0; i<nvector; i++) {
ColorSpinor<real,3,4> A, B;
arg.inA[i].load(static_cast<Complex*>(A.data), idx, parity);
arg.inB[i].load(static_cast<Complex*>(B.data), idx, parity);
// multiply by sigma_mu_nu
ColorSpinor<real,3,4> C = A.sigma(nu,mu);
result += arg.coeff[i][parity] * outerProdSpinTrace(C,B);
}
result -= conj(result);
Matrix<Complex,3> temp;
arg.oprod.load(reinterpret_cast<real*>(temp.data), idx, (mu-1)*mu/2 + nu, parity);
temp = result + temp;
arg.oprod.save(reinterpret_cast<real*>(temp.data), idx, (mu-1)*mu/2 + nu, parity);
}
template<int nvector, typename real, typename Output, typename InputA, typename InputB>
__global__ void sigmaOprodKernel(CloverSigmaOprodArg<real, Output, InputA, InputB> arg) {
typedef complex<real> Complex;
int idx = blockIdx.x*blockDim.x + threadIdx.x;
int parity = blockIdx.y*blockDim.y + threadIdx.y;
int mu_nu = blockIdx.z*blockDim.z + threadIdx.z;
if (idx >= arg.length) return;
if (mu_nu >= 6) return;
switch(parity) {
case 0:
switch(mu_nu) {
case 0: sigmaOprod<real, nvector, 1, 0, 0>(arg, idx); break;
case 1: sigmaOprod<real, nvector, 2, 0, 0>(arg, idx); break;
case 2: sigmaOprod<real, nvector, 2, 1, 0>(arg, idx); break;
case 3: sigmaOprod<real, nvector, 3, 0, 0>(arg, idx); break;
case 4: sigmaOprod<real, nvector, 3, 1, 0>(arg, idx); break;
case 5: sigmaOprod<real, nvector, 3, 2, 0>(arg, idx); break;
}
break;
case 1:
switch(mu_nu) {
case 0: sigmaOprod<real, nvector, 1, 0, 1>(arg, idx); break;
case 1: sigmaOprod<real, nvector, 2, 0, 1>(arg, idx); break;
case 2: sigmaOprod<real, nvector, 2, 1, 1>(arg, idx); break;
case 3: sigmaOprod<real, nvector, 3, 0, 1>(arg, idx); break;
case 4: sigmaOprod<real, nvector, 3, 1, 1>(arg, idx); break;
case 5: sigmaOprod<real, nvector, 3, 2, 1>(arg, idx); break;
}
break;
}
return;
} // sigmaOprodKernel
template<typename Float, typename Output, typename InputA, typename InputB>
class CloverSigmaOprod : public TunableVectorYZ {
private:
CloverSigmaOprodArg<Float,Output,InputA,InputB> &arg;
const GaugeField &meta;
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam &) const { return 0; }
unsigned int minThreads() const { return arg.length; }
bool tuneGridDim() const { return false; }
public:
CloverSigmaOprod(CloverSigmaOprodArg<Float,Output,InputA,InputB> &arg, const GaugeField &meta)
: TunableVectorYZ(2,6), arg(arg), meta(meta) {
writeAuxString("prec=%lu,stride=%d,nvector=%d", sizeof(Float), arg.inA[0].Stride(), arg.nvector);
// this sets the communications pattern for the packing kernel
}
virtual ~CloverSigmaOprod() {}
void apply(const cudaStream_t &stream){
if (meta.Location() == QUDA_CUDA_FIELD_LOCATION) {
TuneParam tp = tuneLaunch(*this,getTuning(),getVerbosity());
switch(arg.nvector) {
case 1: sigmaOprodKernel< 1><<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); break;
case 2: sigmaOprodKernel< 2><<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); break;
case 3: sigmaOprodKernel< 3><<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); break;
case 4: sigmaOprodKernel< 4><<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); break;
case 5: sigmaOprodKernel< 5><<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); break;
case 6: sigmaOprodKernel< 6><<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); break;
case 7: sigmaOprodKernel< 7><<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); break;
case 8: sigmaOprodKernel< 8><<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); break;
case 9: sigmaOprodKernel< 9><<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); break;
}
} else { // run the CPU code
errorQuda("No CPU support for staggered outer-product calculation\n");
}
} // apply
void preTune() { this->arg.oprod.save(); }
void postTune() { this->arg.oprod.load(); }
long long flops() const {
return (2*(long long)arg.length)*6*((0 + 144 + 18)*arg.nvector + 18); // spin_mu_nu + spin trace + multiply-add
}
long long bytes() const {
return (2*(long long)arg.length)*6*((arg.inA[0].Bytes() + arg.inB[0].Bytes())*arg.nvector + 2*arg.oprod.Bytes());
}
TuneKey tuneKey() const {
return TuneKey(meta.VolString(), "CloverSigmaOprod", aux);
}
}; // CloverSigmaOprod
template<typename Float, typename Output, typename InputA, typename InputB>
void computeCloverSigmaOprod(Output oprod, const GaugeField& out, InputA *inA, InputB *inB,
std::vector<std::vector<double> > &coeff, int nvector) {
// Create the arguments
CloverSigmaOprodArg<Float,Output,InputA,InputB> arg(oprod, inA, inB, coeff, out, nvector);
CloverSigmaOprod<Float,Output,InputA,InputB> sigma_oprod(arg, out);
sigma_oprod.apply(0);
} // computeCloverSigmaOprod
#endif // GPU_CLOVER_DIRAC
void computeCloverSigmaOprod(GaugeField& oprod,
std::vector<ColorSpinorField*> &x,
std::vector<ColorSpinorField*> &p,
std::vector<std::vector<double> > &coeff)
{
#ifdef GPU_CLOVER_DIRAC
if (x.size() > MAX_NVECTOR) {
// divide and conquer
std::vector<ColorSpinorField*> x0(x.begin(), x.begin()+x.size()/2);
std::vector<ColorSpinorField*> p0(p.begin(), p.begin()+p.size()/2);
std::vector<std::vector<double> > coeff0(coeff.begin(), coeff.begin()+coeff.size()/2);
for (unsigned int i=0; i<coeff0.size(); i++) {
coeff0[i].reserve(2); coeff0[i][0] = coeff[i][0]; coeff0[i][1] = coeff[i][1];
}
computeCloverSigmaOprod(oprod, x0, p0, coeff0);
std::vector<ColorSpinorField*> x1(x.begin()+x.size()/2, x.end());
std::vector<ColorSpinorField*> p1(p.begin()+p.size()/2, p.end());
std::vector<std::vector<double> > coeff1(coeff.begin()+coeff.size()/2, coeff.end());
for (unsigned int i=0; i<coeff1.size(); i++) {
coeff1[i].reserve(2); coeff1[i][0] = coeff[coeff.size()/2 + i][0]; coeff1[i][1] = coeff[coeff.size()/2 + i][1];
}
computeCloverSigmaOprod(oprod, x1, p1, coeff1);
return;
}
if(oprod.Order() != QUDA_FLOAT2_GAUGE_ORDER)
errorQuda("Unsupported output ordering: %d\n", oprod.Order());
if(x[0]->Precision() != oprod.Precision())
errorQuda("Mixed precision not supported: %d %d\n", x[0]->Precision(), oprod.Precision());
if(oprod.Precision() == QUDA_DOUBLE_PRECISION){
Spinor<double2, double2, 12, 0, 0> spinorA[MAX_NVECTOR];
Spinor<double2, double2, 12, 0, 1> spinorB[MAX_NVECTOR];
for (unsigned int i=0; i<x.size(); i++) {
spinorA[i].set(*dynamic_cast<cudaColorSpinorField*>(x[i]));
spinorB[i].set(*dynamic_cast<cudaColorSpinorField*>(p[i]));
}
computeCloverSigmaOprod<double>(gauge::FloatNOrder<double, 18, 2, 18>(oprod),
oprod, spinorA, spinorB, coeff, x.size());
} else {
errorQuda("Unsupported precision: %d\n", oprod.Precision());
}
#else // GPU_CLOVER_DIRAC not defined
errorQuda("Clover Dirac operator has not been built!");
#endif
checkCudaError();
return;
} // computeCloverForce
} // namespace quda
|
5c553aa7a1576d5b4fb29887cb4de45ef6074182.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/SpatialDepthwiseConvolution.cu"
#else
void THNN_(SpatialDepthwiseConvolution_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
THCTensor *weight,
THCTensor *bias,
int kW, int kH,
int dW, int dH,
int padW, int padH,
int dilationW, int dilationH)
{
THCUNN_assertSameGPU(state, 3, input, output, weight);
// Only handle 4D Input Tensors for now
THAssert(!input->is_empty() && THCTensor_(nDimensionLegacyNoScalars)(state, input) == 4);
THAssert(!weight->is_empty() && THCTensor_(nDimensionLegacyNoScalars)(state, weight) == 4);
// We assume that the input and weight Tensors are shaped properly by
// the caller, so we verify that here to some extent
// Weight Tensor is shape (output_channels, 1, kH, kW)
THAssert(weight->size(1) == 1);
// Input Tensor is shape (N, input_channels, H, W)
// We verify that the # of output_channels is a multiple of input_channels
THAssert(weight->size(0) % input->size(1) == 0);
// Bias has same # of channels as output
if (bias) {
THAssert(bias->size(0) == weight->size(0));
}
input = THCTensor_(newContiguous)(state, input);
weight = THCTensor_(newContiguous)(state, weight);
bias = bias ? THCTensor_(newContiguous)(state, bias) : bias;
  // Following the behavior of other THCUNN functions, we shape the output
// Tensor ourselves
int batchSize = input->size(0);
int height = input->size(2);
int width = input->size(3);
int outputHeight = (height + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1;
int outputWidth = (width + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1;
int outputChannels = weight->size(0);
THCTensor_(resize4d)(state, output, batchSize, outputChannels, outputHeight, outputWidth);
// Create THCDeviceTensor
// Kernel currently relies upon all the Tensors to be contiguous, but we made
// them contiguous above
THCDeviceTensor<real, 4> dInput = toDeviceTensor<real, 4>(state, input);
THCDeviceTensor<real, 4> dWeight = toDeviceTensor<real, 4>(state, weight);
THCDeviceTensor<real, 4> dOutput = toDeviceTensor<real, 4>(state, output);
THCDeviceTensor<real, 1> dBias;
if (bias) {
dBias = toDeviceTensor<real, 1>(state, bias);
}
int inputChannels = input->size(1);
int depthwiseMultiplier = outputChannels / inputChannels;
// One thread per output value
int n = THCTensor_(nElement)(state, output);
int blocks = GET_BLOCKS(n);
dim3 grid(blocks);
dim3 block(CUDA_NUM_THREADS);
if (kW == 3 && kH == 3) {
hipLaunchKernelGGL(( spatialDepthwiseConvolutionUpdateOutput<real, accreal, unsigned int, 3>), dim3(grid), dim3(block), 0, THCState_getCurrentStream(state),
dInput, dOutput, dWeight, dBias, bias != NULL, n, outputChannels, depthwiseMultiplier,
width, height, outputWidth, outputHeight,
kW, kH, dW, dH, padW, padH, dilationW, dilationH);
} else if (kW == 1 && kH == 1) {
hipLaunchKernelGGL(( spatialDepthwiseConvolutionUpdateOutput<real, accreal, unsigned int, 1>), dim3(grid), dim3(block), 0, THCState_getCurrentStream(state),
dInput, dOutput, dWeight, dBias, bias != NULL, n, outputChannels, depthwiseMultiplier,
width, height, outputWidth, outputHeight,
kW, kH, dW, dH, padW, padH, dilationW, dilationH);
} else {
hipLaunchKernelGGL(( spatialDepthwiseConvolutionUpdateOutput<real, accreal, unsigned int, 0>), dim3(grid), dim3(block), 0, THCState_getCurrentStream(state),
dInput, dOutput, dWeight, dBias, bias != NULL, n, outputChannels, depthwiseMultiplier,
width, height, outputWidth, outputHeight,
kW, kH, dW, dH, padW, padH, dilationW, dilationH);
}
THCudaCheck(hipGetLastError());
THCTensor_(free)(state, input);
THCTensor_(free)(state, weight);
if (bias) THCTensor_(free)(state, bias);
}
void THNN_(SpatialDepthwiseConvolution_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
THCTensor *weight,
int kW, int kH,
int dW, int dH,
int padW, int padH,
int dilationW, int dilationH)
{
THCUNN_assertSameGPU(state, 3, gradOutput, gradInput, weight);
// Only handle 4D Input Tensors for now
THAssert(!input->is_empty() && THCTensor_(nDimensionLegacyNoScalars)(state, input) == 4);
THAssert(!weight->is_empty() && THCTensor_(nDimensionLegacyNoScalars)(state, weight) == 4);
THAssert(!gradOutput->is_empty() && THCTensor_(nDimensionLegacyNoScalars)(state, gradOutput) == 4);
// Minimal shape checking, as above
// Same # of elements in batch
THAssert(input->size(0) == gradOutput->size(0));
// Same # of filters as outputChannels
THAssert(weight->size(0) == gradOutput->size(1));
// Resize GradInput
THCTensor_(resizeAs)(state, gradInput, input);
int inputChannels = input->size(1);
int height = input->size(2);
int width = input->size(3);
int outputChannels = gradOutput->size(1);
int outputHeight = gradOutput->size(2);
int outputWidth = gradOutput->size(3);
int depthwiseMultiplier = outputChannels / inputChannels;
THCDeviceTensor<real, 4> dGradOutput = toDeviceTensor<real, 4>(state, gradOutput);
THCDeviceTensor<real, 4> dGradInput = toDeviceTensor<real, 4>(state, gradInput);
THCDeviceTensor<real, 4> dWeight = toDeviceTensor<real, 4>(state, weight);
// Kernel currently relies upon all the Tensors to be contiguous
THAssert(dGradOutput.isContiguous());
THAssert(dGradInput.isContiguous());
THAssert(dWeight.isContiguous());
// One thread per gradInput value
int n = THCTensor_(nElement)(state, gradInput);
int blocks = GET_BLOCKS(n);
dim3 grid(blocks);
dim3 block(CUDA_NUM_THREADS);
if (kW == 3 && kH == 3)
if (dW == 1 && dH == 1){
hipLaunchKernelGGL(( spatialDepthwiseConvolutionUpdateGradInput<real, accreal, unsigned int, 3, 1>), dim3(grid), dim3(block), 0, THCState_getCurrentStream(state),
dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width,
height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH);
} else if (dW == 2 && dH == 2) {
hipLaunchKernelGGL(( spatialDepthwiseConvolutionUpdateGradInput<real, accreal, unsigned int, 3, 2>), dim3(grid), dim3(block), 0, THCState_getCurrentStream(state),
dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width,
height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH);
} else {
hipLaunchKernelGGL(( spatialDepthwiseConvolutionUpdateGradInput<real, accreal, unsigned int, 3, 0>), dim3(grid), dim3(block), 0, THCState_getCurrentStream(state),
dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width,
height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH);
}
else if (kW == 1 && kH == 1)
if (dW == 1 && dH == 1){
hipLaunchKernelGGL(( spatialDepthwiseConvolutionUpdateGradInput<real, accreal, unsigned int, 1, 1>), dim3(grid), dim3(block), 0, THCState_getCurrentStream(state),
dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width,
height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH);
} else if (dW == 2 && dH == 2) {
hipLaunchKernelGGL(( spatialDepthwiseConvolutionUpdateGradInput<real, accreal, unsigned int, 1, 2>), dim3(grid), dim3(block), 0, THCState_getCurrentStream(state),
dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width,
height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH);
} else {
hipLaunchKernelGGL(( spatialDepthwiseConvolutionUpdateGradInput<real, accreal, unsigned int, 1, 0>), dim3(grid), dim3(block), 0, THCState_getCurrentStream(state),
dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width,
height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH);
}
else
if (dW == 1 && dH == 1){
hipLaunchKernelGGL(( spatialDepthwiseConvolutionUpdateGradInput<real, accreal, unsigned int, 0, 1>), dim3(grid), dim3(block), 0, THCState_getCurrentStream(state),
dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width,
height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH);
} else if (dW == 2 && dH == 2) {
hipLaunchKernelGGL(( spatialDepthwiseConvolutionUpdateGradInput<real, accreal, unsigned int, 0, 2>), dim3(grid), dim3(block), 0, THCState_getCurrentStream(state),
dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width,
height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH);
} else {
hipLaunchKernelGGL(( spatialDepthwiseConvolutionUpdateGradInput<real, accreal, unsigned int, 0, 0>), dim3(grid), dim3(block), 0, THCState_getCurrentStream(state),
dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width,
height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH);
}
THCudaCheck(hipGetLastError());
}
void THNN_(SpatialDepthwiseConvolution_accGradParameters)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradWeight,
int kW, int kH,
int dW, int dH,
int padW, int padH,
int dilationW, int dilationH)
{
THCUNN_assertSameGPU(state, 3, input, gradOutput, gradWeight);
// Only handle 4D Input Tensors for now
THAssert(!input->is_empty() && THCTensor_(nDimensionLegacyNoScalars)(state, input) == 4);
THAssert(!gradOutput->is_empty() && THCTensor_(nDimensionLegacyNoScalars)(state, gradOutput) == 4);
THAssert(!gradWeight->is_empty() && THCTensor_(nDimensionLegacyNoScalars)(state, gradWeight) == 4);
// Minimal shape checking as above
// Same # of elements in batch
THAssert(input->size(0) == gradOutput->size(0));
// Same # of filters as outputChannels
THAssert(gradWeight->size(0) == gradOutput->size(1));
int batchSize = input->size(0);
int inputChannels = input->size(1);
int height = input->size(2);
int width = input->size(3);
int outputChannels = gradOutput->size(1);
int outputHeight = gradOutput->size(2);
int outputWidth = gradOutput->size(3);
int depthwiseMultiplier = outputChannels / inputChannels;
THCDeviceTensor<real, 4> dGradOutput = toDeviceTensor<real, 4>(state, gradOutput);
THCDeviceTensor<real, 4> dInput = toDeviceTensor<real, 4>(state, input);
THCDeviceTensor<real, 4> dGradWeight = toDeviceTensor<real, 4>(state, gradWeight);
// Kernel currently relies upon all the Tensors to be contiguous
THAssert(dGradOutput.isContiguous());
THAssert(dInput.isContiguous());
THAssert(dGradWeight.isContiguous());
// We parallelize so that each block computes a single value in gradWeight
int blocks = outputChannels * kH * kW;
// Make sure we have enough threads to perform the reduction, and use this number
// to create the shared memory size for the reduction
dim3 grid(blocks);
dim3 block(getGradParamsNumThreads(batchSize));
int smem = block.x * sizeof(accreal);
hipLaunchKernelGGL(( spatialDepthwiseConvolutionAccGradParameters<real, accreal, unsigned int>), dim3(grid), dim3(block), smem, THCState_getCurrentStream(state),
dGradOutput, dInput, dGradWeight, batchSize, inputChannels, outputChannels, depthwiseMultiplier,
width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH);
THCudaCheck(hipGetLastError());
}
#endif
| 5c553aa7a1576d5b4fb29887cb4de45ef6074182.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/SpatialDepthwiseConvolution.cu"
#else
void THNN_(SpatialDepthwiseConvolution_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
THCTensor *weight,
THCTensor *bias,
int kW, int kH,
int dW, int dH,
int padW, int padH,
int dilationW, int dilationH)
{
THCUNN_assertSameGPU(state, 3, input, output, weight);
// Only handle 4D Input Tensors for now
THAssert(!input->is_empty() && THCTensor_(nDimensionLegacyNoScalars)(state, input) == 4);
THAssert(!weight->is_empty() && THCTensor_(nDimensionLegacyNoScalars)(state, weight) == 4);
// We assume that the input and weight Tensors are shaped properly by
// the caller, so we verify that here to some extent
// Weight Tensor is shape (output_channels, 1, kH, kW)
THAssert(weight->size(1) == 1);
// Input Tensor is shape (N, input_channels, H, W)
// We verify that the # of output_channels is a multiple of input_channels
THAssert(weight->size(0) % input->size(1) == 0);
// Bias has same # of channels as output
if (bias) {
THAssert(bias->size(0) == weight->size(0));
}
input = THCTensor_(newContiguous)(state, input);
weight = THCTensor_(newContiguous)(state, weight);
bias = bias ? THCTensor_(newContiguous)(state, bias) : bias;
  // Following the behavior of other THCUNN functions, we shape the output
// Tensor ourselves
int batchSize = input->size(0);
int height = input->size(2);
int width = input->size(3);
int outputHeight = (height + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1;
int outputWidth = (width + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1;
int outputChannels = weight->size(0);
THCTensor_(resize4d)(state, output, batchSize, outputChannels, outputHeight, outputWidth);
// Create THCDeviceTensor
// Kernel currently relies upon all the Tensors to be contiguous, but we made
// them contiguous above
THCDeviceTensor<real, 4> dInput = toDeviceTensor<real, 4>(state, input);
THCDeviceTensor<real, 4> dWeight = toDeviceTensor<real, 4>(state, weight);
THCDeviceTensor<real, 4> dOutput = toDeviceTensor<real, 4>(state, output);
THCDeviceTensor<real, 1> dBias;
if (bias) {
dBias = toDeviceTensor<real, 1>(state, bias);
}
int inputChannels = input->size(1);
int depthwiseMultiplier = outputChannels / inputChannels;
// One thread per output value
int n = THCTensor_(nElement)(state, output);
int blocks = GET_BLOCKS(n);
dim3 grid(blocks);
dim3 block(CUDA_NUM_THREADS);
if (kW == 3 && kH == 3) {
spatialDepthwiseConvolutionUpdateOutput<real, accreal, unsigned int, 3><<<grid, block, 0, THCState_getCurrentStream(state)>>>(
dInput, dOutput, dWeight, dBias, bias != NULL, n, outputChannels, depthwiseMultiplier,
width, height, outputWidth, outputHeight,
kW, kH, dW, dH, padW, padH, dilationW, dilationH);
} else if (kW == 1 && kH == 1) {
spatialDepthwiseConvolutionUpdateOutput<real, accreal, unsigned int, 1><<<grid, block, 0, THCState_getCurrentStream(state)>>>(
dInput, dOutput, dWeight, dBias, bias != NULL, n, outputChannels, depthwiseMultiplier,
width, height, outputWidth, outputHeight,
kW, kH, dW, dH, padW, padH, dilationW, dilationH);
} else {
spatialDepthwiseConvolutionUpdateOutput<real, accreal, unsigned int, 0><<<grid, block, 0, THCState_getCurrentStream(state)>>>(
dInput, dOutput, dWeight, dBias, bias != NULL, n, outputChannels, depthwiseMultiplier,
width, height, outputWidth, outputHeight,
kW, kH, dW, dH, padW, padH, dilationW, dilationH);
}
THCudaCheck(cudaGetLastError());
THCTensor_(free)(state, input);
THCTensor_(free)(state, weight);
if (bias) THCTensor_(free)(state, bias);
}
void THNN_(SpatialDepthwiseConvolution_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
THCTensor *weight,
int kW, int kH,
int dW, int dH,
int padW, int padH,
int dilationW, int dilationH)
{
THCUNN_assertSameGPU(state, 3, gradOutput, gradInput, weight);
// Only handle 4D Input Tensors for now
THAssert(!input->is_empty() && THCTensor_(nDimensionLegacyNoScalars)(state, input) == 4);
THAssert(!weight->is_empty() && THCTensor_(nDimensionLegacyNoScalars)(state, weight) == 4);
THAssert(!gradOutput->is_empty() && THCTensor_(nDimensionLegacyNoScalars)(state, gradOutput) == 4);
// Minimal shape checking, as above
// Same # of elements in batch
THAssert(input->size(0) == gradOutput->size(0));
// Same # of filters as outputChannels
THAssert(weight->size(0) == gradOutput->size(1));
// Resize GradInput
THCTensor_(resizeAs)(state, gradInput, input);
int inputChannels = input->size(1);
int height = input->size(2);
int width = input->size(3);
int outputChannels = gradOutput->size(1);
int outputHeight = gradOutput->size(2);
int outputWidth = gradOutput->size(3);
int depthwiseMultiplier = outputChannels / inputChannels;
THCDeviceTensor<real, 4> dGradOutput = toDeviceTensor<real, 4>(state, gradOutput);
THCDeviceTensor<real, 4> dGradInput = toDeviceTensor<real, 4>(state, gradInput);
THCDeviceTensor<real, 4> dWeight = toDeviceTensor<real, 4>(state, weight);
// Kernel currently relies upon all the Tensors to be contiguous
THAssert(dGradOutput.isContiguous());
THAssert(dGradInput.isContiguous());
THAssert(dWeight.isContiguous());
// One thread per gradInput value
int n = THCTensor_(nElement)(state, gradInput);
int blocks = GET_BLOCKS(n);
dim3 grid(blocks);
dim3 block(CUDA_NUM_THREADS);
if (kW == 3 && kH == 3)
if (dW == 1 && dH == 1){
spatialDepthwiseConvolutionUpdateGradInput<real, accreal, unsigned int, 3, 1><<<grid, block, 0, THCState_getCurrentStream(state)>>>(
dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width,
height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH);
} else if (dW == 2 && dH == 2) {
spatialDepthwiseConvolutionUpdateGradInput<real, accreal, unsigned int, 3, 2><<<grid, block, 0, THCState_getCurrentStream(state)>>>(
dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width,
height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH);
} else {
spatialDepthwiseConvolutionUpdateGradInput<real, accreal, unsigned int, 3, 0><<<grid, block, 0, THCState_getCurrentStream(state)>>>(
dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width,
height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH);
}
else if (kW == 1 && kH == 1)
if (dW == 1 && dH == 1){
spatialDepthwiseConvolutionUpdateGradInput<real, accreal, unsigned int, 1, 1><<<grid, block, 0, THCState_getCurrentStream(state)>>>(
dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width,
height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH);
} else if (dW == 2 && dH == 2) {
spatialDepthwiseConvolutionUpdateGradInput<real, accreal, unsigned int, 1, 2><<<grid, block, 0, THCState_getCurrentStream(state)>>>(
dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width,
height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH);
} else {
spatialDepthwiseConvolutionUpdateGradInput<real, accreal, unsigned int, 1, 0><<<grid, block, 0, THCState_getCurrentStream(state)>>>(
dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width,
height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH);
}
else
if (dW == 1 && dH == 1){
spatialDepthwiseConvolutionUpdateGradInput<real, accreal, unsigned int, 0, 1><<<grid, block, 0, THCState_getCurrentStream(state)>>>(
dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width,
height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH);
} else if (dW == 2 && dH == 2) {
spatialDepthwiseConvolutionUpdateGradInput<real, accreal, unsigned int, 0, 2><<<grid, block, 0, THCState_getCurrentStream(state)>>>(
dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width,
height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH);
} else {
spatialDepthwiseConvolutionUpdateGradInput<real, accreal, unsigned int, 0, 0><<<grid, block, 0, THCState_getCurrentStream(state)>>>(
dGradOutput, dGradInput, dWeight, n, inputChannels, depthwiseMultiplier, outputChannels, width,
height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH);
}
THCudaCheck(cudaGetLastError());
}
void THNN_(SpatialDepthwiseConvolution_accGradParameters)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradWeight,
int kW, int kH,
int dW, int dH,
int padW, int padH,
int dilationW, int dilationH)
{
THCUNN_assertSameGPU(state, 3, input, gradOutput, gradWeight);
// Only handle 4D Input Tensors for now
THAssert(!input->is_empty() && THCTensor_(nDimensionLegacyNoScalars)(state, input) == 4);
THAssert(!gradOutput->is_empty() && THCTensor_(nDimensionLegacyNoScalars)(state, gradOutput) == 4);
THAssert(!gradWeight->is_empty() && THCTensor_(nDimensionLegacyNoScalars)(state, gradWeight) == 4);
// Minimal shape checking as above
// Same # of elements in batch
THAssert(input->size(0) == gradOutput->size(0));
// Same # of filters as outputChannels
THAssert(gradWeight->size(0) == gradOutput->size(1));
int batchSize = input->size(0);
int inputChannels = input->size(1);
int height = input->size(2);
int width = input->size(3);
int outputChannels = gradOutput->size(1);
int outputHeight = gradOutput->size(2);
int outputWidth = gradOutput->size(3);
int depthwiseMultiplier = outputChannels / inputChannels;
THCDeviceTensor<real, 4> dGradOutput = toDeviceTensor<real, 4>(state, gradOutput);
THCDeviceTensor<real, 4> dInput = toDeviceTensor<real, 4>(state, input);
THCDeviceTensor<real, 4> dGradWeight = toDeviceTensor<real, 4>(state, gradWeight);
// Kernel currently relies upon all the Tensors to be contiguous
THAssert(dGradOutput.isContiguous());
THAssert(dInput.isContiguous());
THAssert(dGradWeight.isContiguous());
// We parallelize so that each block computes a single value in gradWeight
int blocks = outputChannels * kH * kW;
// Make sure we have enough threads to perform the reduction, and use this number
// to create the shared memory size for the reduction
dim3 grid(blocks);
dim3 block(getGradParamsNumThreads(batchSize));
int smem = block.x * sizeof(accreal);
spatialDepthwiseConvolutionAccGradParameters<real, accreal, unsigned int><<<grid, block, smem, THCState_getCurrentStream(state)>>>(
dGradOutput, dInput, dGradWeight, batchSize, inputChannels, outputChannels, depthwiseMultiplier,
width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH);
THCudaCheck(cudaGetLastError());
}
#endif
|
028d55110a91ae8073eaff5541585d3b62b1741a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void print_my_index()
{
int tid = threadIdx.x;
int bid = blockIdx.x;
printf("my id :%d , block_id :%d \n",tid,bid);
} | 028d55110a91ae8073eaff5541585d3b62b1741a.cu | #include "includes.h"
__global__ void print_my_index()
{
int tid = threadIdx.x;
int bid = blockIdx.x;
printf("my id :%d , block_id :%d \n",tid,bid);
} |
fee5fad2927ec85df658e2f1b6a79407759b85e2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/********************************************************************
##### File Name: squareSum3.cu
##### File Func: calculate the sum of inputs's square
##### Author: HeZhichao
##### E-mail: [email protected]
##### Create Time: 2016-5-11
*********************************************************************/
# include<stdio.h>
# include <stdlib.h>
# include <hip/hip_runtime.h>
// ======== define area ========
# define DATA_SIZE 1048576 // 1M
# define THREAD_NUM 1024 // thread num
// ======== global area ========
int data[DATA_SIZE];
void printDeviceProp( const hipDeviceProp_t &prop);
bool InitCUDA();
void generateData( int *data, int size);
__global__ static void squaresSum( int *data, int *sum, clock_t *time);
int main( int argc, char const *argv[]) {
// init CUDA device
if (!InitCUDA()) { return 0 ; }
printf ( "CUDA initialized.\n" );
// generate rand datas
printf("test !\n");
generateData(data, DATA_SIZE);
// malloc space for datas in GPU
int *gpuData;
int *sum;
clock_t *time;
printf("hipMalloc start !\n");
hipMalloc(( void **) &gpuData, sizeof ( int ) * DATA_SIZE);
printf("hipMalloc gpuData is ok !\n");
hipMalloc(( void **) &sum, sizeof ( int )*THREAD_NUM);
printf("hipMalloc sum is ok !\n");
hipMalloc(( void **) &time, sizeof (clock_t));
hipMemcpy(gpuData, data, sizeof ( int ) * DATA_SIZE, hipMemcpyHostToDevice);
printf("hipMemcpy data to gpuData is ok !\n");
// calculate the squares's sum
  // Kernel launch syntax: kernel_name<<<block num, thread num, shared memory size>>>(param, ...); (rewritten as hipLaunchKernelGGL below)
hipLaunchKernelGGL(( squaresSum), dim3(1) , dim3(THREAD_NUM) , 0 , 0, gpuData, sum, time);
// copy the result from GPU to HOST
int result[THREAD_NUM];
clock_t time_used;
hipMemcpy(result, sum, sizeof ( int )*THREAD_NUM, hipMemcpyDeviceToHost);
hipMemcpy(&time_used, time, sizeof (clock_t), hipMemcpyDeviceToHost);
// free GPU spaces
hipFree(gpuData);
hipFree(sum); hipFree(time);
// print result
int tmp_result = 0;
for(int i=0;i<THREAD_NUM;++i){
tmp_result += result[i];
}
printf ( "(GPU) sum:%d time:%ld\n" , tmp_result, time_used);
// CPU calculate
tmp_result = 0 ;
clock_t start = clock();
for ( int i = 0 ; i < DATA_SIZE; ++i) {
tmp_result += data[i] * data[i];
}
time_used = clock() - start;
printf ( "(CPU) sum:%d time:%ld\n" , tmp_result, time_used);/**/
return 0 ;
}
//__global__ means that this function run in GPU, there isn't any return value.
__global__ static void squaresSum( int *data, int *sum, clock_t *time) {
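    // Each thread sums the squares of a contiguous chunk of DATA_SIZE/THREAD_NUM values; the host adds the per-thread partial sums afterwards.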
const int size = DATA_SIZE / THREAD_NUM;
const int tid = threadIdx.x;
int tmp_sum = 0 ;
clock_t start = clock();
for ( int i = tid*size ; i < (tid+1)*size; ++i) {
tmp_sum += data[i] * data[i];
}
sum[tid] = tmp_sum;
*time = clock() - start;
}
// ======== used to generate rand datas ========
void generateData( int *data, int size) {
printf("generateData !");
for ( int i = 0 ; i < size; ++i) {
data[i] = rand() % 10 ;
}
}
void printDeviceProp(const hipDeviceProp_t &prop)
{
printf("Device Name : %s.\n", prop.name);
printf("totalGlobalMem : %lu.\n", prop.totalGlobalMem);
printf("sharedMemPerBlock : %lu.\n", prop.sharedMemPerBlock);
printf("regsPerBlock : %d.\n", prop.regsPerBlock);
printf("warpSize : %d.\n", prop.warpSize);
printf("memPitch : %lu.\n", prop.memPitch);
printf("maxThreadsPerBlock : %d.\n", prop.maxThreadsPerBlock);
printf("maxThreadsDim[0 - 2] : %d %d %d.\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
printf("maxGridSize[0 - 2] : %d %d %d.\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
printf("totalConstMem : %lu.\n", prop.totalConstMem);
printf("major.minor : %d.%d.\n", prop.major, prop.minor);
printf("clockRate : %d.\n", prop.clockRate);
printf("textureAlignment : %lu.\n", prop.textureAlignment);
printf("deviceOverlap : %d.\n", prop.deviceOverlap);
printf("multiProcessorCount : %d.\n", prop.multiProcessorCount);
}
bool InitCUDA()
{
//used to count the device numbers
int count;
// get the cuda device count
hipGetDeviceCount(&count);
if (count == 0) {
fprintf(stderr, "There is no device.\n");
return false;
}
// find the device >= 1.X
bool noDeviceSupport = true;
int i;
for (i = 0; i < count; ++i) {
hipDeviceProp_t prop;
if (hipGetDeviceProperties(&prop, i) == hipSuccess) {
if (prop.major >= 1) {
noDeviceSupport = false;
printf("****** Device No%d*********************************\n",i);
printDeviceProp(prop);
printf("\n");
}
}
}
// if can't find the device
if (noDeviceSupport == true) {
fprintf(stderr, "There is no device supporting CUDA 1.x.\n");
return false;
}
// set cuda device
hipSetDevice(4);
printf ( "Device No%d is selected.\n",4 );
return true;
}
| fee5fad2927ec85df658e2f1b6a79407759b85e2.cu | /********************************************************************
##### File Name: squareSum3.cu
##### File Func: calculate the sum of inputs's square
##### Author: HeZhichao
##### E-mail: [email protected]
##### Create Time: 2016-5-11
*********************************************************************/
# include<stdio.h>
# include <stdlib.h>
# include <cuda_runtime.h>
// ======== define area ========
# define DATA_SIZE 1048576 // 1M
# define THREAD_NUM 1024 // thread num
// ======== global area ========
int data[DATA_SIZE];
void printDeviceProp( const cudaDeviceProp &prop);
bool InitCUDA();
void generateData( int *data, int size);
__global__ static void squaresSum( int *data, int *sum, clock_t *time);
int main( int argc, char const *argv[]) {
// init CUDA device
if (!InitCUDA()) { return 0 ; }
printf ( "CUDA initialized.\n" );
// generate rand datas
printf("test !\n");
generateData(data, DATA_SIZE);
// malloc space for datas in GPU
int *gpuData;
int *sum;
clock_t *time;
printf("cudaMalloc start !\n");
cudaMalloc(( void **) &gpuData, sizeof ( int ) * DATA_SIZE);
printf("cudaMalloc gpuData is ok !\n");
cudaMalloc(( void **) &sum, sizeof ( int )*THREAD_NUM);
printf("cudaMalloc sum is ok !\n");
cudaMalloc(( void **) &time, sizeof (clock_t));
cudaMemcpy(gpuData, data, sizeof ( int ) * DATA_SIZE, cudaMemcpyHostToDevice);
printf("cudaMemcpy data to gpuData is ok !\n");
// calculate the squares's sum
  // CUDA launches the GPU kernel as: kernel_name<<<block num, thread num, shared memory size>>>(param, ...);
squaresSum<<< 1 , THREAD_NUM , 0 >>>(gpuData, sum, time);
// copy the result from GPU to HOST
int result[THREAD_NUM];
clock_t time_used;
cudaMemcpy(result, sum, sizeof ( int )*THREAD_NUM, cudaMemcpyDeviceToHost);
cudaMemcpy(&time_used, time, sizeof (clock_t), cudaMemcpyDeviceToHost);
// free GPU spaces
cudaFree(gpuData);
cudaFree(sum); cudaFree(time);
// print result
int tmp_result = 0;
for(int i=0;i<THREAD_NUM;++i){
tmp_result += result[i];
}
printf ( "(GPU) sum:%d time:%ld\n" , tmp_result, time_used);
// CPU calculate
tmp_result = 0 ;
clock_t start = clock();
for ( int i = 0 ; i < DATA_SIZE; ++i) {
tmp_result += data[i] * data[i];
}
time_used = clock() - start;
printf ( "(CPU) sum:%d time:%ld\n" , tmp_result, time_used);/**/
return 0 ;
}
//__global__ means that this function run in GPU, there isn't any return value.
__global__ static void squaresSum( int *data, int *sum, clock_t *time) {
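    // Each thread sums the squares of a contiguous chunk of DATA_SIZE/THREAD_NUM values; the host adds the per-thread partial sums afterwards.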
const int size = DATA_SIZE / THREAD_NUM;
const int tid = threadIdx.x;
int tmp_sum = 0 ;
clock_t start = clock();
for ( int i = tid*size ; i < (tid+1)*size; ++i) {
tmp_sum += data[i] * data[i];
}
sum[tid] = tmp_sum;
*time = clock() - start;
}
// ======== used to generate rand datas ========
void generateData( int *data, int size) {
printf("generateData !");
for ( int i = 0 ; i < size; ++i) {
data[i] = rand() % 10 ;
}
}
void printDeviceProp(const cudaDeviceProp &prop)
{
printf("Device Name : %s.\n", prop.name);
printf("totalGlobalMem : %lu.\n", prop.totalGlobalMem);
printf("sharedMemPerBlock : %lu.\n", prop.sharedMemPerBlock);
printf("regsPerBlock : %d.\n", prop.regsPerBlock);
printf("warpSize : %d.\n", prop.warpSize);
printf("memPitch : %lu.\n", prop.memPitch);
printf("maxThreadsPerBlock : %d.\n", prop.maxThreadsPerBlock);
printf("maxThreadsDim[0 - 2] : %d %d %d.\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
printf("maxGridSize[0 - 2] : %d %d %d.\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
printf("totalConstMem : %lu.\n", prop.totalConstMem);
printf("major.minor : %d.%d.\n", prop.major, prop.minor);
printf("clockRate : %d.\n", prop.clockRate);
printf("textureAlignment : %lu.\n", prop.textureAlignment);
printf("deviceOverlap : %d.\n", prop.deviceOverlap);
printf("multiProcessorCount : %d.\n", prop.multiProcessorCount);
}
bool InitCUDA()
{
//used to count the device numbers
int count;
// get the cuda device count
cudaGetDeviceCount(&count);
if (count == 0) {
fprintf(stderr, "There is no device.\n");
return false;
}
// find the device >= 1.X
bool noDeviceSupport = true;
int i;
for (i = 0; i < count; ++i) {
cudaDeviceProp prop;
if (cudaGetDeviceProperties(&prop, i) == cudaSuccess) {
if (prop.major >= 1) {
noDeviceSupport = false;
printf("****** Device No%d*********************************\n",i);
printDeviceProp(prop);
printf("\n");
}
}
}
// if can't find the device
if (noDeviceSupport == true) {
fprintf(stderr, "There is no device supporting CUDA 1.x.\n");
return false;
}
// set cuda device
cudaSetDevice(4);
printf ( "Device No%d is selected.\n",4 );
return true;
}
|
12e4bb4022e89b3cb5f9f7071f9ea494341da8fa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void add_weighted_kernel(unsigned int batchSize, unsigned int nbOutputs, unsigned int outputsHeight, unsigned int outputsWidth, float* estimated_labels, unsigned int nbChannels, unsigned int image_height, unsigned int image_width, float* input_image, unsigned char* workspace, float alpha)
{
const int batchEstimatedOffset = nbOutputs * outputsHeight * outputsWidth * blockIdx.z;
const int batchImageOffset = nbChannels * image_height * image_width * blockIdx.z;
const unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int stride = blockDim.x * gridDim.x;
for (unsigned int i = index; i < outputsWidth * outputsHeight; i += stride)
{
unsigned int outputMax = 0;
if (nbOutputs > 1)
{
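            // Arg-max over the nbOutputs class scores for this output location.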
float maxVal = estimated_labels[i + batchEstimatedOffset];
for (unsigned int cls = 1; cls < nbOutputs; ++cls) {
const float tmp = estimated_labels[i
+ cls*outputsWidth*outputsHeight
+ batchEstimatedOffset];
if (tmp > maxVal) {
outputMax = cls;
maxVal = tmp;
}
}
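            // Overlay the winning class colour (from the colors table defined elsewhere), scaled by alpha, onto the input channels and write the interleaved result to workspace.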
const unsigned char ch0
= (unsigned char) max(colors[outputMax%4][0]*alpha, min(255.0, colors[outputMax%4][0]*alpha + input_image[i + batchImageOffset]));
const unsigned char ch1
= (unsigned char) max(colors[outputMax%4][1]*alpha, min(255.0, colors[outputMax%4][1]*alpha + input_image[i + image_height*image_width + batchImageOffset]));
const unsigned char ch2
= (unsigned char) max(colors[outputMax%4][2]*alpha, min(255.0, colors[outputMax%4][2]*alpha + input_image[i + 2*image_height*image_width + batchImageOffset]));
workspace[i*3 + batchImageOffset] = ch0;
workspace[i*3 + 1 + batchImageOffset] = ch1;
workspace[i*3 + 2 + batchImageOffset] = ch2;
}
}
} | 12e4bb4022e89b3cb5f9f7071f9ea494341da8fa.cu | #include "includes.h"
__global__ void add_weighted_kernel(unsigned int batchSize, unsigned int nbOutputs, unsigned int outputsHeight, unsigned int outputsWidth, float* estimated_labels, unsigned int nbChannels, unsigned int image_height, unsigned int image_width, float* input_image, unsigned char* workspace, float alpha)
{
const int batchEstimatedOffset = nbOutputs * outputsHeight * outputsWidth * blockIdx.z;
const int batchImageOffset = nbChannels * image_height * image_width * blockIdx.z;
const unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int stride = blockDim.x * gridDim.x;
for (unsigned int i = index; i < outputsWidth * outputsHeight; i += stride)
{
unsigned int outputMax = 0;
if (nbOutputs > 1)
{
float maxVal = estimated_labels[i + batchEstimatedOffset];
for (unsigned int cls = 1; cls < nbOutputs; ++cls) {
const float tmp = estimated_labels[i
+ cls*outputsWidth*outputsHeight
+ batchEstimatedOffset];
if (tmp > maxVal) {
outputMax = cls;
maxVal = tmp;
}
}
const unsigned char ch0
= (unsigned char) max(colors[outputMax%4][0]*alpha, min(255.0, colors[outputMax%4][0]*alpha + input_image[i + batchImageOffset]));
const unsigned char ch1
= (unsigned char) max(colors[outputMax%4][1]*alpha, min(255.0, colors[outputMax%4][1]*alpha + input_image[i + image_height*image_width + batchImageOffset]));
const unsigned char ch2
= (unsigned char) max(colors[outputMax%4][2]*alpha, min(255.0, colors[outputMax%4][2]*alpha + input_image[i + 2*image_height*image_width + batchImageOffset]));
workspace[i*3 + batchImageOffset] = ch0;
workspace[i*3 + 1 + batchImageOffset] = ch1;
workspace[i*3 + 2 + batchImageOffset] = ch2;
}
}
} |
26b89d566984fcc1c476d242a90c374c67a4abfa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/Context.h>
#include <ATen/hip/HIPContext.h>
#define CUDA_NUM_THREADS 256
#define CUDA_MAX_THREADS 256
// #define THREADS_PER_BLOCK 64
#define DIM0(TENSOR) ((TENSOR).x)
#define DIM1(TENSOR) ((TENSOR).y)
#define DIM2(TENSOR) ((TENSOR).z)
#define DIM3(TENSOR) ((TENSOR).w)
#define DIM3_INDEX(TENSOR, xx, yy, zz, ww) ((TENSOR)[((xx) * (TENSOR##_stride.x)) + ((yy) * (TENSOR##_stride.y)) + ((zz) * (TENSOR##_stride.z)) + ((ww) * (TENSOR##_stride.w))])
#define EPS 1e-8
#define SAFE_DIV(a, b) ( (b==0)? ( (a)/(EPS) ): ( (a)/(b) ) )
#define CHECK_LEGALITY(x, min, max) ((x>=min && x<=max)? (true):(false) )
template <typename scalar_t>
__global__ void kernel_block_extractor_update_output(const int n,
const scalar_t* __restrict__ source,
const long4 source_size,
const long4 source_stride,
const scalar_t* __restrict__ flow_field,
const long4 flow_field_size,
const long4 flow_field_stride,
scalar_t* __restrict__ output,
const long4 output_size,
const long4 output_stride,
int kernel_size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
int dim_b = DIM0(output_size);
int dim_c = DIM1(output_size);
int dim_h = DIM2(output_size);
int dim_w = DIM3(output_size);
int dim_chw = DIM0(output_stride);
int dim_hw = DIM1(output_stride);
int dim_hs = DIM2(source_size);
int dim_ws = DIM3(source_size);
int b = ( index / dim_chw ) % dim_b;
int c = ( index / dim_hw ) % dim_c;
int y = ( index / dim_w ) % dim_h;
int x = ( index ) % dim_w;
int yf = y/kernel_size;
int xf = x/kernel_size;
int yf_offset = y%kernel_size - kernel_size/2;
int xf_offset = x%kernel_size - kernel_size/2;
scalar_t flow_y = DIM3_INDEX(flow_field, b, 1, yf, xf) + yf_offset;
scalar_t flow_x = DIM3_INDEX(flow_field, b, 0, yf, xf) + xf_offset;
scalar_t dy = flow_y + static_cast<scalar_t>(yf);
scalar_t dx = flow_x + static_cast<scalar_t>(xf);
int xL = max(min( int(floor(dx) ), dim_ws-1), 0);
int xR = max(min( int(floor(dx) + 1), dim_ws-1), 0);
int yT = max(min( int(floor(dy) ), dim_hs-1), 0);
int yB = max(min( int(floor(dy) + 1), dim_hs-1), 0);
scalar_t xL_P = 1 - (dx - floor(dx));
scalar_t xR_P = dx - floor(dx);
scalar_t yT_P = 1 - (dy - floor(dy));
scalar_t yB_P = dy - floor(dy);
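    // Bilinearly interpolate the source at (dx, dy) from its four neighbouring pixels, weighted by the fractional offsets.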
scalar_t sample = 0.0f;
sample += (xL_P*yT_P * DIM3_INDEX(source, b, c, yT, xL));
sample += (xR_P*yT_P * DIM3_INDEX(source, b, c, yT, xR));
sample += (xL_P*yB_P * DIM3_INDEX(source, b, c, yB, xL));
sample += (xR_P*yB_P * DIM3_INDEX(source, b, c, yB, xR));
output[index] = sample;
}
template <typename scalar_t>
__global__ void kernel_block_extractor_backward(
const int n,
const scalar_t* __restrict__ source,
const long4 source_size,
const long4 source_stride,
const scalar_t* __restrict__ flow_field,
const long4 flow_field_size,
const long4 flow_field_stride,
const scalar_t* __restrict__ grad_output,
const long4 grad_output_size,
const long4 grad_output_stride,
scalar_t* __restrict__ grad_source,
const long4 grad_source_size,
const long4 grad_source_stride,
scalar_t* __restrict__ grad_flow_field,
const long4 grad_flow_field_size,
const long4 grad_flow_field_stride,
int kernel_size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
int dim_b = DIM0(grad_output_size);
int dim_c = DIM1(grad_output_size);
int dim_h = DIM2(grad_output_size);
int dim_w = DIM3(grad_output_size);
int dim_chw = DIM0(grad_output_stride);
int dim_hw = DIM1(grad_output_stride);
int b = ( index / dim_chw ) % dim_b;
int c = ( index / dim_hw ) % dim_c;
int y = ( index / dim_w ) % dim_h;
int x = ( index ) % dim_w;
int dim_hs = DIM2(source_size);
int dim_ws = DIM3(source_size);
int yf = y/kernel_size;
int xf = x/kernel_size;
int yf_offset = y%kernel_size - kernel_size/2;
int xf_offset = x%kernel_size - kernel_size/2;
scalar_t flow_y = DIM3_INDEX(flow_field, b, 1, yf, xf) + yf_offset;
scalar_t flow_x = DIM3_INDEX(flow_field, b, 0, yf, xf) + xf_offset;
scalar_t dy = flow_y + static_cast<scalar_t>(yf);
scalar_t dx = flow_x + static_cast<scalar_t>(xf);
int xL = max(min( int(floor(dx) ), dim_ws-1), 0);
int xR = max(min( int(floor(dx) + 1), dim_ws-1), 0);
int yT = max(min( int(floor(dy) ), dim_hs-1), 0);
int yB = max(min( int(floor(dy) + 1), dim_hs-1), 0);
scalar_t xL_P = 1 - (dx - floor(dx));
scalar_t xR_P = dx - floor(dx);
scalar_t yT_P = 1 - (dy - floor(dy));
scalar_t yB_P = dy - floor(dy);
scalar_t xL_yT = DIM3_INDEX(source, b, c, yT, xL);
scalar_t xR_yT = DIM3_INDEX(source, b, c, yT, xR);
scalar_t xL_yB = DIM3_INDEX(source, b, c, yB, xL);
scalar_t xR_yB = DIM3_INDEX(source, b, c, yB, xR);
scalar_t grad = DIM3_INDEX(grad_output, b, c, y, x);
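    // Scatter the output gradient to the four contributing source pixels and to the flow field via atomic adds (multiple threads may update the same locations).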
atomicAdd(&DIM3_INDEX(grad_source, b, c, yT, xL), grad*xL_P*yT_P);
atomicAdd(&DIM3_INDEX(grad_source, b, c, yT, xR), grad*xR_P*yT_P);
atomicAdd(&DIM3_INDEX(grad_source, b, c, yB, xL), grad*xL_P*yB_P);
atomicAdd(&DIM3_INDEX(grad_source, b, c, yB, xR), grad*xR_P*yB_P);
scalar_t grady = grad*(-xL_P*xL_yT - xR_P*xR_yT + xL_P*xL_yB + xR_P*xR_yB);
scalar_t gradx = grad*(-yT_P*xL_yT - yB_P*xL_yB + yT_P*xR_yT + yB_P*xR_yB);
atomicAdd(&DIM3_INDEX(grad_flow_field, b, 1, yf, xf), grady);
atomicAdd(&DIM3_INDEX(grad_flow_field, b, 0, yf, xf), gradx);
}
void block_extractor_kernel_forward(
at::Tensor& source,
at::Tensor& flow_field,
at::Tensor& output,
int kernel_size) {
// clock_t start, end;
// start = clock();
int n = output.numel();
const long4 source_size = make_long4(source.size(0), source.size(1), source.size(2), source.size(3));
const long4 source_stride = make_long4(source.stride(0), source.stride(1), source.stride(2), source.stride(3));
const long4 flow_field_size = make_long4(flow_field.size(0), flow_field.size(1), flow_field.size(2), flow_field.size(3));
const long4 flow_field_stride = make_long4(flow_field.stride(0), flow_field.stride(1), flow_field.stride(2), flow_field.stride(3));
const long4 output_size = make_long4(output.size(0), output.size(1), output.size(2), output.size(3));
const long4 output_stride = make_long4(output.stride(0), output.stride(1), output.stride(2), output.stride(3));
int Threads = CUDA_NUM_THREADS;
const dim3 threads(Threads);
const dim3 blocks((n + Threads - 1) / Threads);
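    // One thread per output element, rounded up so all n elements are covered.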
AT_DISPATCH_FLOATING_TYPES(source.type(), "block_extractor_forward_kernel", ([&] {
hipLaunchKernelGGL(( kernel_block_extractor_update_output<scalar_t>), dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA() ,
n,
source.data<scalar_t>(),
source_size,
source_stride,
flow_field.data<scalar_t>(),
flow_field_size,
flow_field_stride,
output.data<scalar_t>(),
output_size,
output_stride,
kernel_size);
}));
// end = clock();
// printf("%d\n", end-start);
// TODO: ATen-equivalent check
// THCudaCheck(hipGetLastError());
}
void block_extractor_kernel_backward(
at::Tensor& source,
at::Tensor& flow_field,
at::Tensor& grad_output,
at::Tensor& grad_source,
at::Tensor& grad_flow_field,
int kernel_size) {
int n = grad_output.numel();
const long4 source_size = make_long4(source.size(0), source.size(1), source.size(2), source.size(3));
const long4 source_stride = make_long4(source.stride(0), source.stride(1), source.stride(2), source.stride(3));
const long4 flow_field_size = make_long4(flow_field.size(0), flow_field.size(1), flow_field.size(2), flow_field.size(3));
const long4 flow_field_stride = make_long4(flow_field.stride(0), flow_field.stride(1), flow_field.stride(2), flow_field.stride(3));
const long4 grad_output_size = make_long4(grad_output.size(0), grad_output.size(1), grad_output.size(2), grad_output.size(3));
const long4 grad_output_stride = make_long4(grad_output.stride(0), grad_output.stride(1), grad_output.stride(2), grad_output.stride(3));
const long4 grad_source_size = make_long4(grad_source.size(0), grad_source.size(1), grad_source.size(2), grad_source.size(3));
const long4 grad_source_stride = make_long4(grad_source.stride(0), grad_source.stride(1), grad_source.stride(2), grad_source.stride(3));
const long4 grad_flow_field_size = make_long4(grad_flow_field.size(0), grad_flow_field.size(1), grad_flow_field.size(2), grad_flow_field.size(3));
const long4 grad_flow_field_stride = make_long4(grad_flow_field.stride(0), grad_flow_field.stride(1), grad_flow_field.stride(2), grad_flow_field.stride(3));
int Threads = CUDA_NUM_THREADS;
const dim3 threads(Threads);
const dim3 blocks((n + Threads - 1) / Threads);
AT_DISPATCH_FLOATING_TYPES(source.type(), "block_extractor_backward", ([&] {
hipLaunchKernelGGL(( kernel_block_extractor_backward<scalar_t>), dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA() ,
n,
source.data<scalar_t>(),
source_size,
source_stride,
flow_field.data<scalar_t>(),
flow_field_size,
flow_field_stride,
grad_output.data<scalar_t>(),
grad_output_size,
grad_output_stride,
grad_source.data<scalar_t>(),
grad_source_size,
grad_source_stride,
grad_flow_field.data<scalar_t>(),
grad_flow_field_size,
grad_flow_field_stride,
kernel_size);
}));
// TODO: Use the ATen equivalent to get last error
// THCudaCheck(hipGetLastError());
} | 26b89d566984fcc1c476d242a90c374c67a4abfa.cu | #include <ATen/ATen.h>
#include <ATen/Context.h>
#include <ATen/cuda/CUDAContext.h>
#define CUDA_NUM_THREADS 256
#define CUDA_MAX_THREADS 256
// #define THREADS_PER_BLOCK 64
#define DIM0(TENSOR) ((TENSOR).x)
#define DIM1(TENSOR) ((TENSOR).y)
#define DIM2(TENSOR) ((TENSOR).z)
#define DIM3(TENSOR) ((TENSOR).w)
#define DIM3_INDEX(TENSOR, xx, yy, zz, ww) ((TENSOR)[((xx) * (TENSOR##_stride.x)) + ((yy) * (TENSOR##_stride.y)) + ((zz) * (TENSOR##_stride.z)) + ((ww) * (TENSOR##_stride.w))])
#define EPS 1e-8
#define SAFE_DIV(a, b) ( (b==0)? ( (a)/(EPS) ): ( (a)/(b) ) )
#define CHECK_LEGALITY(x, min, max) ((x>=min && x<=max)? (true):(false) )
template <typename scalar_t>
__global__ void kernel_block_extractor_update_output(const int n,
const scalar_t* __restrict__ source,
const long4 source_size,
const long4 source_stride,
const scalar_t* __restrict__ flow_field,
const long4 flow_field_size,
const long4 flow_field_stride,
scalar_t* __restrict__ output,
const long4 output_size,
const long4 output_stride,
int kernel_size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
int dim_b = DIM0(output_size);
int dim_c = DIM1(output_size);
int dim_h = DIM2(output_size);
int dim_w = DIM3(output_size);
int dim_chw = DIM0(output_stride);
int dim_hw = DIM1(output_stride);
int dim_hs = DIM2(source_size);
int dim_ws = DIM3(source_size);
int b = ( index / dim_chw ) % dim_b;
int c = ( index / dim_hw ) % dim_c;
int y = ( index / dim_w ) % dim_h;
int x = ( index ) % dim_w;
int yf = y/kernel_size;
int xf = x/kernel_size;
int yf_offset = y%kernel_size - kernel_size/2;
int xf_offset = x%kernel_size - kernel_size/2;
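    // The flow field holds one (x, y) vector per kernel_size x kernel_size block of output
    // pixels; the within-block offset recentres each pixel around its block's flow target.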
scalar_t flow_y = DIM3_INDEX(flow_field, b, 1, yf, xf) + yf_offset;
scalar_t flow_x = DIM3_INDEX(flow_field, b, 0, yf, xf) + xf_offset;
scalar_t dy = flow_y + static_cast<scalar_t>(yf);
scalar_t dx = flow_x + static_cast<scalar_t>(xf);
int xL = max(min( int(floor(dx) ), dim_ws-1), 0);
int xR = max(min( int(floor(dx) + 1), dim_ws-1), 0);
int yT = max(min( int(floor(dy) ), dim_hs-1), 0);
int yB = max(min( int(floor(dy) + 1), dim_hs-1), 0);
scalar_t xL_P = 1 - (dx - floor(dx));
scalar_t xR_P = dx - floor(dx);
scalar_t yT_P = 1 - (dy - floor(dy));
scalar_t yB_P = dy - floor(dy);
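    // Bilinear sampling: (dx, dy) is the fractional source coordinate, xL/xR/yT/yB are the
    // four neighbouring pixels clamped to the source bounds, and the *_P factors are the
    // corresponding interpolation weights.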
scalar_t sample = 0.0f;
sample += (xL_P*yT_P * DIM3_INDEX(source, b, c, yT, xL));
sample += (xR_P*yT_P * DIM3_INDEX(source, b, c, yT, xR));
sample += (xL_P*yB_P * DIM3_INDEX(source, b, c, yB, xL));
sample += (xR_P*yB_P * DIM3_INDEX(source, b, c, yB, xR));
output[index] = sample;
}
template <typename scalar_t>
__global__ void kernel_block_extractor_backward(
const int n,
const scalar_t* __restrict__ source,
const long4 source_size,
const long4 source_stride,
const scalar_t* __restrict__ flow_field,
const long4 flow_field_size,
const long4 flow_field_stride,
const scalar_t* __restrict__ grad_output,
const long4 grad_output_size,
const long4 grad_output_stride,
scalar_t* __restrict__ grad_source,
const long4 grad_source_size,
const long4 grad_source_stride,
scalar_t* __restrict__ grad_flow_field,
const long4 grad_flow_field_size,
const long4 grad_flow_field_stride,
int kernel_size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
int dim_b = DIM0(grad_output_size);
int dim_c = DIM1(grad_output_size);
int dim_h = DIM2(grad_output_size);
int dim_w = DIM3(grad_output_size);
int dim_chw = DIM0(grad_output_stride);
int dim_hw = DIM1(grad_output_stride);
int b = ( index / dim_chw ) % dim_b;
int c = ( index / dim_hw ) % dim_c;
int y = ( index / dim_w ) % dim_h;
int x = ( index ) % dim_w;
int dim_hs = DIM2(source_size);
int dim_ws = DIM3(source_size);
int yf = y/kernel_size;
int xf = x/kernel_size;
int yf_offset = y%kernel_size - kernel_size/2;
int xf_offset = x%kernel_size - kernel_size/2;
scalar_t flow_y = DIM3_INDEX(flow_field, b, 1, yf, xf) + yf_offset;
scalar_t flow_x = DIM3_INDEX(flow_field, b, 0, yf, xf) + xf_offset;
scalar_t dy = flow_y + static_cast<scalar_t>(yf);
scalar_t dx = flow_x + static_cast<scalar_t>(xf);
int xL = max(min( int(floor(dx) ), dim_ws-1), 0);
int xR = max(min( int(floor(dx) + 1), dim_ws-1), 0);
int yT = max(min( int(floor(dy) ), dim_hs-1), 0);
int yB = max(min( int(floor(dy) + 1), dim_hs-1), 0);
scalar_t xL_P = 1 - (dx - floor(dx));
scalar_t xR_P = dx - floor(dx);
scalar_t yT_P = 1 - (dy - floor(dy));
scalar_t yB_P = dy - floor(dy);
scalar_t xL_yT = DIM3_INDEX(source, b, c, yT, xL);
scalar_t xR_yT = DIM3_INDEX(source, b, c, yT, xR);
scalar_t xL_yB = DIM3_INDEX(source, b, c, yB, xL);
scalar_t xR_yB = DIM3_INDEX(source, b, c, yB, xR);
scalar_t grad = DIM3_INDEX(grad_output, b, c, y, x);
atomicAdd(&DIM3_INDEX(grad_source, b, c, yT, xL), grad*xL_P*yT_P);
atomicAdd(&DIM3_INDEX(grad_source, b, c, yT, xR), grad*xR_P*yT_P);
atomicAdd(&DIM3_INDEX(grad_source, b, c, yB, xL), grad*xL_P*yB_P);
atomicAdd(&DIM3_INDEX(grad_source, b, c, yB, xR), grad*xR_P*yB_P);
scalar_t grady = grad*(-xL_P*xL_yT - xR_P*xR_yT + xL_P*xL_yB + xR_P*xR_yB);
scalar_t gradx = grad*(-yT_P*xL_yT - yB_P*xL_yB + yT_P*xR_yT + yB_P*xR_yB);
atomicAdd(&DIM3_INDEX(grad_flow_field, b, 1, yf, xf), grady);
atomicAdd(&DIM3_INDEX(grad_flow_field, b, 0, yf, xf), gradx);
}
void block_extractor_kernel_forward(
at::Tensor& source,
at::Tensor& flow_field,
at::Tensor& output,
int kernel_size) {
// clock_t start, end;
// start = clock();
int n = output.numel();
const long4 source_size = make_long4(source.size(0), source.size(1), source.size(2), source.size(3));
const long4 source_stride = make_long4(source.stride(0), source.stride(1), source.stride(2), source.stride(3));
const long4 flow_field_size = make_long4(flow_field.size(0), flow_field.size(1), flow_field.size(2), flow_field.size(3));
const long4 flow_field_stride = make_long4(flow_field.stride(0), flow_field.stride(1), flow_field.stride(2), flow_field.stride(3));
const long4 output_size = make_long4(output.size(0), output.size(1), output.size(2), output.size(3));
const long4 output_stride = make_long4(output.stride(0), output.stride(1), output.stride(2), output.stride(3));
int Threads = CUDA_NUM_THREADS;
const dim3 threads(Threads);
const dim3 blocks((n + Threads - 1) / Threads);
AT_DISPATCH_FLOATING_TYPES(source.type(), "block_extractor_forward_kernel", ([&] {
kernel_block_extractor_update_output<scalar_t><<< blocks, threads, 0, at::cuda::getCurrentCUDAStream() >>>(
n,
source.data<scalar_t>(),
source_size,
source_stride,
flow_field.data<scalar_t>(),
flow_field_size,
flow_field_stride,
output.data<scalar_t>(),
output_size,
output_stride,
kernel_size);
}));
// end = clock();
// printf("%d\n", end-start);
// TODO: ATen-equivalent check
// THCudaCheck(cudaGetLastError());
}
void block_extractor_kernel_backward(
at::Tensor& source,
at::Tensor& flow_field,
at::Tensor& grad_output,
at::Tensor& grad_source,
at::Tensor& grad_flow_field,
int kernel_size) {
int n = grad_output.numel();
const long4 source_size = make_long4(source.size(0), source.size(1), source.size(2), source.size(3));
const long4 source_stride = make_long4(source.stride(0), source.stride(1), source.stride(2), source.stride(3));
const long4 flow_field_size = make_long4(flow_field.size(0), flow_field.size(1), flow_field.size(2), flow_field.size(3));
const long4 flow_field_stride = make_long4(flow_field.stride(0), flow_field.stride(1), flow_field.stride(2), flow_field.stride(3));
const long4 grad_output_size = make_long4(grad_output.size(0), grad_output.size(1), grad_output.size(2), grad_output.size(3));
const long4 grad_output_stride = make_long4(grad_output.stride(0), grad_output.stride(1), grad_output.stride(2), grad_output.stride(3));
const long4 grad_source_size = make_long4(grad_source.size(0), grad_source.size(1), grad_source.size(2), grad_source.size(3));
const long4 grad_source_stride = make_long4(grad_source.stride(0), grad_source.stride(1), grad_source.stride(2), grad_source.stride(3));
const long4 grad_flow_field_size = make_long4(grad_flow_field.size(0), grad_flow_field.size(1), grad_flow_field.size(2), grad_flow_field.size(3));
const long4 grad_flow_field_stride = make_long4(grad_flow_field.stride(0), grad_flow_field.stride(1), grad_flow_field.stride(2), grad_flow_field.stride(3));
int Threads = CUDA_NUM_THREADS;
const dim3 threads(Threads);
const dim3 blocks((n + Threads - 1) / Threads);
AT_DISPATCH_FLOATING_TYPES(source.type(), "block_extractor_backward", ([&] {
kernel_block_extractor_backward<scalar_t><<< blocks, threads, 0, at::cuda::getCurrentCUDAStream() >>>(
n,
source.data<scalar_t>(),
source_size,
source_stride,
flow_field.data<scalar_t>(),
flow_field_size,
flow_field_stride,
grad_output.data<scalar_t>(),
grad_output_size,
grad_output_stride,
grad_source.data<scalar_t>(),
grad_source_size,
grad_source_stride,
grad_flow_field.data<scalar_t>(),
grad_flow_field_size,
grad_flow_field_stride,
kernel_size);
}));
// TODO: Use the ATen equivalent to get last error
// THCudaCheck(cudaGetLastError());
} |
f58aca21d1539b67f8275e80aed7c880038d5dc1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hpx/hpx_main.hpp> // we don't need an hpx_main that way?
#include <hpx/include/async.hpp>
#include <hpx/include/lcos.hpp>
#include "../include/buffer_manager.hpp"
#include "../include/cuda_helper.hpp"
#include <cstdio>
#include <typeinfo>
#include <chrono>
#include <random>
constexpr size_t N = 20000000;
// constexpr size_t chunksize = 20000 ;
constexpr size_t chunksize = 200000 ;
constexpr size_t passes = 100;
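// A cuda_channel couples a recycled pinned host buffer with a device buffer of N elements
// and provides asynchronous host<->device copies on the helper's stream.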
template<class T, size_t N>
class cuda_channel {
private:
std::vector<T, recycle_allocator_cuda_host<T>> host_side_buffer;
public:
cuda_device_buffer<T> device_side_buffer;
cuda_channel(void) : host_side_buffer(N), device_side_buffer(N) {
}
void cp_from_device(cuda_helper &interface) {
interface.copy_async(host_side_buffer.data(), device_side_buffer.device_side_buffer,
N * sizeof(T), hipMemcpyDeviceToHost);
}
void cp_to_device(cuda_helper &interface) {
interface.copy_async(device_side_buffer.device_side_buffer, host_side_buffer.data(),
N * sizeof(T), hipMemcpyHostToDevice);
}
const T& operator [] (size_t index) const {return host_side_buffer[index];}
T& operator [] (size_t index) {return host_side_buffer[index];}
};
__global__
void saxpy(int n, float a, float *x, float *y)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) y[i] = a*x[i] + y[i];
}
__global__
void mv(const size_t startindex,const size_t chunksize,const size_t N,
double *y, double *erg) {
// Matrix is row major
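    // Each thread computes one row of the product; the matrix is never stored, its entry
    // at (row, col) is simply (row * N + col) % 2.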
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < chunksize) {
double tmp = 0.0;
int start_row = (startindex + i) * N;
for (size_t i = 0; i < N; i ++) {
double a = (i + start_row) % 2;
tmp += a * y[i];
}
erg[i] = tmp;
}
}
// #pragma nv_exec_check_disable
int main(int argc, char *argv[])
{
thread_local cuda_helper cuda_interface(0); //one stream per HPX thread
    // Generate Problem: Repeated (unoptimized) Matrix-Vec
std::random_device rd; //Will be used to obtain a seed for the random number engine
std::mt19937 gen(rd()); //Standard mersenne_twister_engine seeded with rd()
std::uniform_real_distribution<> dis(-1.0, 1.0);
std::vector<double> mult_vector(N + chunksize);
std::vector<double> erg_vector(N + chunksize);
for (auto &entry : mult_vector) {
entry = dis(gen);
}
    // Nonsensical function, used to simulate stuff happening on the cpu
auto cpu_side_function = [](auto &partial_result, size_t chunksize) {
for (size_t i = 0; i < chunksize; i++)
partial_result[i] *= 0.5;
};
// Begin iterations
constexpr size_t number_packages = N / chunksize + static_cast<int>(N % chunksize);
size_t problemsize = N; // interface.execute does not like constexpr so we got these
size_t chunk = chunksize;
std::array<hpx::future<void>, number_packages> futs;
for (size_t i = 0; i < number_packages; i++) {
futs[i]= hpx::make_ready_future<void>();
}
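    // Each chunk gets its own future chain: pass p of a chunk starts only after pass p-1
    // of the same chunk has finished, while different chunks may overlap.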
auto begin = std::chrono::high_resolution_clock::now();
for (size_t pass = 0; pass < passes; pass++) {
// Divide into work packages - Create tasks
size_t fut_index = 0;
for(size_t row_index = 0; row_index < N; row_index += chunksize, fut_index++) {
futs[fut_index] = futs[fut_index].then(
[row_index, &erg_vector, mult_vector, &problemsize, &chunk, &cpu_side_function](hpx::future<void> &&f) {
// Recycle communication channels
cuda_channel<double, chunksize> input;
cuda_channel<double, chunksize> erg;
// Copy into input array
for (size_t i = 0; i < chunksize; i++) {
input[i] = mult_vector[row_index + i];
}
input.cp_to_device(cuda_interface);
// Launch execution
size_t row = row_index;
dim3 const grid_spec((chunksize + 127)/128, 1, 1);
            dim3 const threads_per_block(128, 1, 1);
void* args[] = {&row, &chunk, &problemsize,
&(input.device_side_buffer), &(erg.device_side_buffer)};
cuda_interface.execute(reinterpret_cast<void const*>(&mv), grid_spec, threads_per_block, args, 0);
// Copy results back
erg.cp_from_device(cuda_interface);
// Jump away
auto fut = cuda_interface.get_future();
fut.get();
// To CPU side function
cpu_side_function(erg, chunk);
for (size_t i = 0; i < chunksize; i++) {
std::cerr << row_index + i << " " << i << ";";
erg_vector[row_index + i] = input[i];
}
});
}
}
auto end = std::chrono::high_resolution_clock::now();
std::cout << "\n==>Mults took " << std::chrono::duration_cast<std::chrono::milliseconds>(end - begin).count() << "ms" << std::endl;
}
| f58aca21d1539b67f8275e80aed7c880038d5dc1.cu | #include <hpx/hpx_main.hpp> // we don't need an hpx_main that way?
#include <hpx/include/async.hpp>
#include <hpx/include/lcos.hpp>
#include "../include/buffer_manager.hpp"
#include "../include/cuda_helper.hpp"
#include <cstdio>
#include <typeinfo>
#include <chrono>
#include <random>
constexpr size_t N = 20000000;
// constexpr size_t chunksize = 20000 ;
constexpr size_t chunksize = 200000 ;
constexpr size_t passes = 100;
template<class T, size_t N>
class cuda_channel {
private:
std::vector<T, recycle_allocator_cuda_host<T>> host_side_buffer;
public:
cuda_device_buffer<T> device_side_buffer;
cuda_channel(void) : host_side_buffer(N), device_side_buffer(N) {
}
void cp_from_device(cuda_helper &interface) {
interface.copy_async(host_side_buffer.data(), device_side_buffer.device_side_buffer,
N * sizeof(T), cudaMemcpyDeviceToHost);
}
void cp_to_device(cuda_helper &interface) {
interface.copy_async(device_side_buffer.device_side_buffer, host_side_buffer.data(),
N * sizeof(T), cudaMemcpyHostToDevice);
}
const T& operator [] (size_t index) const {return host_side_buffer[index];}
T& operator [] (size_t index) {return host_side_buffer[index];}
};
__global__
void saxpy(int n, float a, float *x, float *y)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) y[i] = a*x[i] + y[i];
}
__global__
void mv(const size_t startindex,const size_t chunksize,const size_t N,
double *y, double *erg) {
// Matrix is row major
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < chunksize) {
double tmp = 0.0;
int start_row = (startindex + i) * N;
for (size_t i = 0; i < N; i ++) {
double a = (i + start_row) % 2;
tmp += a * y[i];
}
erg[i] = tmp;
}
}
// #pragma nv_exec_check_disable
int main(int argc, char *argv[])
{
thread_local cuda_helper cuda_interface(0); //one stream per HPX thread
    // Generate Problem: Repeated (unoptimized) Matrix-Vec
std::random_device rd; //Will be used to obtain a seed for the random number engine
std::mt19937 gen(rd()); //Standard mersenne_twister_engine seeded with rd()
std::uniform_real_distribution<> dis(-1.0, 1.0);
std::vector<double> mult_vector(N + chunksize);
std::vector<double> erg_vector(N + chunksize);
for (auto &entry : mult_vector) {
entry = dis(gen);
}
    // Nonsensical function, used to simulate stuff happening on the cpu
auto cpu_side_function = [](auto &partial_result, size_t chunksize) {
for (size_t i = 0; i < chunksize; i++)
partial_result[i] *= 0.5;
};
// Begin iterations
constexpr size_t number_packages = N / chunksize + static_cast<int>(N % chunksize);
size_t problemsize = N; // interface.execute does not like constexpr so we got these
size_t chunk = chunksize;
std::array<hpx::future<void>, number_packages> futs;
for (size_t i = 0; i < number_packages; i++) {
futs[i]= hpx::make_ready_future<void>();
}
auto begin = std::chrono::high_resolution_clock::now();
for (size_t pass = 0; pass < passes; pass++) {
// Divide into work packages - Create tasks
size_t fut_index = 0;
for(size_t row_index = 0; row_index < N; row_index += chunksize, fut_index++) {
futs[fut_index] = futs[fut_index].then(
[row_index, &erg_vector, mult_vector, &problemsize, &chunk, &cpu_side_function](hpx::future<void> &&f) {
// Recycle communication channels
cuda_channel<double, chunksize> input;
cuda_channel<double, chunksize> erg;
// Copy into input array
for (size_t i = 0; i < chunksize; i++) {
input[i] = mult_vector[row_index + i];
}
input.cp_to_device(cuda_interface);
// Launch execution
size_t row = row_index;
dim3 const grid_spec((chunksize + 127)/128, 1, 1);
            dim3 const threads_per_block(128, 1, 1);
void* args[] = {&row, &chunk, &problemsize,
&(input.device_side_buffer), &(erg.device_side_buffer)};
cuda_interface.execute(reinterpret_cast<void const*>(&mv), grid_spec, threads_per_block, args, 0);
// Copy results back
erg.cp_from_device(cuda_interface);
// Jump away
auto fut = cuda_interface.get_future();
fut.get();
// To CPU side function
cpu_side_function(erg, chunk);
for (size_t i = 0; i < chunksize; i++) {
std::cerr << row_index + i << " " << i << ";";
erg_vector[row_index + i] = input[i];
}
});
}
}
auto end = std::chrono::high_resolution_clock::now();
std::cout << "\n==>Mults took " << std::chrono::duration_cast<std::chrono::milliseconds>(end - begin).count() << "ms" << std::endl;
}
|
42553a7d9859333881e5e14cdfd0c534a8a5555b.hip | // !!! This is a file automatically generated by hipify!!!
#include <algorithm>
#include <cstdlib>
#include <string>
#include "hipcub/hipcub.hpp"
#include "cnmem.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/core/init.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/tensor.h"
#include "caffe2/utils/string_utils.h"
#define CNMEM_CHECK(condition) \
do { \
cnmemStatus_t error = condition; \
CHECK_EQ(error, CNMEM_STATUS_SUCCESS) << cnmemGetErrorString(error); \
} while (0)
DEFINE_string(caffe2_cuda_memory_pool, "",
"Sets the memory pool used by caffe2. Possible values are "
"none, cnmen and cub.");
DEFINE_double(caffe2_cnmem_reserve, 0.8,
"Sets the proportion of memory pre-allocated by the memory "
"pool if you use cnmem.");
DEFINE_string(caffe2_cnmem_gpus, "",
"A comma separated list containing the index of gpus that "
"we will set the memory pool on. If not set, we will set "
"up the memory pool on all available GPUs. This only applies "
"to cnmem.");
// TODO(jiayq): Figure out the best default values for the params below.
// Currently we are using the setting copied from caffe.
DEFINE_int32(caffe2_cub_bin_growth, 2,
"If using cub as the memory allocator, sets the growth of bins "
"used by the cub pool.");
DEFINE_int32(caffe2_cub_min_bin, 6,
"If using cub as the memory allocator, sets the min number of "
"bins.");
DEFINE_int32(caffe2_cub_max_bin, 16,
"If using cub as the memory allocator, sets the max number of "
"bins.");
namespace caffe2 {
CAFFE_KNOWN_TYPE(Tensor<CUDAContext>);
thread_local ThreadLocalCUDAObjects CUDAContext::cuda_objects_;
// Static global variables for setting up the memory pool.
CudaMemoryPoolType g_cuda_memory_pool_type;
bool g_memory_allocation_already_called = false;
// For cnmem allocator
vector<bool> g_cnmem_available_for_device(NumCudaDevices(), false);
// For cub allocator
unique_ptr<hipcub::CachingDeviceAllocator> g_cub_allocator;
CudaMemoryPoolType GetCudaMemoryPoolType() {
return g_cuda_memory_pool_type;
}
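// Allocation is routed through the configured pool: plain hipMalloc/hipFree when no pool
// is selected, the cnmem pool, or cub's caching device allocator.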
void* CUDAContext::New(size_t nbytes) {
g_memory_allocation_already_called = true;
void* ptr = nullptr;
switch (g_cuda_memory_pool_type) {
case CudaMemoryPoolType::NONE:
CUDA_CHECK(hipMalloc(&ptr, nbytes));
return ptr;
case CudaMemoryPoolType::CNMEM:
CAFFE_ENFORCE(
g_cnmem_available_for_device[GetCurrentGPUID()],
"Trying to allocate on device ", GetCurrentGPUID(),
" but cnmem pool is not set up for it.");
CNMEM_CHECK(cnmemMalloc(&ptr, nbytes, nullptr));
return ptr;
case CudaMemoryPoolType::CUB:
CUDA_CHECK(g_cub_allocator->DeviceAllocate(&ptr, nbytes));
return ptr;
}
return nullptr;
}
void CUDAContext::Delete(void* ptr) {
switch (g_cuda_memory_pool_type) {
case CudaMemoryPoolType::NONE: {
// If memory pool is not set up, use simple hipFree.
hipError_t error = hipFree(ptr);
// For some reason, in Python runtime we sometimes delete a data pointer
// after the cuda runtime exits - this is odd but is probably caused by
// a static workspace that pycaffe2 uses, and the destruction got
// entangled in some race condition. Anyway, since cuda runtime is exiting
// anyway, we will not need to worry about memory leak, so we basically
// ignore it. This is definitely not ideal but works for now.
if (error != hipSuccess && error != hipErrorDeinitialized) {
LOG(FATAL) << "Error at: " << __FILE__ << ":" << __LINE__ << ": "
<< hipGetErrorString(error);
}
break; }
case CudaMemoryPoolType::CNMEM:
CNMEM_CHECK(cnmemFree(ptr, nullptr));
break;
case CudaMemoryPoolType::CUB:
CUDA_CHECK(g_cub_allocator->DeviceFree(ptr));
break;
}
}
static void SetUpCNMEM() {
VLOG(1) << "Setting up cnmem memory pool.";
vector<int> device_ids;
// If the cnmem gpus are not set, set up all gpus.
if (FLAGS_caffe2_cnmem_gpus.size() == 0) {
device_ids.resize(NumCudaDevices());
for (int i = 0; i < device_ids.size(); ++i) {
device_ids[i] = i;
}
} else {
vector<string> device_ids_str = split(',', FLAGS_caffe2_cnmem_gpus);
for (const string& id_str : device_ids_str) {
int id = 0;
try {
id = std::stoi(id_str);
} catch (...) {
CAFFE_THROW(
"Cannot parse device id ",
id_str,
" to a valid int number.");
}
device_ids.push_back(id);
}
}
CAFFE_ENFORCE(FLAGS_caffe2_cnmem_reserve >= 0 &&
FLAGS_caffe2_cnmem_reserve < 1.0,
"caffe2_cnmem_reserve number must be in [0, 1)");
vector<cnmemDevice_t> cnmem_devs(device_ids.size());
for (int i = 0; i < device_ids.size(); ++i) {
const int id = device_ids[i];
CAFFE_ENFORCE(
id >= 0 && id < NumCudaDevices(),
"GPU id ", id, " out of the range of available GPUs.");
DeviceGuard guard(id);
size_t free, used;
CUDA_CHECK(hipMemGetInfo(&free, &used));
VLOG(1) << "Reserving " << FLAGS_caffe2_cnmem_reserve * 100
<< " percent of the free memory (total " << free
<< ") on device " << id;
// Note: we create a dummy non-null stream for memory allocations, so that
// any malloc can be called from any cuda stream, since caffe2 uses a lot of
// non-default streams for computation. We will allocate all the reserved
// memory to that non-null stream.
cnmem_devs[i].device = id;
cnmem_devs[i].size = size_t(FLAGS_caffe2_cnmem_reserve * free);
cnmem_devs[i].numStreams = 0;
cnmem_devs[i].streamSizes = nullptr;
g_cnmem_available_for_device[id] = true;
}
CNMEM_CHECK(
cnmemInit(cnmem_devs.size(), cnmem_devs.data(), CNMEM_FLAGS_DEFAULT));
VLOG(1) << "Done setting up cnmem memory pool.";
}
static void SetUpCub() {
VLOG(1) << "Setting up cub memory pool.";
const bool k_cub_debug =
#ifdef NDEBUG
false;
#else
true;
#endif
// Sets up the cub memory pool
try {
g_cub_allocator.reset(new hipcub::CachingDeviceAllocator(
FLAGS_caffe2_cub_bin_growth,
FLAGS_caffe2_cub_min_bin,
FLAGS_caffe2_cub_max_bin,
static_cast<size_t>(-1),
false,
k_cub_debug));
} catch (...) {
CAFFE_THROW("Some error happened at cub initialization.");
}
VLOG(1) << "Done setting up cub memory pool.";
}
// Global initialization function to set up the cuda memory pool during
// construction time.
bool Caffe2SetCUDAMemoryPool(int*, char***) {
if (!HasCudaGPU()) {
VLOG(1) << "No GPU present. I won't set up cuda memory pool";
return true;
}
if (g_memory_allocation_already_called) {
LOG(ERROR) << "Caffe2SetCUDAMemoryPool should always be called before "
"any CUDAContext::New() calls are made.";
return false;
}
if (FLAGS_caffe2_cuda_memory_pool == "" ||
FLAGS_caffe2_cuda_memory_pool == "none") {
g_cuda_memory_pool_type = CudaMemoryPoolType::NONE;
return true;
} else if (FLAGS_caffe2_cuda_memory_pool == "cnmem") {
// sets up cnmem.
g_cuda_memory_pool_type = CudaMemoryPoolType::CNMEM;
SetUpCNMEM();
return true;
} else if (FLAGS_caffe2_cuda_memory_pool == "cub") {
// Sets up cub.
g_cuda_memory_pool_type = CudaMemoryPoolType::CUB;
SetUpCub();
return true;
}
LOG(ERROR) << "Unrecognized cuda memory pool type: "
<< FLAGS_caffe2_cuda_memory_pool;
return false;
}
// An initialization function that sets the CPU side to use pinned cpu
// allocator.
bool Caffe2UsePinnedCPUAllocator(int*, char***) {
#ifdef __SANITIZE_ADDRESS__
// Note(jiayq): for more details, see
// https://github.com/google/sanitizers/issues/629
LOG(WARNING) << "There are known issues between address sanitizer and "
"hipHostMalloc. As a result, caffe2 will not enable pinned "
"memory allocation in asan mode. If you are expecting any "
"behavior that depends on asan, be advised that it is not "
"turned on.";
return true;
#else
if (!HasCudaGPU()) {
VLOG(1) << "No GPU present. I won't use pinned allocator then.";
return true;
}
VLOG(1) << "Caffe2 gpu: setting CPUAllocator to PinnedCPUAllocator.";
SetCPUAllocator(new PinnedCPUAllocator());
return true;
#endif
}
REGISTER_CAFFE2_INIT_FUNCTION(Caffe2SetCUDAMemoryPool,
&Caffe2SetCUDAMemoryPool,
"Sets up the cuda memory pool.");
REGISTER_CAFFE2_INIT_FUNCTION(Caffe2UsePinnedCPUAllocator,
&Caffe2UsePinnedCPUAllocator,
"Make the CPU side use pinned memory.");
} // namespace caffe2
| 42553a7d9859333881e5e14cdfd0c534a8a5555b.cu | #include <algorithm>
#include <cstdlib>
#include <string>
#include "cub/util_allocator.cuh"
#include "cnmem.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/core/init.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/tensor.h"
#include "caffe2/utils/string_utils.h"
#define CNMEM_CHECK(condition) \
do { \
cnmemStatus_t error = condition; \
CHECK_EQ(error, CNMEM_STATUS_SUCCESS) << cnmemGetErrorString(error); \
} while (0)
DEFINE_string(caffe2_cuda_memory_pool, "",
"Sets the memory pool used by caffe2. Possible values are "
"none, cnmen and cub.");
DEFINE_double(caffe2_cnmem_reserve, 0.8,
"Sets the proportion of memory pre-allocated by the memory "
"pool if you use cnmem.");
DEFINE_string(caffe2_cnmem_gpus, "",
"A comma separated list containing the index of gpus that "
"we will set the memory pool on. If not set, we will set "
"up the memory pool on all available GPUs. This only applies "
"to cnmem.");
// TODO(jiayq): Figure out the best default values for the params below.
// Currently we are using the setting copied from caffe.
DEFINE_int32(caffe2_cub_bin_growth, 2,
"If using cub as the memory allocator, sets the growth of bins "
"used by the cub pool.");
DEFINE_int32(caffe2_cub_min_bin, 6,
"If using cub as the memory allocator, sets the min number of "
"bins.");
DEFINE_int32(caffe2_cub_max_bin, 16,
"If using cub as the memory allocator, sets the max number of "
"bins.");
namespace caffe2 {
CAFFE_KNOWN_TYPE(Tensor<CUDAContext>);
thread_local ThreadLocalCUDAObjects CUDAContext::cuda_objects_;
// Static global variables for setting up the memory pool.
CudaMemoryPoolType g_cuda_memory_pool_type;
bool g_memory_allocation_already_called = false;
// For cnmem allocator
vector<bool> g_cnmem_available_for_device(NumCudaDevices(), false);
// For cub allocator
unique_ptr<cub::CachingDeviceAllocator> g_cub_allocator;
CudaMemoryPoolType GetCudaMemoryPoolType() {
return g_cuda_memory_pool_type;
}
void* CUDAContext::New(size_t nbytes) {
g_memory_allocation_already_called = true;
void* ptr = nullptr;
switch (g_cuda_memory_pool_type) {
case CudaMemoryPoolType::NONE:
CUDA_CHECK(cudaMalloc(&ptr, nbytes));
return ptr;
case CudaMemoryPoolType::CNMEM:
CAFFE_ENFORCE(
g_cnmem_available_for_device[GetCurrentGPUID()],
"Trying to allocate on device ", GetCurrentGPUID(),
" but cnmem pool is not set up for it.");
CNMEM_CHECK(cnmemMalloc(&ptr, nbytes, nullptr));
return ptr;
case CudaMemoryPoolType::CUB:
CUDA_CHECK(g_cub_allocator->DeviceAllocate(&ptr, nbytes));
return ptr;
}
return nullptr;
}
void CUDAContext::Delete(void* ptr) {
switch (g_cuda_memory_pool_type) {
case CudaMemoryPoolType::NONE: {
// If memory pool is not set up, use simple cudaFree.
cudaError_t error = cudaFree(ptr);
// For some reason, in Python runtime we sometimes delete a data pointer
// after the cuda runtime exits - this is odd but is probably caused by
// a static workspace that pycaffe2 uses, and the destruction got
// entangled in some race condition. Anyway, since cuda runtime is exiting
// anyway, we will not need to worry about memory leak, so we basically
// ignore it. This is definitely not ideal but works for now.
if (error != cudaSuccess && error != cudaErrorCudartUnloading) {
LOG(FATAL) << "Error at: " << __FILE__ << ":" << __LINE__ << ": "
<< cudaGetErrorString(error);
}
break; }
case CudaMemoryPoolType::CNMEM:
CNMEM_CHECK(cnmemFree(ptr, nullptr));
break;
case CudaMemoryPoolType::CUB:
CUDA_CHECK(g_cub_allocator->DeviceFree(ptr));
break;
}
}
static void SetUpCNMEM() {
VLOG(1) << "Setting up cnmem memory pool.";
vector<int> device_ids;
// If the cnmem gpus are not set, set up all gpus.
if (FLAGS_caffe2_cnmem_gpus.size() == 0) {
device_ids.resize(NumCudaDevices());
for (int i = 0; i < device_ids.size(); ++i) {
device_ids[i] = i;
}
} else {
vector<string> device_ids_str = split(',', FLAGS_caffe2_cnmem_gpus);
for (const string& id_str : device_ids_str) {
int id = 0;
try {
id = std::stoi(id_str);
} catch (...) {
CAFFE_THROW(
"Cannot parse device id ",
id_str,
" to a valid int number.");
}
device_ids.push_back(id);
}
}
CAFFE_ENFORCE(FLAGS_caffe2_cnmem_reserve >= 0 &&
FLAGS_caffe2_cnmem_reserve < 1.0,
"caffe2_cnmem_reserve number must be in [0, 1)");
vector<cnmemDevice_t> cnmem_devs(device_ids.size());
for (int i = 0; i < device_ids.size(); ++i) {
const int id = device_ids[i];
CAFFE_ENFORCE(
id >= 0 && id < NumCudaDevices(),
"GPU id ", id, " out of the range of available GPUs.");
DeviceGuard guard(id);
size_t free, used;
CUDA_CHECK(cudaMemGetInfo(&free, &used));
VLOG(1) << "Reserving " << FLAGS_caffe2_cnmem_reserve * 100
<< " percent of the free memory (total " << free
<< ") on device " << id;
// Note: we create a dummy non-null stream for memory allocations, so that
// any malloc can be called from any cuda stream, since caffe2 uses a lot of
// non-default streams for computation. We will allocate all the reserved
// memory to that non-null stream.
cnmem_devs[i].device = id;
cnmem_devs[i].size = size_t(FLAGS_caffe2_cnmem_reserve * free);
cnmem_devs[i].numStreams = 0;
cnmem_devs[i].streamSizes = nullptr;
g_cnmem_available_for_device[id] = true;
}
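  // cnmemInit registers all configured devices in a single call; CUDAContext::New()
  // checks g_cnmem_available_for_device to reject allocations on devices left out of
  // the pool.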
CNMEM_CHECK(
cnmemInit(cnmem_devs.size(), cnmem_devs.data(), CNMEM_FLAGS_DEFAULT));
VLOG(1) << "Done setting up cnmem memory pool.";
}
static void SetUpCub() {
VLOG(1) << "Setting up cub memory pool.";
const bool k_cub_debug =
#ifdef NDEBUG
false;
#else
true;
#endif
// Sets up the cub memory pool
try {
g_cub_allocator.reset(new cub::CachingDeviceAllocator(
FLAGS_caffe2_cub_bin_growth,
FLAGS_caffe2_cub_min_bin,
FLAGS_caffe2_cub_max_bin,
static_cast<size_t>(-1),
false,
k_cub_debug));
} catch (...) {
CAFFE_THROW("Some error happened at cub initialization.");
}
VLOG(1) << "Done setting up cub memory pool.";
}
// Global initialization function to set up the cuda memory pool during
// construction time.
bool Caffe2SetCUDAMemoryPool(int*, char***) {
if (!HasCudaGPU()) {
VLOG(1) << "No GPU present. I won't set up cuda memory pool";
return true;
}
if (g_memory_allocation_already_called) {
LOG(ERROR) << "Caffe2SetCUDAMemoryPool should always be called before "
"any CUDAContext::New() calls are made.";
return false;
}
if (FLAGS_caffe2_cuda_memory_pool == "" ||
FLAGS_caffe2_cuda_memory_pool == "none") {
g_cuda_memory_pool_type = CudaMemoryPoolType::NONE;
return true;
} else if (FLAGS_caffe2_cuda_memory_pool == "cnmem") {
// sets up cnmem.
g_cuda_memory_pool_type = CudaMemoryPoolType::CNMEM;
SetUpCNMEM();
return true;
} else if (FLAGS_caffe2_cuda_memory_pool == "cub") {
// Sets up cub.
g_cuda_memory_pool_type = CudaMemoryPoolType::CUB;
SetUpCub();
return true;
}
LOG(ERROR) << "Unrecognized cuda memory pool type: "
<< FLAGS_caffe2_cuda_memory_pool;
return false;
}
// An initialization function that sets the CPU side to use pinned cpu
// allocator.
bool Caffe2UsePinnedCPUAllocator(int*, char***) {
#ifdef __SANITIZE_ADDRESS__
// Note(jiayq): for more details, see
// https://github.com/google/sanitizers/issues/629
LOG(WARNING) << "There are known issues between address sanitizer and "
"cudaMallocHost. As a result, caffe2 will not enable pinned "
"memory allocation in asan mode. If you are expecting any "
"behavior that depends on asan, be advised that it is not "
"turned on.";
return true;
#else
if (!HasCudaGPU()) {
VLOG(1) << "No GPU present. I won't use pinned allocator then.";
return true;
}
VLOG(1) << "Caffe2 gpu: setting CPUAllocator to PinnedCPUAllocator.";
SetCPUAllocator(new PinnedCPUAllocator());
return true;
#endif
}
REGISTER_CAFFE2_INIT_FUNCTION(Caffe2SetCUDAMemoryPool,
&Caffe2SetCUDAMemoryPool,
"Sets up the cuda memory pool.");
REGISTER_CAFFE2_INIT_FUNCTION(Caffe2UsePinnedCPUAllocator,
&Caffe2UsePinnedCPUAllocator,
"Make the CPU side use pinned memory.");
} // namespace caffe2
|
5f9df2a818703858cbc6e333573e0e897d7fb802.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>
#define NUM 10000000
#define CUDA_ERROR_EXIT(str) do{\
hipError_t err = hipGetLastError();\
if( err != hipSuccess){\
printf("Cuda Error: '%s' for %s\n", hipGetErrorString(err), str);\
exit(-1);\
}\
}while(0);
#define TDIFF(start, end) ((end.tv_sec - start.tv_sec) * 1000000UL + (end.tv_usec - start.tv_usec))
struct num_array{
double num1;
double num2;
double result;
};
__device__ void function(struct num_array *a)
{
double square = a ->num1 * a->num1 + a->num2 * a->num2 + 2 * a->num1 * a->num2;
a->result = log(square)/sin(square);
return;
}
__global__ void calculate(char *mem, int num,int rows,int blocks)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//int i = threadIdx.x;
int j = threadIdx.y;
if(i >= num)
return;
struct num_array *a = (struct num_array *)(mem +3*(rows*blocks*j+i)*sizeof(double));
function(a);
}
int main(int argc, char **argv)
{
struct timeval start, end, t_start, t_end;
int i;
struct num_array *pa;
char *ptr;
char *sptr;
char *gpu_mem;
unsigned long num = NUM,rows,cols; /*Default value of num from MACRO*/
int blocks;
if(argc == 4){
num = atoi(argv[1]);
rows = atoi(argv[2]);
cols = atoi(argv[3]);
if(num <= 0)
num = NUM;
}
/* Allocate host (CPU) memory and initialize*/
ptr = (char *)malloc(num * 3 * sizeof(double));
sptr = ptr;
for(i=0; i<num; ++i){
pa = (struct num_array *) sptr;
pa->num1 = (double) i + (double) i * 0.1;
pa->num2 = pa->num1 + 1.0;
sptr += 3 * sizeof(double);
}
gettimeofday(&t_start, NULL);
/* Allocate GPU memory and copy from CPU --> GPU*/
hipMalloc(&gpu_mem, num * 3 * sizeof(double));
CUDA_ERROR_EXIT("hipMalloc");
hipMemcpy(gpu_mem, ptr, num * 3 * sizeof(double) , hipMemcpyHostToDevice);
CUDA_ERROR_EXIT("hipMemcpy");
dim3 threadsperblock(rows,cols);
gettimeofday(&start, NULL);
blocks = num /1024;
if(num % 1024)
++blocks;
hipLaunchKernelGGL(( calculate), dim3(blocks), dim3(threadsperblock), 0, 0, gpu_mem, num,rows,blocks);
CUDA_ERROR_EXIT("kernel invocation");
gettimeofday(&end, NULL);
/* Copy back result*/
hipMemcpy(ptr, gpu_mem, num * 3 * sizeof(double) , hipMemcpyDeviceToHost);
CUDA_ERROR_EXIT("memcpy");
gettimeofday(&t_end, NULL);
printf("Total time = %ld microsecs Processsing =%ld microsecs\n", TDIFF(t_start, t_end), TDIFF(start, end));
hipFree(gpu_mem);
sptr = ptr;
/*Print the last element for sanity check*/
pa = (struct num_array *) (sptr + (num-1)*3*sizeof(double));
printf("num1=%f num2=%f result=%f\n", pa->num1, pa->num2, pa->result);
free(ptr);
}
| 5f9df2a818703858cbc6e333573e0e897d7fb802.cu | #include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>
#define NUM 10000000
#define CUDA_ERROR_EXIT(str) do{\
cudaError err = cudaGetLastError();\
if( err != cudaSuccess){\
printf("Cuda Error: '%s' for %s\n", cudaGetErrorString(err), str);\
exit(-1);\
}\
}while(0);
#define TDIFF(start, end) ((end.tv_sec - start.tv_sec) * 1000000UL + (end.tv_usec - start.tv_usec))
struct num_array{
double num1;
double num2;
double result;
};
__device__ void function(struct num_array *a)
{
double square = a ->num1 * a->num1 + a->num2 * a->num2 + 2 * a->num1 * a->num2;
a->result = log(square)/sin(square);
return;
}
__global__ void calculate(char *mem, int num,int rows,int blocks)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//int i = threadIdx.x;
int j = threadIdx.y;
if(i >= num)
return;
struct num_array *a = (struct num_array *)(mem +3*(rows*blocks*j+i)*sizeof(double));
function(a);
}
int main(int argc, char **argv)
{
struct timeval start, end, t_start, t_end;
int i;
struct num_array *pa;
char *ptr;
char *sptr;
char *gpu_mem;
unsigned long num = NUM,rows,cols; /*Default value of num from MACRO*/
int blocks;
if(argc == 4){
num = atoi(argv[1]);
rows = atoi(argv[2]);
cols = atoi(argv[3]);
if(num <= 0)
num = NUM;
}
/* Allocate host (CPU) memory and initialize*/
ptr = (char *)malloc(num * 3 * sizeof(double));
sptr = ptr;
for(i=0; i<num; ++i){
pa = (struct num_array *) sptr;
pa->num1 = (double) i + (double) i * 0.1;
pa->num2 = pa->num1 + 1.0;
sptr += 3 * sizeof(double);
}
gettimeofday(&t_start, NULL);
/* Allocate GPU memory and copy from CPU --> GPU*/
cudaMalloc(&gpu_mem, num * 3 * sizeof(double));
CUDA_ERROR_EXIT("cudaMalloc");
cudaMemcpy(gpu_mem, ptr, num * 3 * sizeof(double) , cudaMemcpyHostToDevice);
CUDA_ERROR_EXIT("cudaMemcpy");
dim3 threadsperblock(rows,cols);
gettimeofday(&start, NULL);
blocks = num /1024;
if(num % 1024)
++blocks;
calculate<<<blocks, threadsperblock>>>(gpu_mem, num,rows,blocks);
CUDA_ERROR_EXIT("kernel invocation");
gettimeofday(&end, NULL);
/* Copy back result*/
cudaMemcpy(ptr, gpu_mem, num * 3 * sizeof(double) , cudaMemcpyDeviceToHost);
CUDA_ERROR_EXIT("memcpy");
gettimeofday(&t_end, NULL);
printf("Total time = %ld microsecs Processsing =%ld microsecs\n", TDIFF(t_start, t_end), TDIFF(start, end));
cudaFree(gpu_mem);
sptr = ptr;
/*Print the last element for sanity check*/
pa = (struct num_array *) (sptr + (num-1)*3*sizeof(double));
printf("num1=%f num2=%f result=%f\n", pa->num1, pa->num2, pa->result);
free(ptr);
}
|
38984fffdea273d70972dbff2058182bad58c78f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "yololayer.h"
using namespace Yolo;
namespace nvinfer1
{
YoloLayerPlugin::YoloLayerPlugin()
{
mClassCount = CLASS_NUM;
mYoloKernel.clear();
mYoloKernel.push_back(yolo1);
mYoloKernel.push_back(yolo2);
mYoloKernel.push_back(yolo3);
mKernelCount = mYoloKernel.size();
}
YoloLayerPlugin::~YoloLayerPlugin()
{
}
// create the plugin at runtime from a byte stream
YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length)
{
using namespace Tn;
const char *d = reinterpret_cast<const char *>(data), *a = d;
read(d, mClassCount);
read(d, mThreadCount);
read(d, mKernelCount);
mYoloKernel.resize(mKernelCount);
auto kernelSize = mKernelCount*sizeof(YoloKernel);
memcpy(mYoloKernel.data(),d,kernelSize);
d += kernelSize;
assert(d == a + length);
}
void YoloLayerPlugin::serialize(void* buffer) const
{
using namespace Tn;
char* d = static_cast<char*>(buffer), *a = d;
write(d, mClassCount);
write(d, mThreadCount);
write(d, mKernelCount);
auto kernelSize = mKernelCount*sizeof(YoloKernel);
memcpy(d,mYoloKernel.data(),kernelSize);
d += kernelSize;
assert(d == a + getSerializationSize());
}
size_t YoloLayerPlugin::getSerializationSize() const
{
return sizeof(mClassCount) + sizeof(mThreadCount) + sizeof(mKernelCount) + sizeof(Yolo::YoloKernel) * mYoloKernel.size();
}
int YoloLayerPlugin::initialize()
{
return 0;
}
Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims)
{
        // Output is flattened into the channel dimension: one count value followed by
        // MAX_OUTPUT_BBOX_COUNT detections.
int totalsize = MAX_OUTPUT_BBOX_COUNT * sizeof(Detection) / sizeof(float);
return Dims3(totalsize + 1, 1, 1);
}
// Set plugin namespace
void YoloLayerPlugin::setPluginNamespace(const char* pluginNamespace)
{
mPluginNamespace = pluginNamespace;
}
const char* YoloLayerPlugin::getPluginNamespace() const
{
return mPluginNamespace;
}
// Return the DataType of the plugin output at the requested index
DataType YoloLayerPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const
{
return DataType::kFLOAT;
}
// Return true if output tensor is broadcast across a batch.
bool YoloLayerPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const
{
return false;
}
// Return true if plugin can use input that is broadcast across batch without replication.
bool YoloLayerPlugin::canBroadcastInputAcrossBatch(int inputIndex) const
{
return false;
}
void YoloLayerPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput)
{
}
// Attach the plugin object to an execution context and grant the plugin the access to some context resource.
void YoloLayerPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator)
{
}
// Detach the plugin object from its execution context.
void YoloLayerPlugin::detachFromContext() {}
const char* YoloLayerPlugin::getPluginType() const
{
return "YoloLayer_TRT";
}
const char* YoloLayerPlugin::getPluginVersion() const
{
return "1";
}
void YoloLayerPlugin::destroy()
{
delete this;
}
// Clone the plugin
IPluginV2IOExt* YoloLayerPlugin::clone() const
{
YoloLayerPlugin *p = new YoloLayerPlugin();
p->setPluginNamespace(mPluginNamespace);
return p;
}
__device__ float Logist(float data){ return 1./(1. + exp(-data)); };
__global__ void CalDetection(const float *input, float *output,int noElements,
int yoloWidth,int yoloHeight,const float anchors[CHECK_COUNT*2],int classes,int outputElem) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= noElements) return;
int total_grid = yoloWidth * yoloHeight;
int bnIdx = idx / total_grid;
idx = idx - total_grid*bnIdx;
int info_len_i = 5 + classes;
const float* curInput = input + bnIdx * (info_len_i * total_grid * CHECK_COUNT);
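        // Per grid cell the input holds CHECK_COUNT anchors, each with (5 + classes)
        // channels laid out channel-major:
        // value(k, ch, cell) = curInput[cell + k * (5 + classes) * total_grid + ch * total_grid].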
for (int k = 0; k < 3; ++k) {
int class_id = 0;
float max_cls_prob = 0.0;
for (int i = 5; i < info_len_i; ++i) {
float p = Logist(curInput[idx + k * info_len_i * total_grid + i * total_grid]);
if (p > max_cls_prob) {
max_cls_prob = p;
class_id = i - 5;
}
}
float box_prob = Logist(curInput[idx + k * info_len_i * total_grid + 4 * total_grid]);
if (max_cls_prob < IGNORE_THRESH || box_prob < IGNORE_THRESH) continue;
float *res_count = output + bnIdx*outputElem;
int count = (int)atomicAdd(res_count, 1);
if (count >= MAX_OUTPUT_BBOX_COUNT) return;
char* data = (char * )res_count + sizeof(float) + count*sizeof(Detection);
Detection* det = (Detection*)(data);
int row = idx / yoloWidth;
int col = idx % yoloWidth;
//Location
det->bbox[0] = (col + Logist(curInput[idx + k * info_len_i * total_grid + 0 * total_grid])) * INPUT_W / yoloWidth;
det->bbox[1] = (row + Logist(curInput[idx + k * info_len_i * total_grid + 1 * total_grid])) * INPUT_H / yoloHeight;
det->bbox[2] = exp(curInput[idx + k * info_len_i * total_grid + 2 * total_grid]) * anchors[2*k];
det->bbox[3] = exp(curInput[idx + k * info_len_i * total_grid + 3 * total_grid]) * anchors[2*k + 1];
det->det_confidence = box_prob;
det->class_id = class_id;
det->class_confidence = max_cls_prob;
}
}
void YoloLayerPlugin::forwardGpu(const float *const * inputs, float* output, hipStream_t stream, int batchSize) {
void* devAnchor;
size_t AnchorLen = sizeof(float)* CHECK_COUNT*2;
CUDA_CHECK(hipMalloc(&devAnchor,AnchorLen));
int outputElem = 1 + MAX_OUTPUT_BBOX_COUNT * sizeof(Detection) / sizeof(float);
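        // Each batch slot begins with one float holding the detection count, followed by
        // up to MAX_OUTPUT_BBOX_COUNT Detection structs.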
for(int idx = 0 ; idx < batchSize; ++idx) {
CUDA_CHECK(hipMemset(output + idx*outputElem, 0, sizeof(float)));
}
int numElem = 0;
for (unsigned int i = 0;i< mYoloKernel.size();++i)
{
const auto& yolo = mYoloKernel[i];
numElem = yolo.width*yolo.height*batchSize;
if (numElem < mThreadCount)
mThreadCount = numElem;
CUDA_CHECK(hipMemcpy(devAnchor, yolo.anchors, AnchorLen, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( CalDetection), dim3((yolo.width*yolo.height*batchSize + mThreadCount - 1) / mThreadCount), dim3(mThreadCount), 0, 0,
inputs[i],output, numElem, yolo.width, yolo.height, (float *)devAnchor, mClassCount ,outputElem);
}
CUDA_CHECK(hipFree(devAnchor));
}
int YoloLayerPlugin::enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, hipStream_t stream)
{
//assert(batchSize == 1);
//GPU
//CUDA_CHECK(hipStreamSynchronize(stream));
forwardGpu((const float *const *)inputs, (float*)outputs[0], stream, batchSize);
return 0;
}
PluginFieldCollection YoloPluginCreator::mFC{};
std::vector<PluginField> YoloPluginCreator::mPluginAttributes;
YoloPluginCreator::YoloPluginCreator()
{
mPluginAttributes.clear();
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
const char* YoloPluginCreator::getPluginName() const
{
return "YoloLayer_TRT";
}
const char* YoloPluginCreator::getPluginVersion() const
{
return "1";
}
const PluginFieldCollection* YoloPluginCreator::getFieldNames()
{
return &mFC;
}
IPluginV2IOExt* YoloPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc)
{
YoloLayerPlugin* obj = new YoloLayerPlugin();
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
IPluginV2IOExt* YoloPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength)
{
// This object will be deleted when the network is destroyed, which will
// call MishPlugin::destroy()
YoloLayerPlugin* obj = new YoloLayerPlugin(serialData, serialLength);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
}
| 38984fffdea273d70972dbff2058182bad58c78f.cu | #include "yololayer.h"
using namespace Yolo;
namespace nvinfer1
{
YoloLayerPlugin::YoloLayerPlugin()
{
mClassCount = CLASS_NUM;
mYoloKernel.clear();
mYoloKernel.push_back(yolo1);
mYoloKernel.push_back(yolo2);
mYoloKernel.push_back(yolo3);
mKernelCount = mYoloKernel.size();
}
YoloLayerPlugin::~YoloLayerPlugin()
{
}
// create the plugin at runtime from a byte stream
YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length)
{
using namespace Tn;
const char *d = reinterpret_cast<const char *>(data), *a = d;
read(d, mClassCount);
read(d, mThreadCount);
read(d, mKernelCount);
mYoloKernel.resize(mKernelCount);
auto kernelSize = mKernelCount*sizeof(YoloKernel);
memcpy(mYoloKernel.data(),d,kernelSize);
d += kernelSize;
assert(d == a + length);
}
void YoloLayerPlugin::serialize(void* buffer) const
{
using namespace Tn;
char* d = static_cast<char*>(buffer), *a = d;
write(d, mClassCount);
write(d, mThreadCount);
write(d, mKernelCount);
auto kernelSize = mKernelCount*sizeof(YoloKernel);
memcpy(d,mYoloKernel.data(),kernelSize);
d += kernelSize;
assert(d == a + getSerializationSize());
}
size_t YoloLayerPlugin::getSerializationSize() const
{
return sizeof(mClassCount) + sizeof(mThreadCount) + sizeof(mKernelCount) + sizeof(Yolo::YoloKernel) * mYoloKernel.size();
}
int YoloLayerPlugin::initialize()
{
return 0;
}
Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims)
{
        // Output is flattened into the channel dimension: one count value followed by
        // MAX_OUTPUT_BBOX_COUNT detections.
int totalsize = MAX_OUTPUT_BBOX_COUNT * sizeof(Detection) / sizeof(float);
return Dims3(totalsize + 1, 1, 1);
}
// Set plugin namespace
void YoloLayerPlugin::setPluginNamespace(const char* pluginNamespace)
{
mPluginNamespace = pluginNamespace;
}
const char* YoloLayerPlugin::getPluginNamespace() const
{
return mPluginNamespace;
}
// Return the DataType of the plugin output at the requested index
DataType YoloLayerPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const
{
return DataType::kFLOAT;
}
// Return true if output tensor is broadcast across a batch.
bool YoloLayerPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const
{
return false;
}
// Return true if plugin can use input that is broadcast across batch without replication.
bool YoloLayerPlugin::canBroadcastInputAcrossBatch(int inputIndex) const
{
return false;
}
void YoloLayerPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput)
{
}
// Attach the plugin object to an execution context and grant the plugin the access to some context resource.
void YoloLayerPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator)
{
}
// Detach the plugin object from its execution context.
void YoloLayerPlugin::detachFromContext() {}
const char* YoloLayerPlugin::getPluginType() const
{
return "YoloLayer_TRT";
}
const char* YoloLayerPlugin::getPluginVersion() const
{
return "1";
}
void YoloLayerPlugin::destroy()
{
delete this;
}
// Clone the plugin
IPluginV2IOExt* YoloLayerPlugin::clone() const
{
YoloLayerPlugin *p = new YoloLayerPlugin();
p->setPluginNamespace(mPluginNamespace);
return p;
}
__device__ float Logist(float data){ return 1./(1. + exp(-data)); };
__global__ void CalDetection(const float *input, float *output,int noElements,
int yoloWidth,int yoloHeight,const float anchors[CHECK_COUNT*2],int classes,int outputElem) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= noElements) return;
int total_grid = yoloWidth * yoloHeight;
int bnIdx = idx / total_grid;
idx = idx - total_grid*bnIdx;
int info_len_i = 5 + classes;
const float* curInput = input + bnIdx * (info_len_i * total_grid * CHECK_COUNT);
for (int k = 0; k < 3; ++k) {
int class_id = 0;
float max_cls_prob = 0.0;
for (int i = 5; i < info_len_i; ++i) {
float p = Logist(curInput[idx + k * info_len_i * total_grid + i * total_grid]);
if (p > max_cls_prob) {
max_cls_prob = p;
class_id = i - 5;
}
}
float box_prob = Logist(curInput[idx + k * info_len_i * total_grid + 4 * total_grid]);
if (max_cls_prob < IGNORE_THRESH || box_prob < IGNORE_THRESH) continue;
float *res_count = output + bnIdx*outputElem;
int count = (int)atomicAdd(res_count, 1);
if (count >= MAX_OUTPUT_BBOX_COUNT) return;
char* data = (char * )res_count + sizeof(float) + count*sizeof(Detection);
Detection* det = (Detection*)(data);
int row = idx / yoloWidth;
int col = idx % yoloWidth;
//Location
det->bbox[0] = (col + Logist(curInput[idx + k * info_len_i * total_grid + 0 * total_grid])) * INPUT_W / yoloWidth;
det->bbox[1] = (row + Logist(curInput[idx + k * info_len_i * total_grid + 1 * total_grid])) * INPUT_H / yoloHeight;
det->bbox[2] = exp(curInput[idx + k * info_len_i * total_grid + 2 * total_grid]) * anchors[2*k];
det->bbox[3] = exp(curInput[idx + k * info_len_i * total_grid + 3 * total_grid]) * anchors[2*k + 1];
det->det_confidence = box_prob;
det->class_id = class_id;
det->class_confidence = max_cls_prob;
}
}
void YoloLayerPlugin::forwardGpu(const float *const * inputs, float* output, cudaStream_t stream, int batchSize) {
void* devAnchor;
size_t AnchorLen = sizeof(float)* CHECK_COUNT*2;
CUDA_CHECK(cudaMalloc(&devAnchor,AnchorLen));
int outputElem = 1 + MAX_OUTPUT_BBOX_COUNT * sizeof(Detection) / sizeof(float);
for(int idx = 0 ; idx < batchSize; ++idx) {
CUDA_CHECK(cudaMemset(output + idx*outputElem, 0, sizeof(float)));
}
int numElem = 0;
for (unsigned int i = 0;i< mYoloKernel.size();++i)
{
const auto& yolo = mYoloKernel[i];
numElem = yolo.width*yolo.height*batchSize;
if (numElem < mThreadCount)
mThreadCount = numElem;
CUDA_CHECK(cudaMemcpy(devAnchor, yolo.anchors, AnchorLen, cudaMemcpyHostToDevice));
CalDetection<<< (yolo.width*yolo.height*batchSize + mThreadCount - 1) / mThreadCount, mThreadCount>>>
(inputs[i],output, numElem, yolo.width, yolo.height, (float *)devAnchor, mClassCount ,outputElem);
}
CUDA_CHECK(cudaFree(devAnchor));
}
int YoloLayerPlugin::enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, cudaStream_t stream)
{
//assert(batchSize == 1);
//GPU
//CUDA_CHECK(cudaStreamSynchronize(stream));
forwardGpu((const float *const *)inputs, (float*)outputs[0], stream, batchSize);
return 0;
}
PluginFieldCollection YoloPluginCreator::mFC{};
std::vector<PluginField> YoloPluginCreator::mPluginAttributes;
YoloPluginCreator::YoloPluginCreator()
{
mPluginAttributes.clear();
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
const char* YoloPluginCreator::getPluginName() const
{
return "YoloLayer_TRT";
}
const char* YoloPluginCreator::getPluginVersion() const
{
return "1";
}
const PluginFieldCollection* YoloPluginCreator::getFieldNames()
{
return &mFC;
}
IPluginV2IOExt* YoloPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc)
{
YoloLayerPlugin* obj = new YoloLayerPlugin();
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
IPluginV2IOExt* YoloPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength)
{
// This object will be deleted when the network is destroyed, which will
// call MishPlugin::destroy()
YoloLayerPlugin* obj = new YoloLayerPlugin(serialData, serialLength);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
}
|
b099c0009bd087ddb62043f25ee6a892290b7ccc.hip | // !!! This is a file automatically generated by hipify!!!
// System includes
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <hipfft.h>
// CUDA runtime
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
//#include <dirent.h>
// Adds an additional library so that timeGetTime() can be used
#include <stdlib.h>
#include <time.h>
#include <omp.h>
typedef float2 Complex;
#define NX 256
#define BATCH 1
static __global__ void compareKernel(Complex* A, Complex* B, double* res, int sizeA, int sizeB)
{
const int numThreads = blockDim.x * gridDim.x;
const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
double valueA = 0.0;
double valueB = 0.0;
double diff = 99999999999999999999999.9;
res[(sizeA - sizeB) + 1] = 99999999999999999999999.9;
//double fab[sizeA];
//int i = threadID;
for (int i = threadID; i < ((sizeA - sizeB) + 1); i += numThreads) {
if ((i + (sizeB - 1)) < sizeA) {
res[i] = 0.0;
for (int j = 0; j < sizeB; j++) {
//valueA = sqrt(pow(A[i + j].x, 2) + pow(A[i + j].y, 2));
//valueB = sqrt(pow(B[i].x, 2) + pow(B[i].y, 2));
res[i] += fabs(sqrt(pow(A[i + j].x, 2) + pow(A[i + j].y, 2)) - sqrt(pow(B[j].x, 2) + pow(B[j].y, 2)));
}
if (res[i] < diff) {
diff = res[i];
}
}
res[(sizeA - sizeB) + 1] = diff;
}
//res[(sizeA - sizeB) + 1] = diff;
//res[1] = fab;
}
int readFile(int **grades, char *addr);
int CompareWav(char *path1, char *path2, double *a)
{
printf("%s --- %s --- \n", path1, path2);
printf("[simpleCUFFT] is starting...\n");
int *h_A_real = NULL;
unsigned int count_A;
count_A = readFile(&h_A_real, path1);
int *h_B_real = NULL;
unsigned int count_B;
count_B = readFile(&h_B_real, path2);
unsigned int count_C = (count_A - count_B) + 2;
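    // One LAD value per alignment offset, plus one extra slot the kernel uses for its running minimum.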
Complex* h_A = (Complex*)malloc(sizeof(Complex) * count_A);
    // Initialize the memory for the signal
for (unsigned int i = 0; i < count_A; ++i) {
//printf("Int is %d \n", h_A_real[i]);
h_A[i].x = h_A_real[i] + 0.0;
//printf("Num is %f \n", h_A[i].x);
h_A[i].y = 0;
}
Complex* h_B = (Complex*)malloc(sizeof(Complex) * count_B);
    // Initialize the memory for the signal
for (unsigned int i = 0; i < count_B; ++i) {
//printf("Int is %d \n", h_B_real[i]);
        h_B[i].x = h_B_real[i] + 0.0;
//printf("Num is %f \n", h_B[i].x);
h_B[i].y = 0;
}
/*
// Pad signal and filter kernel
Complex* h_padded_signal;
Complex* h_padded_filter_kernel;
int new_size = PadData(h_signal, &h_padded_signal, SIGNAL_SIZE,
h_filter_kernel, &h_padded_filter_kernel, FILTER_KERNEL_SIZE);
int mem_size = sizeof(Complex) * new_size;
*/
int MinCount = count_B;
if (count_A < count_B) {
MinCount = count_A;
}
unsigned int MaxCount = count_B;
if (count_A > count_B) {
MaxCount = count_A;
}
unsigned int size_A = sizeof(Complex)* count_A;
unsigned int size_B = sizeof(Complex)* count_B;
unsigned int size_C = sizeof(double)* count_C;
// Allocate device memory for signal
Complex* d_A;
hipMalloc((void**)&d_A, size_A);
// Copy host memory to device
hipMemcpy(d_A, h_A, size_A,
hipMemcpyHostToDevice);
// Allocate device memory for filter kernel
Complex* d_B;
hipMalloc((void**)&d_B, size_B);
// Copy host memory to device
hipMemcpy(d_B, h_B, size_B,
hipMemcpyHostToDevice);
double *d_C;
hipMalloc((void**)&d_C, count_C * sizeof(double));
double *h_C = (double *)malloc(count_C * sizeof(double));
// CUFFT plan
hipfftHandle planA, planB;
hipfftPlan1d(&planA, count_A, HIPFFT_C2C, 1);
hipfftPlan1d(&planB, count_B, HIPFFT_C2C, 1);
// Transform signal and kernel
printf("Transforming signal hipfftExecC2C\n");
hipfftExecC2C(planA, (hipfftComplex *)d_A, (hipfftComplex *)d_A, HIPFFT_FORWARD);
hipfftExecC2C(planB, (hipfftComplex *)d_B, (hipfftComplex *)d_B, HIPFFT_FORWARD);
    // Compare the two magnitude spectra at every alignment offset
printf("Launching compareKernel<<< >>>\n");
compareKernel << <32, 256 >> >(d_A, d_B, d_C, count_A, count_B);
// Transform signal back
printf("Transforming signal back hipfftExecC2C\n");
//hipfftExecC2C(plan, (hipfftComplex *)d_A, (hipfftComplex *)d_A, HIPFFT_BACKWARD);
// Copy device memory to host
int Min_size = sizeof(Complex) * count_A;
Complex* h_convolved_signal = (Complex*)malloc(sizeof(Complex) * count_A);
hipMemcpy(h_convolved_signal, d_A, size_A,
hipMemcpyDeviceToHost);
/*
for (int i = 0; i < count_A; i++) {
printf("FFT is %f %f \n", h_convolved_signal[i].x, h_convolved_signal[i].y);
}
*/
hipMemcpy(h_C, d_C, count_C * sizeof(double), hipMemcpyDeviceToHost);
printf("------Count IS: %d - %d = %d \n", count_A, count_B, count_C);
double min = 999999999.9;
for (int i = 0; i < count_C; i++) {
if (h_C[i] < min) {
min = h_C[i];
}
printf("------LAD IS: %f \n", h_C[i]);
}
//*a = h_C[(count_A - count_B) + 1];
*a = min;
printf("Min is: %f \n", min);
// Allocate host memory for the convolution result
//Complex* h_convolved_signal_ref = (Complex*)malloc(sizeof(Complex) * size_A);
// Convolve on the host
//Destroy CUFFT context
hipfftDestroy(planA);
hipfftDestroy(planB);
// cleanup memory
    free(h_A);
    free(h_B);
    free(h_C);
    free(h_convolved_signal);
    free(h_A_real);
    free(h_B_real);
    h_A = NULL;
    h_B = NULL;
    h_C = NULL;
//free(h_padded_signal);
//free(h_padded_filter_kernel);
//free(h_convolved_signal_ref);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
// Clean up memory
return EXIT_SUCCESS;
}
void makeZero(double *a) {
*a = 0;
}
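// Reads whitespace-separated integer samples from a text file into a dynamically grown array;
// returns the number of samples read.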
int readFile(int **grades, char *addr) {
FILE *fp;
int temp;
//grades = NULL;
int count = 1;
long index;
fp = fopen(addr, "rb+");
while (fscanf(fp, "%d", &temp) != EOF)
{
if (*grades == NULL)
{
*grades = (int *)malloc(sizeof(temp));
**grades = temp;
printf("The grade is %d\r\n", temp);
}
else
{
//printf("The grade is realloc %d\r\n", temp);
count++;
*grades = (int *)realloc(*grades, sizeof(int)*count);
index = count - 1;
(*grades)[index] = temp;
//printf("the index is %d\r\n",index);
}
}
printf("Done Total %d numbers \n", count);
fclose(fp);
temp = 0;
/*
while (index >= 0)
{
printf("the read value is %d\r\n", (*grades)[temp]);
index--;
temp++;
} */
return(count);
}
void concatenate_string(char *original, char *add)
{
while (*original)
original++;
while (*add)
{
*original = *add;
add++;
original++;
}
*original = '\0';
}
/**
* Program main
*/
int main(int argc, char **argv)
{
//char **strings1 = (char**)malloc(10 * sizeof(char*));
char arr1[10][30];
FILE * database;
char buffer1[30];
int Count1 = 0;
database = fopen("SongNames.txt", "r");
if (NULL == database)
{
perror("opening database");
return (-1);
}
while (EOF != fscanf(database, "%[^\n]\n", buffer1))
{
//printf("> %s\n", buffer1);
strcpy(arr1[Count1], buffer1);
Count1++;
}
fclose(database);
char arr2[10][30];
FILE * database2;
char buffer2[30];
int Count2 = 0;
database2 = fopen("SampleNames.txt", "r");
if (NULL == database2)
{
perror("opening database");
return (-1);
}
while (EOF != fscanf(database2, "%[^\n]\n", buffer2))
{
//printf("> %s\n", buffer2);
strcpy(arr2[Count2], buffer2);
Count2++;
}
fclose(database2);
for (int i = 0; i < Count1; i++) {
for (int j = 0; j < Count2; j++) {
double a = 0.0;
char path1[30] = "songs/";
char path2[30] = "samples/";
concatenate_string(path1, arr1[i]);
concatenate_string(path2, arr2[j]);
makeZero(&a);
CompareWav(path1, path2, &a);
printf("\%s >>> %s : Minimun LAD: %f\n", arr1[i], arr2[j], a);
}
}
}
| b099c0009bd087ddb62043f25ee6a892290b7ccc.cu | // System includes
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <cufft.h>
// CUDA runtime
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
//#include <dirent.h>
// Adds an additional library so that timeGetTime() can be used
#include <stdlib.h>
#include <time.h>
#include <omp.h>
typedef float2 Complex;
#define NX 256
#define BATCH 1
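// Slides the shorter spectrum B across A and, for each alignment offset, accumulates the
// absolute difference of the FFT magnitudes (LAD); the last slot of res carries this
// thread's running minimum.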
static __global__ void compareKernel(Complex* A, Complex* B, double* res, int sizeA, int sizeB)
{
const int numThreads = blockDim.x * gridDim.x;
const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
double valueA = 0.0;
double valueB = 0.0;
double diff = 99999999999999999999999.9;
res[(sizeA - sizeB) + 1] = 99999999999999999999999.9;
//double fab[sizeA];
//int i = threadID;
for (int i = threadID; i < ((sizeA - sizeB) + 1); i += numThreads) {
if ((i + (sizeB - 1)) < sizeA) {
res[i] = 0.0;
for (int j = 0; j < sizeB; j++) {
//valueA = sqrt(pow(A[i + j].x, 2) + pow(A[i + j].y, 2));
//valueB = sqrt(pow(B[i].x, 2) + pow(B[i].y, 2));
res[i] += fabs(sqrt(pow(A[i + j].x, 2) + pow(A[i + j].y, 2)) - sqrt(pow(B[j].x, 2) + pow(B[j].y, 2)));
}
if (res[i] < diff) {
diff = res[i];
}
}
res[(sizeA - sizeB) + 1] = diff;
}
//res[(sizeA - sizeB) + 1] = diff;
//res[1] = fab;
}
int readFile(int **grades, char *addr);
int CompareWav(char *path1, char *path2, double *a)
{
printf("%s --- %s --- \n", path1, path2);
printf("[simpleCUFFT] is starting...\n");
int *h_A_real = NULL;
unsigned int count_A;
count_A = readFile(&h_A_real, path1);
int *h_B_real = NULL;
unsigned int count_B;
count_B = readFile(&h_B_real, path2);
unsigned int count_C = (count_A - count_B) + 2;
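    // One LAD value per alignment offset, plus one extra slot the kernel uses for its running minimum.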
Complex* h_A = (Complex*)malloc(sizeof(Complex) * count_A);
    // Initialize the memory for the signal
for (unsigned int i = 0; i < count_A; ++i) {
//printf("Int is %d \n", h_A_real[i]);
h_A[i].x = h_A_real[i] + 0.0;
//printf("Num is %f \n", h_A[i].x);
h_A[i].y = 0;
}
Complex* h_B = (Complex*)malloc(sizeof(Complex) * count_B);
    // Initialize the memory for the signal
for (unsigned int i = 0; i < count_B; ++i) {
//printf("Int is %d \n", h_B_real[i]);
        h_B[i].x = h_B_real[i] + 0.0;
//printf("Num is %f \n", h_B[i].x);
h_B[i].y = 0;
}
/*
// Pad signal and filter kernel
Complex* h_padded_signal;
Complex* h_padded_filter_kernel;
int new_size = PadData(h_signal, &h_padded_signal, SIGNAL_SIZE,
h_filter_kernel, &h_padded_filter_kernel, FILTER_KERNEL_SIZE);
int mem_size = sizeof(Complex) * new_size;
*/
int MinCount = count_B;
if (count_A < count_B) {
MinCount = count_A;
}
unsigned int MaxCount = count_B;
if (count_A > count_B) {
MaxCount = count_A;
}
unsigned int size_A = sizeof(Complex)* count_A;
unsigned int size_B = sizeof(Complex)* count_B;
unsigned int size_C = sizeof(double)* count_C;
// Allocate device memory for signal
Complex* d_A;
cudaMalloc((void**)&d_A, size_A);
// Copy host memory to device
cudaMemcpy(d_A, h_A, size_A,
cudaMemcpyHostToDevice);
// Allocate device memory for filter kernel
Complex* d_B;
cudaMalloc((void**)&d_B, size_B);
// Copy host memory to device
cudaMemcpy(d_B, h_B, size_B,
cudaMemcpyHostToDevice);
double *d_C;
cudaMalloc((void**)&d_C, count_C * sizeof(double));
double *h_C = (double *)malloc(count_C * sizeof(double));
// CUFFT plan
cufftHandle planA, planB;
cufftPlan1d(&planA, count_A, CUFFT_C2C, 1);
cufftPlan1d(&planB, count_B, CUFFT_C2C, 1);
// Transform signal and kernel
printf("Transforming signal cufftExecC2C\n");
cufftExecC2C(planA, (cufftComplex *)d_A, (cufftComplex *)d_A, CUFFT_FORWARD);
cufftExecC2C(planB, (cufftComplex *)d_B, (cufftComplex *)d_B, CUFFT_FORWARD);
    // Compare the two magnitude spectra at every alignment offset
printf("Launching compareKernel<<< >>>\n");
compareKernel << <32, 256 >> >(d_A, d_B, d_C, count_A, count_B);
// Transform signal back
printf("Transforming signal back cufftExecC2C\n");
//cufftExecC2C(plan, (cufftComplex *)d_A, (cufftComplex *)d_A, CUFFT_INVERSE);
// Copy device memory to host
int Min_size = sizeof(Complex) * count_A;
Complex* h_convolved_signal = (Complex*)malloc(sizeof(Complex) * count_A);
cudaMemcpy(h_convolved_signal, d_A, size_A,
cudaMemcpyDeviceToHost);
/*
for (int i = 0; i < count_A; i++) {
printf("FFT is %f %f \n", h_convolved_signal[i].x, h_convolved_signal[i].y);
}
*/
cudaMemcpy(h_C, d_C, count_C * sizeof(double), cudaMemcpyDeviceToHost);
printf("------Count IS: %d - %d = %d \n", count_A, count_B, count_C);
double min = 999999999.9;
for (int i = 0; i < count_C; i++) {
if (h_C[i] < min) {
min = h_C[i];
}
printf("------LAD IS: %f \n", h_C[i]);
}
//*a = h_C[(count_A - count_B) + 1];
*a = min;
printf("Min is: %f \n", min);
// Allocate host memory for the convolution result
//Complex* h_convolved_signal_ref = (Complex*)malloc(sizeof(Complex) * size_A);
// Convolve on the host
//Destroy CUFFT context
cufftDestroy(planA);
cufftDestroy(planB);
// cleanup memory
    free(h_A);
    free(h_B);
    free(h_C);
    free(h_convolved_signal);
    free(h_A_real);
    free(h_B_real);
    h_A = NULL;
    h_B = NULL;
    h_C = NULL;
//free(h_padded_signal);
//free(h_padded_filter_kernel);
//free(h_convolved_signal_ref);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
// Clean up memory
return EXIT_SUCCESS;
}
void makeZero(double *a) {
*a = 0;
}
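// Reads whitespace-separated integer samples from a text file into a dynamically grown array;
// returns the number of samples read.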
int readFile(int **grades, char *addr) {
FILE *fp;
int temp;
//grades = NULL;
int count = 1;
long index;
fp = fopen(addr, "rb+");
while (fscanf(fp, "%d", &temp) != EOF)
{
if (*grades == NULL)
{
*grades = (int *)malloc(sizeof(temp));
**grades = temp;
printf("The grade is %d\r\n", temp);
}
else
{
//printf("The grade is realloc %d\r\n", temp);
count++;
*grades = (int *)realloc(*grades, sizeof(int)*count);
index = count - 1;
(*grades)[index] = temp;
//printf("the index is %d\r\n",index);
}
}
printf("Done Total %d numbers \n", count);
fclose(fp);
temp = 0;
/*
while (index >= 0)
{
printf("the read value is %d\r\n", (*grades)[temp]);
index--;
temp++;
} */
return(count);
}
void concatenate_string(char *original, char *add)
{
while (*original)
original++;
while (*add)
{
*original = *add;
add++;
original++;
}
*original = '\0';
}
/**
* Program main
*/
int main(int argc, char **argv)
{
//char **strings1 = (char**)malloc(10 * sizeof(char*));
char arr1[10][30];
FILE * database;
char buffer1[30];
int Count1 = 0;
database = fopen("SongNames.txt", "r");
if (NULL == database)
{
perror("opening database");
return (-1);
}
while (EOF != fscanf(database, "%[^\n]\n", buffer1))
{
//printf("> %s\n", buffer1);
strcpy(arr1[Count1], buffer1);
Count1++;
}
fclose(database);
char arr2[10][30];
FILE * database2;
char buffer2[30];
int Count2 = 0;
database2 = fopen("SampleNames.txt", "r");
if (NULL == database2)
{
perror("opening database");
return (-1);
}
while (EOF != fscanf(database2, "%[^\n]\n", buffer2))
{
//printf("> %s\n", buffer2);
strcpy(arr2[Count2], buffer2);
Count2++;
}
fclose(database2);
for (int i = 0; i < Count1; i++) {
for (int j = 0; j < Count2; j++) {
double a = 0.0;
char path1[30] = "songs/";
char path2[30] = "samples/";
concatenate_string(path1, arr1[i]);
concatenate_string(path2, arr2[j]);
makeZero(&a);
CompareWav(path1, path2, &a);
printf("\%s >>> %s : Minimun LAD: %f\n", arr1[i], arr2[j], a);
}
}
}
|
bb6c5d8bf0de441ff09c386ac079d5e458c06e9a.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @brief
* @author Oded Green <br>
* NVIDIA
* @date July, 2019
* @version v2
*
 * @copyright Copyright © 2017 Hornet. All rights reserved.
*
* @license{<blockquote>
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* </blockquote>}
*
* @file
*/
#include "Static/KTruss/KTruss.cuh"
#include <StandardAPI.hpp>
#include <Device/Util/Timer.cuh>
#include <Graph/GraphStd.hpp>
// #include "Hornet.hpp" // Shouldn't this be done by default?
using namespace hornets_nest;
int exec(int argc, char* argv[]) {
using namespace graph::structure_prop;
using namespace graph::parsing_prop;
using namespace graph;
using namespace hornets_nest;
using namespace timer;
hipSetDevice(0);
GraphStd<vert_t, vert_t> graph(UNDIRECTED);
graph.read(argv[1], SORT | PRINT_INFO );
HornetInit hornet_init(graph.nV(), graph.nE(),
graph.csr_out_offsets(),
graph.csr_out_edges());
HornetGraph hornet_gpu(hornet_init);
vert_t* gpuOffset;
gpu::allocate(gpuOffset, graph.nV()+1);
hipMemcpy(gpuOffset,graph.csr_out_offsets(),sizeof(vert_t)*(graph.nV()+1), hipMemcpyHostToDevice);
// int temp;
// int temp2=scanf("%d",&temp);
// printf("%d %d\n",temp+1,temp2);
KTruss ktruss (hornet_gpu);
ktruss.init();
ktruss.reset();
ktruss.copyOffsetArrayHost(graph.csr_out_offsets());
// ktruss.setInitParameters(1, 32, 0, 64000, 32);
// ktruss.createOffSetArray();
ktruss.setInitParameters(4, 8, 2, 64000, 32);
Timer<DEVICE> TM;
ktruss.reset();
TM.start();
ktruss.run();
TM.stop();
auto total_time = TM.duration();
TM.print("Time to find the k-truss");
std::cout << "The Maximal K-Truss is : " << ktruss.getMaxK() << std::endl;
return 0;
}
int main(int argc, char* argv[]) {
int ret = 0;
// #if defined(RMM_WRAPPER)
hornets_nest::gpu::initializeRMMPoolAllocation();//update initPoolSize if you know your memory requirement and memory availability in your system, if initial pool size is set to 0 (default value), RMM currently assigns half the device memory.
{//scoping technique to make sure that hornets_nest::gpu::finalizeRMMPoolAllocation is called after freeing all RMM allocations.
// #endif
ret = exec(argc, argv);
// #if defined(RMM_WRAPPER)
}//scoping technique to make sure that hornets_nest::gpu::finalizeRMMPoolAllocation is called after freeing all RMM allocations.
hornets_nest::gpu::finalizeRMMPoolAllocation();
// #endif
return ret;
}
| bb6c5d8bf0de441ff09c386ac079d5e458c06e9a.cu | /**
* @brief
* @author Oded Green <br>
* NVIDIA
* @date July, 2019
* @version v2
*
* @copyright Copyright © 2017 Hornet. All rights reserved.
*
* @license{<blockquote>
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* </blockquote>}
*
* @file
*/
#include "Static/KTruss/KTruss.cuh"
#include <StandardAPI.hpp>
#include <Device/Util/Timer.cuh>
#include <Graph/GraphStd.hpp>
// #include "Hornet.hpp" // Shouldn't this be done by default?
using namespace hornets_nest;
int exec(int argc, char* argv[]) {
using namespace graph::structure_prop;
using namespace graph::parsing_prop;
using namespace graph;
using namespace hornets_nest;
using namespace timer;
cudaSetDevice(0);
GraphStd<vert_t, vert_t> graph(UNDIRECTED);
graph.read(argv[1], SORT | PRINT_INFO );
HornetInit hornet_init(graph.nV(), graph.nE(),
graph.csr_out_offsets(),
graph.csr_out_edges());
HornetGraph hornet_gpu(hornet_init);
vert_t* gpuOffset;
gpu::allocate(gpuOffset, graph.nV()+1);
cudaMemcpy(gpuOffset,graph.csr_out_offsets(),sizeof(vert_t)*(graph.nV()+1), cudaMemcpyHostToDevice);
// int temp;
// int temp2=scanf("%d",&temp);
// printf("%d %d\n",temp+1,temp2);
KTruss ktruss (hornet_gpu);
ktruss.init();
ktruss.reset();
ktruss.copyOffsetArrayHost(graph.csr_out_offsets());
// ktruss.setInitParameters(1, 32, 0, 64000, 32);
// ktruss.createOffSetArray();
ktruss.setInitParameters(4, 8, 2, 64000, 32);
Timer<DEVICE> TM;
ktruss.reset();
TM.start();
ktruss.run();
TM.stop();
auto total_time = TM.duration();
TM.print("Time to find the k-truss");
std::cout << "The Maximal K-Truss is : " << ktruss.getMaxK() << std::endl;
return 0;
}
int main(int argc, char* argv[]) {
int ret = 0;
// #if defined(RMM_WRAPPER)
hornets_nest::gpu::initializeRMMPoolAllocation();//update initPoolSize if you know your memory requirement and memory availability in your system, if initial pool size is set to 0 (default value), RMM currently assigns half the device memory.
{//scoping technique to make sure that hornets_nest::gpu::finalizeRMMPoolAllocation is called after freeing all RMM allocations.
// #endif
ret = exec(argc, argv);
// #if defined(RMM_WRAPPER)
}//scoping technique to make sure that hornets_nest::gpu::finalizeRMMPoolAllocation is called after freeing all RMM allocations.
hornets_nest::gpu::finalizeRMMPoolAllocation();
// #endif
return ret;
}
|
0ace8f506d0a44551ea5a570e383ed41c959a148.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void gpuMM(float *A, float *B, float *C, int N)
{
// Matrix multiplication for NxN matrices C=A*B
// Each thread computes a single element of C
int row = blockIdx.y*blockDim.y + threadIdx.y;
int col = blockIdx.x*blockDim.x + threadIdx.x;
float sum = 0.0;
for (int n = 0; n < N; ++n)
sum += A[row*N+n]*B[n*N+col];
C[row*N+col] = sum;
// if(row%50 ==5)
// printf("%f \t %f \t %f\n",A[row*N+col], B[row*N+col], C[row*N+col]);
} | 0ace8f506d0a44551ea5a570e383ed41c959a148.cu | #include "includes.h"
__global__ void gpuMM(float *A, float *B, float *C, int N)
{
// Matrix multiplication for NxN matrices C=A*B
// Each thread computes a single element of C
int row = blockIdx.y*blockDim.y + threadIdx.y;
int col = blockIdx.x*blockDim.x + threadIdx.x;
float sum = 0.0;
for (int n = 0; n < N; ++n)
sum += A[row*N+n]*B[n*N+col];
C[row*N+col] = sum;
// if(row%50 ==5)
// printf("%f \t %f \t %f\n",A[row*N+col], B[row*N+col], C[row*N+col]);
} |
183e450067a698e885e536d0b369bc5aca54e175.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "./common/helpers.h"
#define SIZE (1024 * 1024 * 1024)
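// 256-bin byte-value histogram: each block builds a partial histogram in shared memory,
// then atomically merges it into the global result.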
__global__ void histo_kernel(unsigned char *buffer, long size, unsigned int *histo) {
__shared__ unsigned int temp[256];
temp[threadIdx.x] = 0;
__syncthreads();
int i = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while (i < size) {
atomicAdd(&temp[buffer[i]], 1);
i += stride;
}
__syncthreads();
atomicAdd(&(histo[threadIdx.x]), temp[threadIdx.x]);
}
int main(void) {
unsigned char *buffer = (unsigned char*)big_random_block(SIZE);
hipEvent_t start, stop;
HANDLE_ERROR(hipEventCreate(&start));
HANDLE_ERROR(hipEventCreate(&stop));
HANDLE_ERROR(hipEventRecord(start, 0));
unsigned char *dev_buffer;
unsigned int *dev_histo;
HANDLE_ERROR(hipMalloc((void**)&dev_buffer, SIZE));
HANDLE_ERROR(hipMemcpy(dev_buffer, buffer, SIZE, hipMemcpyHostToDevice));
HANDLE_ERROR(hipMalloc((void**)&dev_histo, 256 * sizeof(long)));
HANDLE_ERROR(hipMemset(dev_histo, 0, 256 * sizeof(int)));
hipDeviceProp_t prop;
HANDLE_ERROR(hipGetDeviceProperties(&prop, 0));
int blocks = prop.multiProcessorCount;
hipLaunchKernelGGL(( histo_kernel), dim3(blocks * 2), dim3(256), 0, 0, dev_buffer, SIZE, dev_histo);
unsigned int histo[256];
HANDLE_ERROR(hipMemcpy(histo, dev_histo, 256 * sizeof(int), hipMemcpyDeviceToHost));
HANDLE_ERROR(hipEventRecord(stop, 0));
HANDLE_ERROR(hipEventSynchronize(stop));
float elapsedTime;
HANDLE_ERROR(hipEventElapsedTime(&elapsedTime, start, stop));
printf("Time to generate: %3.1f ms \n", elapsedTime);
long histoCount = 0 ;
for (int i = 0; i < 256; i++) {
histoCount += histo[i];
}
printf("Histogram Sum: %ld\n", histoCount);
for (int i = 0; i < SIZE; i++) {
histo[buffer[i]]--;
}
for (int i = 0; i < 256; i++) {
if (histo[i] != 0 ) {
printf("Failure at %d! \n", i);
}
}
HANDLE_ERROR(hipEventDestroy(start));
HANDLE_ERROR(hipEventDestroy(stop));
hipFree(dev_histo);
hipFree(dev_buffer);
free(buffer);
return 0;
}
| 183e450067a698e885e536d0b369bc5aca54e175.cu | #include "./common/helpers.h"
#define SIZE (1024 * 1024 * 1024)
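// 256-bin byte-value histogram: each block builds a partial histogram in shared memory,
// then atomically merges it into the global result.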
__global__ void histo_kernel(unsigned char *buffer, long size, unsigned int *histo) {
__shared__ unsigned int temp[256];
temp[threadIdx.x] = 0;
__syncthreads();
int i = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while (i < size) {
atomicAdd(&temp[buffer[i]], 1);
i += stride;
}
__syncthreads();
atomicAdd(&(histo[threadIdx.x]), temp[threadIdx.x]);
}
int main(void) {
unsigned char *buffer = (unsigned char*)big_random_block(SIZE);
cudaEvent_t start, stop;
HANDLE_ERROR(cudaEventCreate(&start));
HANDLE_ERROR(cudaEventCreate(&stop));
HANDLE_ERROR(cudaEventRecord(start, 0));
unsigned char *dev_buffer;
unsigned int *dev_histo;
HANDLE_ERROR(cudaMalloc((void**)&dev_buffer, SIZE));
HANDLE_ERROR(cudaMemcpy(dev_buffer, buffer, SIZE, cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMalloc((void**)&dev_histo, 256 * sizeof(long)));
HANDLE_ERROR(cudaMemset(dev_histo, 0, 256 * sizeof(int)));
cudaDeviceProp prop;
HANDLE_ERROR(cudaGetDeviceProperties(&prop, 0));
int blocks = prop.multiProcessorCount;
histo_kernel<<<blocks * 2, 256>>>(dev_buffer, SIZE, dev_histo);
unsigned int histo[256];
HANDLE_ERROR(cudaMemcpy(histo, dev_histo, 256 * sizeof(int), cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaEventRecord(stop, 0));
HANDLE_ERROR(cudaEventSynchronize(stop));
float elapsedTime;
HANDLE_ERROR(cudaEventElapsedTime(&elapsedTime, start, stop));
printf("Time to generate: %3.1f ms \n", elapsedTime);
long histoCount = 0 ;
for (int i = 0; i < 256; i++) {
histoCount += histo[i];
}
printf("Histogram Sum: %ld\n", histoCount);
for (int i = 0; i < SIZE; i++) {
histo[buffer[i]]--;
}
for (int i = 0; i < 256; i++) {
if (histo[i] != 0 ) {
printf("Failure at %d! \n", i);
}
}
HANDLE_ERROR(cudaEventDestroy(start));
HANDLE_ERROR(cudaEventDestroy(stop));
cudaFree(dev_histo);
cudaFree(dev_buffer);
free(buffer);
return 0;
}
|
f209d3093e3db25a359ff0114bc308d05fee9bc8.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <GL/gl.h>
#include <GL/glut.h>
#include <math.h>
#include <stdbool.h>
#include <omp.h>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include <hip/hip_runtime.h>
//TBD
//  radiative cooling
//  metaballs
//  VBO
//  make the initial particle layout spherical
//  make collision angle and distance adjustable with a slider
#define PI 3.141592653589793
// Physical parameters
#define cap 1000
#define ref 0.2
#define temp 3273
#define visc 9
#define GRAV (6.674*0.00000000000000000001)
#define density (2.5 * 1000000000000)
// Particle geometry
#define rad 50 //km
#define M (4 / 3 * PI * rad*rad*rad* density)//kg
//
#define dev 10
#define X 0
#define Y 1
#define Z 2
#define ANIM_START 0
#define ANIM 100
#define scale 0.01
#define colmargin 1.1
#define R (rad * scale)
#define INIT_WIDTH 800
#define INIT_HEIGHT 800
#define vision 40
#define Grid_x 32 // __syncthreads() cannot synchronize across blocks
#define Grid_y 8
#define Grid_z 1
#define Block_x 2
#define Block_y 2
#define Block_z 1
#define NUM_POINTS (Grid_x*Grid_y*Grid_z*Block_x*Block_y*Block_z)
unsigned int num_points = (dev + 1) * (dev + 1);
unsigned int window_width = INIT_WIDTH;
unsigned int window_height = INIT_HEIGHT;
double vision_size = vision;
float right_motion=0;
float up_motion=0;
double left, right, bottom, top;
float h_point[NUM_POINTS][3];
float v_point[NUM_POINTS][3];
float st_point[NUM_POINTS];
float e_point[NUM_POINTS];
float J_point[NUM_POINTS];
float h_buff[NUM_POINTS][3]={0};
float anim_time = ANIM_START;
float anim_dt = ANIM;
double phi = 30.0;
double theta = 30.0;
float light_pos[4];
int mouse_old_x, mouse_old_y;
bool motion_p;
bool motion_w;
double eye[3];
double center[3] = {0.0, 0.0, 0.0};
double up[3];
double ** point;
float (*d_point)[3];
float (*dv_point)[3];
float (*dst_point);
float (*de_point);
float (*dJ_point);
float (*v_buff)[3];
float colsynctime[NUM_POINTS][NUM_POINTS]={0};
int colsyncindex[NUM_POINTS][NUM_POINTS]={0};
float (*dcolsynctime)[NUM_POINTS];
int (*dcolsyncindex)[NUM_POINTS];
__global__ void grav_coldetect(float(*pos)[3],float(*vec)[3],float(*coltime)[NUM_POINTS],int(*colindex)[NUM_POINTS]);
__global__ void grav_colv(float(*pos)[3],float(*vec)[3],float(*v_buff)[3],float(*sti),float(*e),float(*J),float(*coltime)[NUM_POINTS],int(*colindex)[NUM_POINTS]);
__global__ void grav_v(float(*pos)[3],float(*vec)[3],float(*v_buff)[3],int(*colindex)[NUM_POINTS]);
__global__ void grav_vupdate(float(*vec)[3],float(*v_buff)[3]);
__global__ void buff_clear(float(*v_buff)[3],float(*coltime)[NUM_POINTS],int(*colindex)[NUM_POINTS]);
__global__ void grav_p(float (*pos)[3], float(*vec)[3]);
// Basic vector math helpers
double dot(double vec0[], double vec1[])
{
return(vec0[X] * vec1[X] + vec0[Y] * vec1[Y] + vec0[Z] * vec1[Z]);
}
void cross(double vec0[], double vec1[], double vec2[])
{
vec2[X] = vec0[Y] * vec1[Z] - vec0[Z] * vec1[Y];
vec2[Y] = vec0[Z] * vec1[X] - vec0[X] * vec1[Z];
vec2[Z] = vec0[X] * vec1[Y] - vec0[Y] * vec1[X];
}
void normVec(double vec[])
{
double norm;
norm = sqrt(vec[X] * vec[X] + vec[Y] * vec[Y] + vec[Z] * vec[Z]);
vec[X] /= norm;
vec[Y] /= norm;
vec[Z] /= norm;
}
void normal(double p0[], double p1[], double p2[], double normal[])
{
unsigned int i;
double v0[3], v1[3];
for (i = 0; i < 3; i++) {
v0[i] = p2[i] - p1[i];
v1[i] = p0[i] - p1[i];
}
cross(v0, v1, normal);
normVec(normal);
}
// Collision detection
__global__ void grav_coldetect(float(*pos)[3],float(*vec)[3],float(*coltime)[NUM_POINTS],int(*colindex)[NUM_POINTS])
{
float xn,yn,zn,vx,vy,vz,dis,sq;
unsigned int thread_idx = threadIdx.x+blockDim.x*blockIdx.x;
unsigned int thread_idy = threadIdx.y+blockDim.y*blockIdx.y;
unsigned int thread_idz = threadIdx.z+blockDim.z*blockIdx.z;
unsigned int index = (blockDim.x * Grid_x) * (blockDim.y * Grid_y) * thread_idz + (blockDim.x * Grid_x) * thread_idy + thread_idx ;
float rvec[3]={0};
xn = pos[index][0];
yn = pos[index][1];
zn = pos[index][2];
vx = vec[index][0];
vy = vec[index][1];
vz = vec[index][2];
for (int i = 0 ; i < NUM_POINTS; i++)
{
sq = (float)pow((double)(xn-pos[i][0]),2) + pow((double)(yn-pos[i][1]),2) + pow((double)(zn-pos[i][2]),2);
dis = (float)sqrt((double)sq);
rvec[0]=(pos[i][0]-xn)/dis;
rvec[1]=(pos[i][1]-yn)/dis;
rvec[2]=(pos[i][2]-zn)/dis;
        // Check whether particle i has entered the collision zone
if (dis > 2 * R * colmargin && i != index)
{
colindex[index][i]=NUM_POINTS;
}
else if (dis <= 2 * R * colmargin && i != index)
{
            // Record the elapsed time since entering the collision zone
colindex[index][i]=i;
coltime[index][i]=(2*R*colmargin-dis)/((vx-vec[i][0])*rvec[0]+(vy-vec[i][1])*rvec[1]+(vz-vec[i][2])*rvec[2]);
}
else
{
colindex[index][i]=NUM_POINTS;
}
}
}
// Compute post-collision velocities
__global__ void grav_colv(float(*pos)[3],float(*vec)[3],float(*v_buff)[3],float(*sti),float(*e),float(*J),float(*coltime)[NUM_POINTS],int(*colindex)[NUM_POINTS])
{
float xn,yn,zn,sq,dis;
unsigned int thread_idx = threadIdx.x+blockDim.x*blockIdx.x;
unsigned int thread_idy = threadIdx.y+blockDim.y*blockIdx.y;
unsigned int thread_idz = threadIdx.z+blockDim.z*blockIdx.z;
unsigned int index = (blockDim.x * Grid_x) * (blockDim.y * Grid_y) * thread_idz + (blockDim.x * Grid_x) * thread_idy + thread_idx ;
int colnum=0;
float tmptime=0;
int tmpindex=0;
int coldex=0;
float repul=0;
float rvec[3]={0};
float Vl[3]={0};
float Vr[3]={0};
float Vh[3]={0};
float vl_buff[3]={0};
float vr_buff[3]={0};
float vcol_buff[3]={0};
float dotV=0;
xn = pos[index][0];
yn = pos[index][1];
zn = pos[index][2];
vl_buff[0]=vec[index][0];
vl_buff[1]=vec[index][1];
vl_buff[2]=vec[index][2];
for (int i = 0 ; i < NUM_POINTS; i++){
if(colindex[index][i]!=NUM_POINTS){
colnum++;
}
}
if(colnum>0){
        // Index-sort collision partners by elapsed time since entering the collision zone
for (int i = 0 ; i < NUM_POINTS; i++){
for(int j = i+1; j < NUM_POINTS; j++){
if(coltime[index][i] > coltime[index][j]){
tmptime=coltime[index][i];
tmpindex=colindex[index][i];
coltime[index][i]=coltime[index][j];
colindex[index][i]=colindex[index][j];
coltime[index][j]=tmptime;
colindex[index][j]=tmpindex;
}
}
}
        // Process collisions starting from the longest elapsed time
for (int i=NUM_POINTS-1 ; i>=NUM_POINTS-colnum; i--){
coldex=colindex[index][i];
sq = (float)pow((double)(xn-pos[coldex][0]),2)+pow((double)(yn-pos[coldex][1]),2)+pow((double)(zn-pos[coldex][2]),2);
dis = (float)sqrt((double)sq);
            // Unit vector along the collision direction
rvec[0]=(pos[coldex][0]-xn)/dis;
rvec[1]=(pos[coldex][1]-yn)/dis;
rvec[2]=(pos[coldex][2]-zn)/dis;
            // Normal component of this particle's velocity
dotV=rvec[0]*vl_buff[0]+rvec[1]*vl_buff[1]+rvec[2]*vl_buff[2];
Vl[0]=dotV*rvec[0];
Vl[1]=dotV*rvec[1];
Vl[2]=dotV*rvec[2];
            // Normal component of the partner's velocity
dotV=rvec[0]*vec[coldex][0]+rvec[1]*vec[coldex][1]+rvec[2]*vec[coldex][2];
Vr[0]=dotV*rvec[0];
Vr[1]=dotV*rvec[1];
Vr[2]=dotV*rvec[2];
            // Tangential component of this particle's velocity
Vh[0]=vl_buff[0]-Vl[0];
Vh[1]=vl_buff[1]-Vl[1];
Vh[2]=vl_buff[2]-Vl[2];
            // Use the smaller of the two coefficients of restitution
repul=e[index];
if (e[coldex] < e[index]) {
repul=e[coldex];
}
            // Velocity update
vcol_buff[0]=Vh[0]+((1+repul)*Vr[0]+(1-repul)*Vl[0])/2;
vcol_buff[1]=Vh[1]+((1+repul)*Vr[1]+(1-repul)*Vl[1])/2;
vcol_buff[2]=Vh[2]+((1+repul)*Vr[2]+(1-repul)*Vl[2])/2;
            // Compute the partner's velocity
vr_buff[0]=vec[coldex][0]-Vr[0]+((1+repul)*Vl[0]+(1-repul)*Vr[0])/2;
vr_buff[1]=vec[coldex][1]-Vr[1]+((1+repul)*Vl[1]+(1-repul)*Vr[1])/2;
vr_buff[2]=vec[coldex][2]-Vr[2]+((1+repul)*Vl[2]+(1-repul)*Vr[2])/2;
            // Distribute the collision energy by viscosity ratio and convert it to heat (TBD: radiative cooling)
double Energy=0.5*M*(pow(vec[coldex][0],2)+pow(vec[coldex][1],2)+pow(vec[coldex][2],2)+pow(vl_buff[0],2)+pow(vl_buff[1],2)+pow(vl_buff[2],2) - (pow(vcol_buff[0],2)+pow(vcol_buff[1],2)+pow(vcol_buff[2],2)+pow(vr_buff[0],2)+pow(vr_buff[1],2)+pow(vr_buff[2],2))) / pow(scale,2) * 1000000;
J[index] += Energy / (pow(10.0,(double)(sti[index]-sti[coldex]))+1);
vl_buff[0]=vcol_buff[0];
vl_buff[1]=vcol_buff[1];
vl_buff[2]=vcol_buff[2];
            // Update viscosity and restitution: restitution falls linearly with temperature; viscosity drops one order of magnitude per 100-degree rise
e[index] = 1 - ((1-ref)/temp * J[index]/M/cap);
if ( e[index] < 0 ){ e[index] = 0; }
if ( e[index] > 1 ){ e[index] = 1; }
sti[index] = visc - ((J[index]/M/cap - temp) / 100);
}
v_buff[index][0]=vl_buff[0];
v_buff[index][1]=vl_buff[1];
v_buff[index][2]=vl_buff[2];
}
}
// Compute velocities after gravitational influence
__global__ void grav_v(float(*pos)[3],float(*vec)[3],float(*v_buff)[3],int(*colindex)[NUM_POINTS])
{
float xn,yn,zn,vx,vy,vz,sq,dis;
unsigned int thread_idx = threadIdx.x+blockDim.x*blockIdx.x;
unsigned int thread_idy = threadIdx.y+blockDim.y*blockIdx.y;
unsigned int thread_idz = threadIdx.z+blockDim.z*blockIdx.z;
unsigned int index = (blockDim.x * Grid_x) * (blockDim.y * Grid_y) * thread_idz + (blockDim.x * Grid_x) * thread_idy + thread_idx ;
int colnum=0;
int colchk=0;
float gravity=0;
//float rvec[3]={0};
//float dotV=0;
//float dotVcol=0;
//float v_add[3]={0};
//float vcol_add[3]={0};
xn = pos[index][0];
yn = pos[index][1];
zn = pos[index][2];
for (int i = 0 ; i < NUM_POINTS; i++){
if(colindex[index][i]!=NUM_POINTS){
colnum++;
}
}
if(colnum==0){
        // No collision: feel gravity from every other particle
vx = vec[index][0];
vy = vec[index][1];
vz = vec[index][2];
for (int i = 0 ; i < NUM_POINTS; i++){
if (i!=index) {
sq = (float)pow((double)(xn-pos[i][0]),2) + pow((double)(yn-pos[i][1]),2) + pow((double)(zn-pos[i][2]),2);
gravity=GRAV*M/sq*scale*scale;
dis = (float)sqrt((double)sq);
vx += ((pos[i][0]-xn)/dis)*gravity*ANIM*scale;
vy += ((pos[i][1]-yn)/dis)*gravity*ANIM*scale;
vz += ((pos[i][2]-zn)/dis)*gravity*ANIM*scale;
}
}
}
else {
        // Colliding: receive gravity from all particles except self and the collision partners
vx = v_buff[index][0];
vy = v_buff[index][1];
vz = v_buff[index][2];
}
/*
for (int i = 0 ; i < NUM_POINTS; i++){
colchk=0;
for (int j = NUM_POINTS-1 ; j >= 0; j--){
if(colindex[index][j]==i){
colchk=1;
break;
}
}
sq = (float)pow((double)(xn-pos[i][0]),2) + pow((double)(yn-pos[i][1]),2) + pow((double)(zn-pos[i][2]),2);
gravity=GRAV*M/sq*scale*scale;
dis = (float)sqrt((double)sq);
if (i!=index && colchk==0) {
vx += ((pos[i][0]-xn)/dis)*gravity*ANIM*scale;
vy += ((pos[i][1]-yn)/dis)*gravity*ANIM*scale;
vz += ((pos[i][2]-zn)/dis)*gravity*ANIM*scale;
}
}
}
    // Collision partners exert gravity and a repulsive force
for (int i = 0 ; i < NUM_POINTS; i++){
colchk=0;
for (int j = NUM_POINTS-1 ; j >= 0; j--){
if(colindex[index][j]==i){
colchk=1;
break;
}
}
if (i!=index && colchk==1) {
vx += ((pos[i][0]-xn)/dis)*gravity*ANIM*scale;
vy += ((pos[i][1]-yn)/dis)*gravity*ANIM*scale;
vz += ((pos[i][2]-zn)/dis)*gravity*ANIM*scale;
}
}
rvec[0]=vcol_add[0]/sqrt(pow(vcol_add[0],2)+pow(vcol_add[1],2)+pow(vcol_add[0],2));
rvec[1]=vcol_add[1]/sqrt(pow(vcol_add[0],2)+pow(vcol_add[1],2)+pow(vcol_add[0],2));
rvec[2]=vcol_add[2]/sqrt(pow(vcol_add[0],2)+pow(vcol_add[1],2)+pow(vcol_add[0],2));
dotV=rvec[0]*v_add[0]+rvec[1]*v_add[1]+rvec[2]*v_add[2];
//dotVcol=rvec[0]*vcol_add[0]+rvec[1]*vcol_add[1]+rvec[2]*vcol_add[2];
if(dotV>=0){
vx+=v_add[0];
vy+=v_add[1];
vz+=v_add[2];
}
else if(-dotV>=dotVcol){
vx+=v_add[0]+rvec[0]*dotVcol;
vy+=v_add[1]+rvec[1]*dotVcol;
vz+=v_add[2]+rvec[2]*dotVcol;
}
else{
vx+=v_add[0]+rvec[0]*dotV;
vy+=v_add[1]+rvec[1]*dotV;
vz+=v_add[2]+rvec[2]*dotV;
}
*/
v_buff[index][0] = vx;
v_buff[index][1] = vy;
v_buff[index][2] = vz;
}
__global__ void grav_vupdate(float(*vec)[3],float(*v_buff)[3])
{
unsigned int thread_idx = threadIdx.x+blockDim.x*blockIdx.x;
unsigned int thread_idy = threadIdx.y+blockDim.y*blockIdx.y;
unsigned int thread_idz = threadIdx.z+blockDim.z*blockIdx.z;
unsigned int index = (blockDim.x * Grid_x) * (blockDim.y * Grid_y) * thread_idz + (blockDim.x * Grid_x) * thread_idy + thread_idx ;
vec[index][0]=v_buff[index][0];
vec[index][1]=v_buff[index][1];
vec[index][2]=v_buff[index][2];
}
// Clear the work buffers
__global__ void buff_clear(float(*v_buff)[3],float(*coltime)[NUM_POINTS],int(*colindex)[NUM_POINTS])
{
unsigned int thread_idx = threadIdx.x+blockDim.x*blockIdx.x;
unsigned int thread_idy = threadIdx.y+blockDim.y*blockIdx.y;
unsigned int thread_idz = threadIdx.z+blockDim.z*blockIdx.z;
unsigned int index = (blockDim.x * Grid_x) * (blockDim.y * Grid_y) * thread_idz + (blockDim.x * Grid_x) * thread_idy + thread_idx ;
for (int i=0; i < 3; i++){
v_buff[index][i]=0;
}
for (int i=0; i < NUM_POINTS; i++){
coltime[index][i]=0;
colindex[index][i]=NUM_POINTS;
}
}
// Determine positions after the gravity step
__global__ void grav_p(float(*pos)[3], float(*vec)[3])
{
float xn,yn,zn,vx,vy,vz;
unsigned int thread_idx = threadIdx.x+blockDim.x*blockIdx.x;
unsigned int thread_idy = threadIdx.y+blockDim.y*blockIdx.y;
unsigned int thread_idz = threadIdx.z+blockDim.z*blockIdx.z;
unsigned int index = ( blockDim.x * (Grid_x - 1) + blockDim.x ) * ( blockDim.y * (Grid_y - 1) + blockDim.y ) * thread_idz + ( blockDim.x * (Grid_x - 1) + blockDim.x ) * thread_idy + thread_idx ;
xn = pos[index][0];
yn = pos[index][1];
zn = pos[index][2];
vx = vec[index][0];
vy = vec[index][1];
vz = vec[index][2];
pos[index][0] = xn + vx * ANIM;
pos[index][1] = yn + vy * ANIM;
pos[index][2] = zn + vz * ANIM;
}
// Place particles at their initial positions.
void setInitialPosition(void)
{
for (int i = 0; i < NUM_POINTS; i++) {
for (int j = 0 ; j < 3 ; j++){
h_point[i][j] = (float)(rand()-rand()) / RAND_MAX * INIT_WIDTH/vision*2 ;
v_point[i][j] = 0;
h_buff[i][j] = 0;
}
st_point[i]=visc;
e_point[i]=ref;
J_point[i]=cap*M*temp;
for (int j = 0; j < NUM_POINTS; j++) {
colsyncindex[i][j]=NUM_POINTS;
}
}
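    // Allocate device buffers and upload the initial particle state
    // (positions, velocities, viscosity, restitution, heat, collision tables).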
checkCudaErrors(hipMalloc((void**)&d_point, 3 * NUM_POINTS * sizeof(float)));
checkCudaErrors(hipMalloc((void**)&dv_point, 3 * NUM_POINTS * sizeof(float)));
checkCudaErrors(hipMalloc((void**)&v_buff, 3 * NUM_POINTS * sizeof(float)));
checkCudaErrors(hipMalloc((void**)&dst_point, NUM_POINTS * sizeof(float)));
checkCudaErrors(hipMalloc((void**)&de_point, NUM_POINTS * sizeof(float)));
checkCudaErrors(hipMalloc((void**)&dJ_point, NUM_POINTS * sizeof(float)));
checkCudaErrors(hipMalloc((void**)&dcolsynctime, NUM_POINTS*NUM_POINTS * sizeof(float)));
checkCudaErrors(hipMalloc((void**)&dcolsyncindex, NUM_POINTS*NUM_POINTS * sizeof(int)));
checkCudaErrors(hipMemcpy(d_point, h_point, 3 * NUM_POINTS * sizeof(float) , hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(dv_point, v_point, 3 * NUM_POINTS * sizeof(float) , hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(v_buff, h_buff, 3 * NUM_POINTS * sizeof(float) , hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(dst_point, st_point, NUM_POINTS * sizeof(float) , hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(de_point, e_point, NUM_POINTS * sizeof(float) , hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(dJ_point, J_point, NUM_POINTS * sizeof(float) , hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(dcolsynctime, colsynctime, NUM_POINTS*NUM_POINTS * sizeof(float) , hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(dcolsyncindex, colsyncindex, NUM_POINTS*NUM_POINTS * sizeof(int) , hipMemcpyHostToDevice));
}
// Launch the CUDA simulation kernels
void launchGPUKernel(unsigned int num_particles,float(*pos)[3],float(*vec)[3],float(*v_buff)[3],float(*sti),float(*e),float(*J),float(*coltime)[NUM_POINTS],int(*colindex)[NUM_POINTS])
{
dim3 grid(Grid_x,Grid_y,Grid_z);
dim3 block(Block_x,Block_y,Block_z);
hipLaunchKernelGGL(( grav_coldetect), dim3(grid) , dim3(block), 0, 0, pos, vec,coltime,colindex);
hipLaunchKernelGGL(( grav_colv), dim3(grid) , dim3(block), 0, 0, pos,vec,v_buff,sti,e,J,coltime,colindex);
hipLaunchKernelGGL(( grav_v), dim3(grid) , dim3(block), 0, 0, pos,vec,v_buff,colindex);
hipLaunchKernelGGL(( grav_vupdate), dim3(grid) , dim3(block), 0, 0, vec,v_buff);
hipLaunchKernelGGL(( buff_clear), dim3(grid) , dim3(block), 0, 0, v_buff,coltime,colindex);
hipLaunchKernelGGL(( grav_p), dim3(grid) , dim3(block), 0, 0, pos,vec);
}
// Run one simulation step on the GPU and copy the results back to the host
void runGPUKernel(void)
{
launchGPUKernel(NUM_POINTS, d_point, dv_point,v_buff,dst_point, de_point,dJ_point,dcolsynctime,dcolsyncindex);
checkCudaErrors(hipMemcpy(h_point, d_point, 3 * NUM_POINTS * sizeof(float) , hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(v_point, dv_point, 3 * NUM_POINTS * sizeof(float) , hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(h_buff, v_buff, 3 * NUM_POINTS * sizeof(float) , hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(st_point, dst_point, NUM_POINTS * sizeof(float) , hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(e_point, de_point, NUM_POINTS * sizeof(float) , hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(J_point, dJ_point, NUM_POINTS * sizeof(float) , hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(colsynctime,dcolsynctime, NUM_POINTS*NUM_POINTS * sizeof(float) , hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(colsyncindex,dcolsyncindex, NUM_POINTS*NUM_POINTS * sizeof(int) , hipMemcpyDeviceToHost));
anim_time += anim_dt;
}
// Set up the viewing transform from the camera angles
void defineViewMatrix(double phi, double theta)
{
unsigned int i;
double c, s, xy_dist;
double x_axis[3], y_axis[3], z_axis[3];
//
eye[Z] = sin(theta * PI / 180.0);
xy_dist = cos(theta * PI / 180.0);
c = cos(phi * PI / 180.0);
s = sin(phi * PI / 180.0);
eye[X] = xy_dist * c;
eye[Y] = xy_dist * s;
up[X] = - c * eye[Z];
up[Y] = - s * eye[Z];
up[Z] = s * eye[Y] + c * eye[X];
normVec(up);
//
for (i = 0; i < 3; i++)
{
z_axis[i] = eye[i] - center[i];
}
normVec(z_axis);
cross(up, z_axis, x_axis);
normVec(x_axis);
cross(z_axis, x_axis, y_axis);
gluLookAt(eye[X], eye[Y], eye[Z], center[X], center[Y], center[Z], up[X], up[Y], up[Z]);
}
void display(void)
{
double nrml_vec[3];
light_pos[0] = (float)eye[X];
light_pos[1] = (float)eye[Y];
light_pos[2] = (float)eye[Z];
light_pos[3] = 0.0f;
    // Run the CUDA simulation step
runGPUKernel();
//
glLightfv(GL_LIGHT0, GL_POSITION, light_pos);
//glEnable(GL_LIGHTING);
glMatrixMode(GL_PROJECTION);
//glFrustum(-1000000, 1000000, -1000000, 1000000, -1000000, 1000000);
glLoadIdentity();
glOrtho(-vision_size-right_motion/2, vision_size+right_motion/2, -vision_size-right_motion/2, vision_size+right_motion/2, -100*vision_size, 100*vision_size);
glViewport(0, 0, window_width, window_height);
defineViewMatrix(phi, theta);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glBegin(GL_QUADS);
//printf("%f,%f,%f\n",h_point[0][0],h_point[0][1],h_point[0][2]);
//printf("%f,%f,%f\n",J_point[0]/M/cap,J_point[0]/M/cap,J_point[2]/M/cap);
    // Color particles by temperature (TBD)
for (int k = 0 ; k < NUM_POINTS ; k++)
{
//
if(J_point[k]/M/cap>10000){
glColor3f(0.75f,1.0f,1.0f);
}
else if(J_point[k]/M/cap>8000){
glColor3f((GLfloat)(1.0-0.25/2000*(J_point[k]/M/cap-8000)),1.0f,1.0f);
}
else if(J_point[k]/M/cap>6000){
glColor3f(1.0f,(GLfloat)(0.75+0.25/2000*(J_point[k]/M/cap-6000)),(GLfloat)(1.0/2000*(J_point[k]/M/cap-6000)));
}
else if(J_point[k]/M/cap>4000){
glColor3f(1.0f,(GLfloat)(0.25+0.5/2000*(J_point[k]/M/cap-4000)),0.0f);
}
else if(J_point[k]/M/cap>2000){
glColor3f((GLfloat)(0.5/2000*J_point[k]/M/cap),0.25f,0.0f);
}
else{
glColor3f((GLfloat)(0.5/2000*J_point[k]/M/cap),0.25/2000*J_point[k]/M/cap,0.0f);
}
for (int i = 0 ; i < dev + 1 ; i ++)
{
for (int j = 0 ; j < 2 * dev + 1 ; j++)
{
normal(point[i * (dev-1) + j],point[(i + 1) * (dev-1) + j + 1],point[(i+1) * (dev-1) + j],nrml_vec);
glNormal3dv(nrml_vec);
glVertex3d(point[i * (dev-1) + j][X] + h_point[k][X], point[i * (dev-1) + j][Y] + h_point[k][Y], point[i * (dev-1) + j][Z] + h_point[k][Z]);
glVertex3d(point[(i + 1) * (dev-1) + j][X] + h_point[k][X],point[(i + 1) * (dev-1) + j][Y] + h_point[k][Y],point[(i + 1) * (dev-1) + j][Z] + h_point[k][Z]);
glVertex3d(point[(i + 1) * (dev-1) + j + 1][X] + h_point[k][X], point[(i + 1) * (dev-1) + j + 1][Y] + h_point[k][Y], point[(i + 1) * (dev-1) + j + 1][Z] + h_point[k][Z]);
glVertex3d(point[i * (dev-1) + j + 1][X] + h_point[k][X],point[i * (dev-1) + j + 1][Y] + h_point[k][Y],point[i * (dev-1) + j + 1][Z] + h_point[k][Z]);
}
}
}
glEnd();
glutSwapBuffers();
glutPostRedisplay();
}
void mouse_button(int button, int state, int x, int y)
{
if ((state == GLUT_DOWN) && (button == GLUT_LEFT_BUTTON))
motion_p = true;
if ((state == GLUT_DOWN) && (button == GLUT_RIGHT_BUTTON))
motion_w = true;
else if (state == GLUT_UP) {
motion_p = false;
motion_w = false;
}
mouse_old_x = x;
mouse_old_y = y;
}
void mouse_motion(int x, int y)
{
int dx, dy;
dx = x - mouse_old_x;
dy = y - mouse_old_y;
if (motion_p) {
phi -= dx * 0.2;
theta += dy * 0.2;
}
if (motion_w) {
right_motion += dx ;
up_motion -= dy ;
}
mouse_old_x = x;
mouse_old_y = y;
glutPostRedisplay();
}
void resize(int width, int height)
{
window_width = width;
window_height = height;
}
void keyboard(unsigned char key, int x, int y)
{
switch (key) {
case 'q':
case 'Q':
case '\033':
exit(0);
default:
break;
}
}
bool initGL(void)
{
glClearColor(0.0f, 0.0f , 0.0f, 0.5f);
glEnable(GL_DEPTH_TEST);
glClearDepth(1.0);
glDepthFunc(GL_LESS);
glEnable(GL_LIGHT0);
return true;
}
int main(int argc, char** argv)
{
double yangle,zangle;
double r;
point = (double **)malloc(sizeof(double *) * num_points);
for (int i = 0 ; i < num_points ; i++)
{
point[i] = (double *)malloc(sizeof(double) * 3);
}
for (int i = 0 ; i < dev + 1; i ++)
{
zangle = i * PI / dev;
r=R * sin(zangle);
for (int j = 0 ; j < dev + 1; j++)
{
yangle=j * PI * 2 / dev;
point[i * dev + j][X] = r * sin(yangle);
point[i * dev + j][Y] = r * cos(yangle);
point[i * dev + j][Z] = R * cos(zangle);
}
}
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH);
glutInitWindowSize(window_width, window_height);
glutCreateWindow("3D CUDA Simulation");
glutDisplayFunc(display);
glutReshapeFunc(resize);
glutKeyboardFunc(keyboard);
glutMouseFunc(mouse_button);
glutMotionFunc(mouse_motion);
setInitialPosition();
if (!initGL())
return 1;
glutMainLoop();
hipFree(dst_point);
hipFree(de_point);
hipFree(dJ_point);
for (int i = 0 ; i < num_points ; i++)
{
free (point[i]);
hipFree(d_point[i]);
hipFree(dv_point[i]);
hipFree(v_buff[i]);
hipFree(dcolsynctime[i]);
hipFree(dcolsyncindex[i]);
}
free (point);
hipFree(d_point);
hipFree(dv_point);
hipFree(v_buff);
hipFree(dcolsynctime);
hipFree(dcolsyncindex);
hipDeviceReset();
return 0;
}
| f209d3093e3db25a359ff0114bc308d05fee9bc8.cu | #include <stdio.h>
#include <stdlib.h>
#include <GL/gl.h>
#include <GL/glut.h>
#include <math.h>
#include <stdbool.h>
#include <omp.h>
#include <cuda.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include <cuda_runtime.h>
//TBD
//  radiative cooling
//  metaballs
//  VBO
//  make the initial particle layout spherical
//  make collision angle and distance adjustable with a slider
#define PI 3.141592653589793
// Physical parameters
#define cap 1000
#define ref 0.2
#define temp 3273
#define visc 9
#define GRAV (6.674*0.00000000000000000001)
#define density (2.5 * 1000000000000)
// Particle geometry
#define rad 50 //km
#define M (4 / 3 * PI * rad*rad*rad* density)//kg
// Rendering / simulation settings
#define dev 10
#define X 0
#define Y 1
#define Z 2
#define ANIM_START 0
#define ANIM 100
#define scale 0.01
#define colmargin 1.1
#define R (rad * scale)
#define INIT_WIDTH 800
#define INIT_HEIGHT 800
#define vision 40
#define Grid_x 32 // __syncthreads() cannot synchronize across blocks
#define Grid_y 8
#define Grid_z 1
#define Block_x 2
#define Block_y 2
#define Block_z 1
#define NUM_POINTS (Grid_x*Grid_y*Grid_z*Block_x*Block_y*Block_z)
unsigned int num_points = (dev + 1) * (dev + 1);
unsigned int window_width = INIT_WIDTH;
unsigned int window_height = INIT_HEIGHT;
double vision_size = vision;
float right_motion=0;
float up_motion=0;
double left, right, bottom, top;
float h_point[NUM_POINTS][3];
float v_point[NUM_POINTS][3];
float st_point[NUM_POINTS];
float e_point[NUM_POINTS];
float J_point[NUM_POINTS];
float h_buff[NUM_POINTS][3]={0};
float anim_time = ANIM_START;
float anim_dt = ANIM;
double phi = 30.0;
double theta = 30.0;
float light_pos[4];
int mouse_old_x, mouse_old_y;
bool motion_p;
bool motion_w;
double eye[3];
double center[3] = {0.0, 0.0, 0.0};
double up[3];
double ** point;
float (*d_point)[3];
float (*dv_point)[3];
float (*dst_point);
float (*de_point);
float (*dJ_point);
float (*v_buff)[3];
float colsynctime[NUM_POINTS][NUM_POINTS]={0};
int colsyncindex[NUM_POINTS][NUM_POINTS]={0};
float (*dcolsynctime)[NUM_POINTS];
int (*dcolsyncindex)[NUM_POINTS];
__global__ void grav_coldetect(float(*pos)[3],float(*vec)[3],float(*coltime)[NUM_POINTS],int(*colindex)[NUM_POINTS]);
__global__ void grav_colv(float(*pos)[3],float(*vec)[3],float(*v_buff)[3],float(*sti),float(*e),float(*J),float(*coltime)[NUM_POINTS],int(*colindex)[NUM_POINTS]);
__global__ void grav_v(float(*pos)[3],float(*vec)[3],float(*v_buff)[3],int(*colindex)[NUM_POINTS]);
__global__ void grav_vupdate(float(*vec)[3],float(*v_buff)[3]);
__global__ void buff_clear(float(*v_buff)[3],float(*coltime)[NUM_POINTS],int(*colindex)[NUM_POINTS]);
__global__ void grav_p(float (*pos)[3], float(*vec)[3]);
// Basic vector math helpers
double dot(double vec0[], double vec1[])
{
return(vec0[X] * vec1[X] + vec0[Y] * vec1[Y] + vec0[Z] * vec1[Z]);
}
void cross(double vec0[], double vec1[], double vec2[])
{
vec2[X] = vec0[Y] * vec1[Z] - vec0[Z] * vec1[Y];
vec2[Y] = vec0[Z] * vec1[X] - vec0[X] * vec1[Z];
vec2[Z] = vec0[X] * vec1[Y] - vec0[Y] * vec1[X];
}
void normVec(double vec[])
{
double norm;
norm = sqrt(vec[X] * vec[X] + vec[Y] * vec[Y] + vec[Z] * vec[Z]);
vec[X] /= norm;
vec[Y] /= norm;
vec[Z] /= norm;
}
void normal(double p0[], double p1[], double p2[], double normal[])
{
unsigned int i;
double v0[3], v1[3];
for (i = 0; i < 3; i++) {
v0[i] = p2[i] - p1[i];
v1[i] = p0[i] - p1[i];
}
cross(v0, v1, normal);
normVec(normal);
}
// Collision detection
__global__ void grav_coldetect(float(*pos)[3],float(*vec)[3],float(*coltime)[NUM_POINTS],int(*colindex)[NUM_POINTS])
{
float xn,yn,zn,vx,vy,vz,dis,sq;
unsigned int thread_idx = threadIdx.x+blockDim.x*blockIdx.x;
unsigned int thread_idy = threadIdx.y+blockDim.y*blockIdx.y;
unsigned int thread_idz = threadIdx.z+blockDim.z*blockIdx.z;
unsigned int index = (blockDim.x * Grid_x) * (blockDim.y * Grid_y) * thread_idz + (blockDim.x * Grid_x) * thread_idy + thread_idx ;
float rvec[3]={0};
xn = pos[index][0];
yn = pos[index][1];
zn = pos[index][2];
vx = vec[index][0];
vy = vec[index][1];
vz = vec[index][2];
for (int i = 0 ; i < NUM_POINTS; i++)
{
sq = (float)pow((double)(xn-pos[i][0]),2) + pow((double)(yn-pos[i][1]),2) + pow((double)(zn-pos[i][2]),2);
dis = (float)sqrt((double)sq);
rvec[0]=(pos[i][0]-xn)/dis;
rvec[1]=(pos[i][1]-yn)/dis;
rvec[2]=(pos[i][2]-zn)/dis;
        // Check whether particle i has entered the collision zone
if (dis > 2 * R * colmargin && i != index)
{
colindex[index][i]=NUM_POINTS;
}
else if (dis <= 2 * R * colmargin && i != index)
{
            // Record the elapsed time since entering the collision zone
colindex[index][i]=i;
coltime[index][i]=(2*R*colmargin-dis)/((vx-vec[i][0])*rvec[0]+(vy-vec[i][1])*rvec[1]+(vz-vec[i][2])*rvec[2]);
}
else
{
colindex[index][i]=NUM_POINTS;
}
}
}
// Compute post-collision velocities
__global__ void grav_colv(float(*pos)[3],float(*vec)[3],float(*v_buff)[3],float(*sti),float(*e),float(*J),float(*coltime)[NUM_POINTS],int(*colindex)[NUM_POINTS])
{
float xn,yn,zn,sq,dis;
unsigned int thread_idx = threadIdx.x+blockDim.x*blockIdx.x;
unsigned int thread_idy = threadIdx.y+blockDim.y*blockIdx.y;
unsigned int thread_idz = threadIdx.z+blockDim.z*blockIdx.z;
unsigned int index = (blockDim.x * Grid_x) * (blockDim.y * Grid_y) * thread_idz + (blockDim.x * Grid_x) * thread_idy + thread_idx ;
int colnum=0;
float tmptime=0;
int tmpindex=0;
int coldex=0;
float repul=0;
float rvec[3]={0};
float Vl[3]={0};
float Vr[3]={0};
float Vh[3]={0};
float vl_buff[3]={0};
float vr_buff[3]={0};
float vcol_buff[3]={0};
float dotV=0;
xn = pos[index][0];
yn = pos[index][1];
zn = pos[index][2];
vl_buff[0]=vec[index][0];
vl_buff[1]=vec[index][1];
vl_buff[2]=vec[index][2];
for (int i = 0 ; i < NUM_POINTS; i++){
if(colindex[index][i]!=NUM_POINTS){
colnum++;
}
}
if(colnum>0){
        // Index-sort collision partners by elapsed time since entering the collision zone
for (int i = 0 ; i < NUM_POINTS; i++){
for(int j = i+1; j < NUM_POINTS; j++){
if(coltime[index][i] > coltime[index][j]){
tmptime=coltime[index][i];
tmpindex=colindex[index][i];
coltime[index][i]=coltime[index][j];
colindex[index][i]=colindex[index][j];
coltime[index][j]=tmptime;
colindex[index][j]=tmpindex;
}
}
}
        // Process collisions starting from the longest elapsed time
for (int i=NUM_POINTS-1 ; i>=NUM_POINTS-colnum; i--){
coldex=colindex[index][i];
sq = (float)pow((double)(xn-pos[coldex][0]),2)+pow((double)(yn-pos[coldex][1]),2)+pow((double)(zn-pos[coldex][2]),2);
dis = (float)sqrt((double)sq);
            // Unit vector along the collision direction
rvec[0]=(pos[coldex][0]-xn)/dis;
rvec[1]=(pos[coldex][1]-yn)/dis;
rvec[2]=(pos[coldex][2]-zn)/dis;
            // Normal component of this particle's velocity
dotV=rvec[0]*vl_buff[0]+rvec[1]*vl_buff[1]+rvec[2]*vl_buff[2];
Vl[0]=dotV*rvec[0];
Vl[1]=dotV*rvec[1];
Vl[2]=dotV*rvec[2];
            // Normal component of the partner's velocity
dotV=rvec[0]*vec[coldex][0]+rvec[1]*vec[coldex][1]+rvec[2]*vec[coldex][2];
Vr[0]=dotV*rvec[0];
Vr[1]=dotV*rvec[1];
Vr[2]=dotV*rvec[2];
            // Tangential component of this particle's velocity
Vh[0]=vl_buff[0]-Vl[0];
Vh[1]=vl_buff[1]-Vl[1];
Vh[2]=vl_buff[2]-Vl[2];
            // Use the smaller of the two coefficients of restitution
repul=e[index];
if (e[coldex] < e[index]) {
repul=e[coldex];
}
            // Velocity update
vcol_buff[0]=Vh[0]+((1+repul)*Vr[0]+(1-repul)*Vl[0])/2;
vcol_buff[1]=Vh[1]+((1+repul)*Vr[1]+(1-repul)*Vl[1])/2;
vcol_buff[2]=Vh[2]+((1+repul)*Vr[2]+(1-repul)*Vl[2])/2;
            // Compute the partner's velocity
vr_buff[0]=vec[coldex][0]-Vr[0]+((1+repul)*Vl[0]+(1-repul)*Vr[0])/2;
vr_buff[1]=vec[coldex][1]-Vr[1]+((1+repul)*Vl[1]+(1-repul)*Vr[1])/2;
vr_buff[2]=vec[coldex][2]-Vr[2]+((1+repul)*Vl[2]+(1-repul)*Vr[2])/2;
            // Distribute the collision energy by viscosity ratio and convert it to heat (TBD: radiative cooling)
double Energy=0.5*M*(pow(vec[coldex][0],2)+pow(vec[coldex][1],2)+pow(vec[coldex][2],2)+pow(vl_buff[0],2)+pow(vl_buff[1],2)+pow(vl_buff[2],2) - (pow(vcol_buff[0],2)+pow(vcol_buff[1],2)+pow(vcol_buff[2],2)+pow(vr_buff[0],2)+pow(vr_buff[1],2)+pow(vr_buff[2],2))) / pow(scale,2) * 1000000;
J[index] += Energy / (pow(10.0,(double)(sti[index]-sti[coldex]))+1);
vl_buff[0]=vcol_buff[0];
vl_buff[1]=vcol_buff[1];
vl_buff[2]=vcol_buff[2];
            // Update viscosity and restitution: restitution falls linearly with temperature; viscosity drops one order of magnitude per 100-degree rise
e[index] = 1 - ((1-ref)/temp * J[index]/M/cap);
if ( e[index] < 0 ){ e[index] = 0; }
if ( e[index] > 1 ){ e[index] = 1; }
sti[index] = visc - ((J[index]/M/cap - temp) / 100);
}
v_buff[index][0]=vl_buff[0];
v_buff[index][1]=vl_buff[1];
v_buff[index][2]=vl_buff[2];
}
}
// Compute velocities after gravitational influence
__global__ void grav_v(float(*pos)[3],float(*vec)[3],float(*v_buff)[3],int(*colindex)[NUM_POINTS])
{
float xn,yn,zn,vx,vy,vz,sq,dis;
unsigned int thread_idx = threadIdx.x+blockDim.x*blockIdx.x;
unsigned int thread_idy = threadIdx.y+blockDim.y*blockIdx.y;
unsigned int thread_idz = threadIdx.z+blockDim.z*blockIdx.z;
unsigned int index = (blockDim.x * Grid_x) * (blockDim.y * Grid_y) * thread_idz + (blockDim.x * Grid_x) * thread_idy + thread_idx ;
int colnum=0;
int colchk=0;
float gravity=0;
//float rvec[3]={0};
//float dotV=0;
//float dotVcol=0;
//float v_add[3]={0};
//float vcol_add[3]={0};
xn = pos[index][0];
yn = pos[index][1];
zn = pos[index][2];
for (int i = 0 ; i < NUM_POINTS; i++){
if(colindex[index][i]!=NUM_POINTS){
colnum++;
}
}
if(colnum==0){
        // No collision: feel gravity from every other particle
vx = vec[index][0];
vy = vec[index][1];
vz = vec[index][2];
for (int i = 0 ; i < NUM_POINTS; i++){
if (i!=index) {
sq = (float)pow((double)(xn-pos[i][0]),2) + pow((double)(yn-pos[i][1]),2) + pow((double)(zn-pos[i][2]),2);
gravity=GRAV*M/sq*scale*scale;
dis = (float)sqrt((double)sq);
vx += ((pos[i][0]-xn)/dis)*gravity*ANIM*scale;
vy += ((pos[i][1]-yn)/dis)*gravity*ANIM*scale;
vz += ((pos[i][2]-zn)/dis)*gravity*ANIM*scale;
}
}
}
else {
        // Colliding: receive gravity from all particles except self and the collision partners
vx = v_buff[index][0];
vy = v_buff[index][1];
vz = v_buff[index][2];
}
/*
for (int i = 0 ; i < NUM_POINTS; i++){
colchk=0;
for (int j = NUM_POINTS-1 ; j >= 0; j--){
if(colindex[index][j]==i){
colchk=1;
break;
}
}
sq = (float)pow((double)(xn-pos[i][0]),2) + pow((double)(yn-pos[i][1]),2) + pow((double)(zn-pos[i][2]),2);
gravity=GRAV*M/sq*scale*scale;
dis = (float)sqrt((double)sq);
if (i!=index && colchk==0) {
vx += ((pos[i][0]-xn)/dis)*gravity*ANIM*scale;
vy += ((pos[i][1]-yn)/dis)*gravity*ANIM*scale;
vz += ((pos[i][2]-zn)/dis)*gravity*ANIM*scale;
}
}
}
    // Collision partners exert gravity and a repulsive force
for (int i = 0 ; i < NUM_POINTS; i++){
colchk=0;
for (int j = NUM_POINTS-1 ; j >= 0; j--){
if(colindex[index][j]==i){
colchk=1;
break;
}
}
if (i!=index && colchk==1) {
vx += ((pos[i][0]-xn)/dis)*gravity*ANIM*scale;
vy += ((pos[i][1]-yn)/dis)*gravity*ANIM*scale;
vz += ((pos[i][2]-zn)/dis)*gravity*ANIM*scale;
}
}
rvec[0]=vcol_add[0]/sqrt(pow(vcol_add[0],2)+pow(vcol_add[1],2)+pow(vcol_add[2],2));
rvec[1]=vcol_add[1]/sqrt(pow(vcol_add[0],2)+pow(vcol_add[1],2)+pow(vcol_add[2],2));
rvec[2]=vcol_add[2]/sqrt(pow(vcol_add[0],2)+pow(vcol_add[1],2)+pow(vcol_add[2],2));
dotV=rvec[0]*v_add[0]+rvec[1]*v_add[1]+rvec[2]*v_add[2];
//dotVcol=rvec[0]*vcol_add[0]+rvec[1]*vcol_add[1]+rvec[2]*vcol_add[2];
if(dotV>=0){
vx+=v_add[0];
vy+=v_add[1];
vz+=v_add[2];
}
else if(-dotV>=dotVcol){
vx+=v_add[0]+rvec[0]*dotVcol;
vy+=v_add[1]+rvec[1]*dotVcol;
vz+=v_add[2]+rvec[2]*dotVcol;
}
else{
vx+=v_add[0]+rvec[0]*dotV;
vy+=v_add[1]+rvec[1]*dotV;
vz+=v_add[2]+rvec[2]*dotV;
}
*/
v_buff[index][0] = vx;
v_buff[index][1] = vy;
v_buff[index][2] = vz;
}
__global__ void grav_vupdate(float(*vec)[3],float(*v_buff)[3])
{
unsigned int thread_idx = threadIdx.x+blockDim.x*blockIdx.x;
unsigned int thread_idy = threadIdx.y+blockDim.y*blockIdx.y;
unsigned int thread_idz = threadIdx.z+blockDim.z*blockIdx.z;
unsigned int index = (blockDim.x * Grid_x) * (blockDim.y * Grid_y) * thread_idz + (blockDim.x * Grid_x) * thread_idy + thread_idx ;
vec[index][0]=v_buff[index][0];
vec[index][1]=v_buff[index][1];
vec[index][2]=v_buff[index][2];
}
//clear the work buffers
__global__ void buff_clear(float(*v_buff)[3],float(*coltime)[NUM_POINTS],int(*colindex)[NUM_POINTS])
{
unsigned int thread_idx = threadIdx.x+blockDim.x*blockIdx.x;
unsigned int thread_idy = threadIdx.y+blockDim.y*blockIdx.y;
unsigned int thread_idz = threadIdx.z+blockDim.z*blockIdx.z;
unsigned int index = (blockDim.x * Grid_x) * (blockDim.y * Grid_y) * thread_idz + (blockDim.x * Grid_x) * thread_idy + thread_idx ;
for (int i=0; i < 3; i++){
v_buff[index][i]=0;
}
for (int i=0; i < NUM_POINTS; i++){
coltime[index][i]=0;
colindex[index][i]=NUM_POINTS;
}
}
//determine positions after the gravitational influence
__global__ void grav_p(float(*pos)[3], float(*vec)[3])
{
float xn,yn,zn,vx,vy,vz;
unsigned int thread_idx = threadIdx.x+blockDim.x*blockIdx.x;
unsigned int thread_idy = threadIdx.y+blockDim.y*blockIdx.y;
unsigned int thread_idz = threadIdx.z+blockDim.z*blockIdx.z;
unsigned int index = ( blockDim.x * (Grid_x - 1) + blockDim.x ) * ( blockDim.y * (Grid_y - 1) + blockDim.y ) * thread_idz + ( blockDim.x * (Grid_x - 1) + blockDim.x ) * thread_idy + thread_idx ;
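//note: blockDim.x*(Grid_x-1)+blockDim.x equals blockDim.x*Grid_x, so this index matches the linearisation used in the other kernels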
xn = pos[index][0];
yn = pos[index][1];
zn = pos[index][2];
vx = vec[index][0];
vy = vec[index][1];
vz = vec[index][2];
pos[index][0] = xn + vx * ANIM;
pos[index][1] = yn + vy * ANIM;
pos[index][2] = zn + vz * ANIM;
}
// Place the particles at their initial positions.
void setInitialPosition(void)
{
for (int i = 0; i < NUM_POINTS; i++) {
for (int j = 0 ; j < 3 ; j++){
h_point[i][j] = (float)(rand()-rand()) / RAND_MAX * INIT_WIDTH/vision*2 ;
v_point[i][j] = 0;
h_buff[i][j] = 0;
}
st_point[i]=visc;
e_point[i]=ref;
J_point[i]=cap*M*temp;
for (int j = 0; j < NUM_POINTS; j++) {
colsyncindex[i][j]=NUM_POINTS;
}
}
checkCudaErrors(cudaMalloc((void**)&d_point, 3 * NUM_POINTS * sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&dv_point, 3 * NUM_POINTS * sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&v_buff, 3 * NUM_POINTS * sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&dst_point, NUM_POINTS * sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&de_point, NUM_POINTS * sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&dJ_point, NUM_POINTS * sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&dcolsynctime, NUM_POINTS*NUM_POINTS * sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&dcolsyncindex, NUM_POINTS*NUM_POINTS * sizeof(int)));
checkCudaErrors(cudaMemcpy(d_point, h_point, 3 * NUM_POINTS * sizeof(float) , cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(dv_point, v_point, 3 * NUM_POINTS * sizeof(float) , cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(v_buff, h_buff, 3 * NUM_POINTS * sizeof(float) , cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(dst_point, st_point, NUM_POINTS * sizeof(float) , cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(de_point, e_point, NUM_POINTS * sizeof(float) , cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(dJ_point, J_point, NUM_POINTS * sizeof(float) , cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(dcolsynctime, colsynctime, NUM_POINTS*NUM_POINTS * sizeof(float) , cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(dcolsyncindex, colsyncindex, NUM_POINTS*NUM_POINTS * sizeof(int) , cudaMemcpyHostToDevice));
}
//CUDA launch function
void launchGPUKernel(unsigned int num_particles,float(*pos)[3],float(*vec)[3],float(*v_buff)[3],float(*sti),float(*e),float(*J),float(*coltime)[NUM_POINTS],int(*colindex)[NUM_POINTS])
{
dim3 grid(Grid_x,Grid_y,Grid_z);
dim3 block(Block_x,Block_y,Block_z);
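//per-frame pipeline: detect collisions -> resolve collision velocities -> apply gravity ->
//commit velocities -> clear work buffers -> integrate positions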
grav_coldetect<<<grid , block>>>(pos, vec,coltime,colindex);
grav_colv<<<grid , block>>>(pos,vec,v_buff,sti,e,J,coltime,colindex);
grav_v<<<grid , block>>>(pos,vec,v_buff,colindex);
grav_vupdate<<<grid , block>>>(vec,v_buff);
buff_clear<<<grid , block>>>(v_buff,coltime,colindex);
grav_p<<<grid , block>>>(pos,vec);
}
//animation step
void runGPUKernel(void)
{
launchGPUKernel(NUM_POINTS, d_point, dv_point,v_buff,dst_point, de_point,dJ_point,dcolsynctime,dcolsyncindex);
checkCudaErrors(cudaMemcpy(h_point, d_point, 3 * NUM_POINTS * sizeof(float) , cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(v_point, dv_point, 3 * NUM_POINTS * sizeof(float) , cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(h_buff, v_buff, 3 * NUM_POINTS * sizeof(float) , cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(st_point, dst_point, NUM_POINTS * sizeof(float) , cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(e_point, de_point, NUM_POINTS * sizeof(float) , cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(J_point, dJ_point, NUM_POINTS * sizeof(float) , cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(colsynctime,dcolsynctime, NUM_POINTS*NUM_POINTS * sizeof(float) , cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(colsyncindex,dcolsyncindex, NUM_POINTS*NUM_POINTS * sizeof(int) , cudaMemcpyDeviceToHost));
anim_time += anim_dt;
}
//view definition
void defineViewMatrix(double phi, double theta)
{
unsigned int i;
double c, s, xy_dist;
double x_axis[3], y_axis[3], z_axis[3];
// Set the viewpoint.
eye[Z] = sin(theta * PI / 180.0);
xy_dist = cos(theta * PI / 180.0);
c = cos(phi * PI / 180.0);
s = sin(phi * PI / 180.0);
eye[X] = xy_dist * c;
eye[Y] = xy_dist * s;
up[X] = - c * eye[Z];
up[Y] = - s * eye[Z];
up[Z] = s * eye[Y] + c * eye[X];
normVec(up);
// Define a coordinate system with the viewpoint at the origin.
for (i = 0; i < 3; i++)
{
z_axis[i] = eye[i] - center[i];
}
normVec(z_axis);
cross(up, z_axis, x_axis);
normVec(x_axis);
cross(z_axis, x_axis, y_axis);
gluLookAt(eye[X], eye[Y], eye[Z], center[X], center[Y], center[Z], up[X], up[Y], up[Z]);
}
void display(void)
{
double nrml_vec[3];
light_pos[0] = (float)eye[X];
light_pos[1] = (float)eye[Y];
light_pos[2] = (float)eye[Z];
light_pos[3] = 0.0f;
//start CUDA
runGPUKernel();
// light source setup
glLightfv(GL_LIGHT0, GL_POSITION, light_pos);
//glEnable(GL_LIGHTING);
glMatrixMode(GL_PROJECTION);
//glFrustum(-1000000, 1000000, -1000000, 1000000, -1000000, 1000000);
glLoadIdentity();
glOrtho(-vision_size-right_motion/2, vision_size+right_motion/2, -vision_size-right_motion/2, vision_size+right_motion/2, -100*vision_size, 100*vision_size);
glViewport(0, 0, window_width, window_height);
defineViewMatrix(phi, theta);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glBegin(GL_QUADS);
//printf("%f,%f,%f\n",h_point[0][0],h_point[0][1],h_point[0][2]);
//printf("%f,%f,%f\n",J_point[0]/M/cap,J_point[0]/M/cap,J_point[2]/M/cap);
//build spheres from polygons; TBD: metaballs
for (int k = 0 ; k < NUM_POINTS ; k++)
{
//change color according to temperature
if(J_point[k]/M/cap>10000){
glColor3f(0.75f,1.0f,1.0f);
}
else if(J_point[k]/M/cap>8000){
glColor3f((GLfloat)(1.0-0.25/2000*(J_point[k]/M/cap-8000)),1.0f,1.0f);
}
else if(J_point[k]/M/cap>6000){
glColor3f(1.0f,(GLfloat)(0.75+0.25/2000*(J_point[k]/M/cap-6000)),(GLfloat)(1.0/2000*(J_point[k]/M/cap-6000)));
}
else if(J_point[k]/M/cap>4000){
glColor3f(1.0f,(GLfloat)(0.25+0.5/2000*(J_point[k]/M/cap-4000)),0.0f);
}
else if(J_point[k]/M/cap>2000){
glColor3f((GLfloat)(0.5/2000*J_point[k]/M/cap),0.25f,0.0f);
}
else{
glColor3f((GLfloat)(0.5/2000*J_point[k]/M/cap),0.25/2000*J_point[k]/M/cap,0.0f);
}
for (int i = 0 ; i < dev + 1 ; i ++)
{
for (int j = 0 ; j < 2 * dev + 1 ; j++)
{
normal(point[i * (dev-1) + j],point[(i + 1) * (dev-1) + j + 1],point[(i+1) * (dev-1) + j],nrml_vec);
glNormal3dv(nrml_vec);
glVertex3d(point[i * (dev-1) + j][X] + h_point[k][X], point[i * (dev-1) + j][Y] + h_point[k][Y], point[i * (dev-1) + j][Z] + h_point[k][Z]);
glVertex3d(point[(i + 1) * (dev-1) + j][X] + h_point[k][X],point[(i + 1) * (dev-1) + j][Y] + h_point[k][Y],point[(i + 1) * (dev-1) + j][Z] + h_point[k][Z]);
glVertex3d(point[(i + 1) * (dev-1) + j + 1][X] + h_point[k][X], point[(i + 1) * (dev-1) + j + 1][Y] + h_point[k][Y], point[(i + 1) * (dev-1) + j + 1][Z] + h_point[k][Z]);
glVertex3d(point[i * (dev-1) + j + 1][X] + h_point[k][X],point[i * (dev-1) + j + 1][Y] + h_point[k][Y],point[i * (dev-1) + j + 1][Z] + h_point[k][Z]);
}
}
}
glEnd();
glutSwapBuffers();
glutPostRedisplay();
}
void mouse_button(int button, int state, int x, int y)
{
if ((state == GLUT_DOWN) && (button == GLUT_LEFT_BUTTON))
motion_p = true;
if ((state == GLUT_DOWN) && (button == GLUT_RIGHT_BUTTON))
motion_w = true;
else if (state == GLUT_UP) {
motion_p = false;
motion_w = false;
}
mouse_old_x = x;
mouse_old_y = y;
}
void mouse_motion(int x, int y)
{
int dx, dy;
dx = x - mouse_old_x;
dy = y - mouse_old_y;
if (motion_p) {
phi -= dx * 0.2;
theta += dy * 0.2;
}
if (motion_w) {
right_motion += dx ;
up_motion -= dy ;
}
mouse_old_x = x;
mouse_old_y = y;
glutPostRedisplay();
}
void resize(int width, int height)
{
window_width = width;
window_height = height;
}
void keyboard(unsigned char key, int x, int y)
{
switch (key) {
case 'q':
case 'Q':
case '\033':
exit(0);
default:
break;
}
}
bool initGL(void)
{
glClearColor(0.0f, 0.0f , 0.0f, 0.5f);
glEnable(GL_DEPTH_TEST);
glClearDepth(1.0);
glDepthFunc(GL_LESS);
glEnable(GL_LIGHT0);
return true;
}
int main(int argc, char** argv)
{
double yangle,zangle;
double r;
point = (double **)malloc(sizeof(double *) * num_points);
for (int i = 0 ; i < num_points ; i++)
{
point[i] = (double *)malloc(sizeof(double) * 3);
}
for (int i = 0 ; i < dev + 1; i ++)
{
zangle = i * PI / dev;
r=R * sin(zangle);
for (int j = 0 ; j < dev + 1; j++)
{
yangle=j * PI * 2 / dev;
point[i * dev + j][X] = r * sin(yangle);
point[i * dev + j][Y] = r * cos(yangle);
point[i * dev + j][Z] = R * cos(zangle);
}
}
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH);
glutInitWindowSize(window_width, window_height);
glutCreateWindow("3D CUDA Simulation");
glutDisplayFunc(display);
glutReshapeFunc(resize);
glutKeyboardFunc(keyboard);
glutMouseFunc(mouse_button);
glutMotionFunc(mouse_motion);
setInitialPosition();
if (!initGL())
return 1;
glutMainLoop();
cudaFree(dst_point);
cudaFree(de_point);
cudaFree(dJ_point);
//the device buffers are single allocations (freed below); indexing them per element on the host would dereference device pointers
for (int i = 0 ; i < num_points ; i++)
{
free (point[i]);
}
free (point);
cudaFree(d_point);
cudaFree(dv_point);
cudaFree(v_buff);
cudaFree(dcolsynctime);
cudaFree(dcolsyncindex);
cudaDeviceReset();
return 0;
}
|
8010ceeef33193f3a13552b2aecc621769b42133.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// PAising version 1.16. This program employs multi-spin coding.
// This program is introduced in the paper:
// L.Yu. Barash, M. Weigel, M. Borovsky, W. Janke, L.N. Shchur, GPU accelerated population annealing algorithm
// This program is licensed under a Creative Commons Attribution 4.0 International License:
// http://creativecommons.org/licenses/by/4.0/
//
// Use command line option -? to print list of available command line options.
// All of the command line options are optional.
//
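// A plausible way to build and run this program (assumed invocation, not part of the original sources):
//   hipcc -O3 <this file> -o PAisingMSC
//   ./PAisingMSC -R 20000 -t 100 -d 0.005 -f 1.0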
#include <iostream>
#include <fstream>
#include <iomanip>
#include <hiprand/hiprand_kernel.h>
#ifdef _WIN32 // this program is compatible with any of the Windows, Unix/Linux, MacOS environments
#include <direct.h>
#else
#include <sys/stat.h>
#endif
// #define MHR // uncomment/comment to enable/disable multi-histogram reweighting
// #define AdaptiveStep // uncomment/comment to enable/disable adaptive temperature step
// #define EnergiesPopStore // uncomment/comment to enable/disable storing energies at each T
#define L 64 // linear size of the system in x,y direction
#define Ldiv2 (L/2)
#define N (L*L)
#define RNGseed time(NULL) // Use 32-bit integer as a seed for random number generation, e.g., time(NULL)
typedef hiprandStatePhilox4_32_10_t RNGState;
#define MSbits 32 // Use 8, 16, 32 or 64 Multi-spin bits per word
unsigned int EQsweeps = 100; // number of equilibration sweeps
double Binit = 0; // initial inverse temperature
double Bfin = 1; // final inverse temperature
double dBinit = 0.005; // inverse temperature step
#ifdef AdaptiveStep
double MinOverlap = 0.85; // minimal value of acceptable overlap of energy histograms
double MaxOverlap = 0.87; // maximal value of acceptable overlap of energy histograms
#endif
int Rinit = 20000; // Initial size of population of replicas
int runs = 1; // number of population annealing algorithm independent runs
int OutputPrecision = 11; // precision (number of digits) of the output
const unsigned int AA = 1664525; // linear congruential generator parameters
const unsigned int CC = 1013904223;
#ifdef MHR
const short MHR_Niter = 1; // number of iterations for multi-histogram analysis (single iteration is usually sufficient)
#endif
const int boltzTableL = 2; // Boltzmann factor table length
const int nBmax = 10000; // number of temperature steps should not exceed nBmax
texture<unsigned int,1,hipReadModeElementType> boltzT;
using namespace std;
#define EQthreads 128 // number of threads per block for the equilibration kernel
#define Nthreads 1024 // number of threads per block for the parallel reduction algorithm
// Use Nthreads=1024 for CUDA compute capability 2.0 and above; Nthreads=512 for old devices with CUDA compute capability 1.x.
double* Qd; double* ioverlapd;
#if MSbits == 8
#define MultiSpin signed char
#elif MSbits == 16
#define MultiSpin signed short
#elif MSbits == 32
#define MultiSpin signed int
#elif MSbits == 64
#define MultiSpin signed long long int
#endif
// struct Replica covers all information about the replica including its configuration, sublattice magnetizations,
// internal energy and number of replica's offspring
struct Replica{
MultiSpin gA[N/2]; // sublattice configurations with multipsin-coding = one value in array represents
MultiSpin gB[N/2]; // spins of 8 different replicas in the same site in lattice
int IE[MSbits]; // internal energy
int M[MSbits]; // magnetization
unsigned int Roff[MSbits]; // number of replica's offspring
union{double ValDouble[2]; unsigned int ValInt[MSbits+2];} parSum; // these variables are used for storing sums
bool isActive[MSbits]; // isActive[i] determines if the i-th replica is active
};
// CUDA error checking macro
#define CUDAErrChk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess) {
fprintf(stderr,"GPUassert: %s ; %s ; line %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
template <class sometype> __inline__ __device__ sometype smallblockReduceSum(sometype val) // use when blockDim.x < 32
{ // blockDim.x must be a power of 2
static __shared__ sometype shared[32];
shared[threadIdx.x] = val;
for (unsigned int stride = blockDim.x >> 1; stride > 0; stride >>= 1){
__syncthreads(); if (threadIdx.x < stride) shared[threadIdx.x] += shared[threadIdx.x+stride];
}
__syncthreads(); return shared[0];
}
#if (defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300)
template <class sometype> __inline__ __device__ sometype warpReduceSum(sometype val)
{
for (int offset = warpSize/2; offset > 0; offset /= 2) val += __shfl_down_sync(0xFFFFFFFF, val, offset);
return val;
}
template <class sometype> __inline__ __device__ sometype blockReduceSum(sometype val) // use when blockDim.x is divisible by 32
{
static __shared__ sometype shared[32]; // one needs to additionally synchronize threads after execution
int lane = threadIdx.x % warpSize; // in the case of multiple use of blockReduceSum in a single kernel
int wid = threadIdx.x / warpSize;
val = warpReduceSum(val);
if (lane==0) shared[wid]=val;
__syncthreads();
val = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : 0;
if (wid==0) val = warpReduceSum(val);
return val;
}
#else
template <class sometype> __inline__ __device__ sometype blockReduceSum(sometype val) // blockDim.x must be a power of 2
{
static __shared__ sometype shared[Nthreads];
shared[threadIdx.x] = val;
for (unsigned int stride = blockDim.x >> 1; stride > 0; stride >>= 1){
__syncthreads(); if (threadIdx.x < stride) shared[threadIdx.x] += shared[threadIdx.x+stride];
}
__syncthreads(); return shared[0];
}
#endif
#if (__CUDACC_VER_MAJOR__ < 8) || ( defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 600 )
__device__ double atomicAdd(double* address, double val) // allows to use atomicAdd operation for double precision floating point values
{
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
#if (defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 320 && MSbits == 64)
__device__ unsigned long long int atomicXor(unsigned long long int* address, unsigned long long int val) // allows to use atomicXor operation for 64-bit integers
{
unsigned int val1,val2; val1 = val & 0xFFFFFFFF; val2 = val>>32;
val1 = atomicXor((unsigned int*)address,val1);
val2 = atomicXor((unsigned int*)address+1,val2);
return (unsigned long long int)val1 + ((unsigned long long int)val2)<<32;
}
#endif
__global__ void ReplicaInit(Replica* Rd, int rg, int R, unsigned long long rng_seed, unsigned long long initial_sequence){ // initialization of spin lattices of all replicas
unsigned int B = blockIdx.x, t = threadIdx.x;
RNGState localrng; hiprand_init(rng_seed,initial_sequence+(t+B*EQthreads),0,&localrng);
for (unsigned int idx = t; idx < (N/2); idx += EQthreads){
#if MSbits == 8
Rd[B].gA[idx] = hiprand(&localrng) & 0xFF;
Rd[B].gB[idx] = hiprand(&localrng) & 0xFF;
#elif MSbits == 16
Rd[B].gA[idx] = hiprand(&localrng) & 0xFFFF;
Rd[B].gB[idx] = hiprand(&localrng) & 0xFFFF;
#elif MSbits == 32
Rd[B].gA[idx] = hiprand(&localrng);
Rd[B].gB[idx] = hiprand(&localrng);
#elif MSbits == 64
Rd[B].gA[idx] = (((unsigned long long int)hiprand(&localrng))<<32) + hiprand(&localrng) ;
Rd[B].gB[idx] = (((unsigned long long int)hiprand(&localrng))<<32) + hiprand(&localrng) ;
#endif
}
if(t < MSbits) if((B*MSbits+t)<R) Rd[B].isActive[t] = true; else Rd[B].isActive[t] = false;
}
// parallel spin update
__global__ void checkKerALL(Replica* Rd, int rg, unsigned int sweeps, unsigned long long rng_seed, unsigned long long initial_sequence) // equilibration process
{
MultiSpin mspin; unsigned int B = blockIdx.x, t = threadIdx.x, ran, idx, i1, i3, i4, tx, ty; // B is replica index
RNGState localrng; hiprand_init(rng_seed,initial_sequence+(t+blockIdx.x*EQthreads),0,&localrng);
for(int sweep=0; sweep<sweeps; sweep++){ // sweeps loop
// sublattice A
for (idx = t; idx < (N/2); idx += EQthreads){ // sublattice A
ty = idx / Ldiv2; tx = idx - ty * Ldiv2;
i1 = ty * Ldiv2 + ((ty&1) ? (tx + 1) : (tx + Ldiv2 - 1)) % Ldiv2;
i3 = ((ty + L - 1) % L) * Ldiv2 + tx; i4 = ((ty + 1) % L) * Ldiv2 + tx;
mspin = Rd[B].gA[idx];
// detecting anti-parallel orientations with NN (Ii = S ^ Ni)
MultiSpin I1 = mspin ^ Rd[B].gB[i1]; // left- or right-neighbour in B
MultiSpin I2 = mspin ^ Rd[B].gB[idx]; // right- or left-neighbour spins in the sublattice B
MultiSpin I3 = mspin ^ Rd[B].gB[i3]; // lower-neighbour spins in the sublattice B
MultiSpin I4 = mspin ^ Rd[B].gB[i4]; // upper-neighbour spins in the sublattice B
// performing summation of anti-parallel couplings
MultiSpin x12 = I1 ^ I2;
MultiSpin x34 = I3 ^ I4;
MultiSpin a12 = I1 & I2;
MultiSpin a34 = I3 & I4;
MultiSpin sum0 = x12 ^ x34;
MultiSpin sum1 = x12 & x34 ^ a12 ^ a34;
MultiSpin sum2 = a12 & a34;
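// (sum2,sum1,sum0) acts as a carry-save adder: the three bit planes together encode,
// independently in every bit lane, the number (0..4) of antiparallel bonds I1..I4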
MultiSpin cond4 = 0;
MultiSpin cond8 = 0; MultiSpin imask=0x1; ran = hiprand(&localrng);
for (unsigned char i = 0; i < MSbits; ++i){
cond4 |= (-(ran < tex1Dfetch(boltzT, 0))) & imask;
cond8 |= (-(ran < tex1Dfetch(boltzT, 1))) & imask;
imask <<= 1; ran = AA * ran + CC;
}
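// Metropolis rule per bit lane: a flip with >=2 antiparallel neighbours (sum1|sum2) has dE<=0
// and is always accepted; with exactly one it is accepted with probability exp(-4*B) (cond4),
// and with none with probability exp(-8*B) (cond8)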
// acceptance mask
MultiSpin Acc = (sum1|sum2) | ( (~(sum1|sum2)) & ((sum0&cond4) | (~sum0&cond8)) );
// Metropolis update + store new configuration to global memory
Rd[B].gA[idx] = mspin ^ Acc;
}
__syncthreads();
// sublattice B
for (idx = t; idx < (N/2); idx += EQthreads){ // sublattice B
ty = idx / Ldiv2; tx = idx - ty * Ldiv2;
i1 = ty * Ldiv2 + ((ty&1) ? (tx + Ldiv2 - 1) : (tx + 1)) % Ldiv2;
i3 = ((ty + L - 1) % L) * Ldiv2 + tx; i4 = ((ty + 1) % L) * Ldiv2 + tx;
mspin = Rd[B].gB[idx];
MultiSpin I1 = mspin ^ Rd[B].gA[i1]; // left- or right-neighbour in A
MultiSpin I2 = mspin ^ Rd[B].gA[idx];// right- or left-neighbour spins in the sublattice A
MultiSpin I3 = mspin ^ Rd[B].gA[i3]; // lower-neighbour spins in the sublattice A
MultiSpin I4 = mspin ^ Rd[B].gA[i4]; // upper-neighbour spins in the sublattice A
MultiSpin x12 = I1 ^ I2;
MultiSpin x34 = I3 ^ I4;
MultiSpin a12 = I1 & I2;
MultiSpin a34 = I3 & I4;
MultiSpin sum0 = x12 ^ x34;
MultiSpin sum1 = x12 & x34 ^ a12 ^ a34;
MultiSpin sum2 = a12 & a34;
MultiSpin cond4 = 0;
MultiSpin cond8 = 0; MultiSpin imask=0x1; ran = hiprand(&localrng);
for (unsigned char i = 0; i < MSbits; ++i){
cond4 |= (-(ran < tex1Dfetch(boltzT, 0))) & imask;
cond8 |= (-(ran < tex1Dfetch(boltzT, 1))) & imask;
imask <<= 1; ran = AA * ran + CC;
}
MultiSpin Acc = (sum1|sum2) | ( (~(sum1|sum2)) & ((sum0&cond4) | (~sum0&cond8)) );
Rd[B].gB[idx] = mspin ^ Acc;
}
__syncthreads();
}
}
__global__ void energyKer(Replica* Rd) // calculation of energy and magnetization for each replica
{
int e, m; unsigned int t = threadIdx.x, idx, iL, iU, B = blockIdx.x, tx, ty;
MultiSpin sum0, sum1, sum2, sA, sB, Ai2, Bi2, Ai4, Bi4;
for (idx = t; idx < (N/2); idx += EQthreads){
if(t < EQthreads){
sA = Rd[B].gA[idx]; sB = Rd[B].gB[idx];
ty = idx / Ldiv2; tx = idx - ty * Ldiv2;
iL = ty * Ldiv2 + (tx + Ldiv2 - 1) % Ldiv2;
iU = ((ty + 1) % L) * Ldiv2 + tx;
if(ty&1){ Ai2 = sB; Bi2 = Rd[B].gA[iL]; }
else{ Ai2 = Rd[B].gB[iL]; Bi2 = sA; }
Ai4 = Rd[B].gB[iU]; Bi4 = Rd[B].gA[iU];
// detecting anti-parallel orientations
MultiSpin I1 = sA ^ Ai2;
MultiSpin I2 = sA ^ Ai4;
MultiSpin I3 = sB ^ Bi2;
MultiSpin I4 = sB ^ Bi4;
// performing summation of anti-parallel couplings
MultiSpin x12 = I1 ^ I2;
MultiSpin x34 = I3 ^ I4;
MultiSpin a12 = I1 & I2;
MultiSpin a34 = I3 & I4;
sum0 = x12 ^ x34;
sum1 = x12 & x34 ^ a12 ^ a34;
sum2 = a12 & a34;
}
// calculating energy contributions for replicas
for (unsigned char i = 0; i < MSbits; ++i){
if(t < EQthreads){
e = 2*((int)(sum0&0x1) + 2*(int)(sum1&0x1) + 4*(int)(sum2&0x1)) - 4;
m = 2*((int)(sA&0x1) + (int)(sB&0x1)) - 2;
} else e = m = 0;
e = blockReduceSum<int>(e); __syncthreads();
m = blockReduceSum<int>(m); __syncthreads();
if (t == 0){
if (idx==t){
Rd[B].IE[i] = e;
Rd[B].M[i] = m;
}else{
Rd[B].IE[i] += e;
Rd[B].M[i] += m;
}
}
// bit shift operation => moving to next replica in bit string
sum0 >>= 1; sum1 >>= 1; sum2 >>= 1;
sA >>= 1; sB >>= 1;
}
}
}
__global__ void QKer(Replica* Rd, int rg, double dB, double Emean, int CalcPart, double* Qd) // calculation of partition function ratio
{
if(CalcPart==0){ // first part of the calculation
double factor; int idx = blockIdx.x; int br = threadIdx.x; // summation of exponential
factor = Rd[idx].isActive[br] ? exp(-dB*(Rd[idx].IE[br]-Emean)) : 0.0 ; // Boltzmann-like factors
#if MSbits < 32
factor = smallblockReduceSum<double>(factor);
#else
factor = blockReduceSum<double>(factor);
#endif
if (br == 0) Rd[idx].parSum.ValDouble[0] = factor; // is saved to global memory
} else if(CalcPart==1){ // second part of the calculation
double factor; int t = threadIdx.x; int b = blockIdx.x;
int idx = t + Nthreads * b;
factor = (idx < rg) ? Rd[idx].parSum.ValDouble[0]: 0.0;
factor = blockReduceSum<double>(factor);
if(t == 0 ) Rd[idx].parSum.ValDouble[1] = factor; // sum for all threads in current block is saved to global memory
} else{ // third part of the calculation, summation of the partial sums
double factor; int j, t = threadIdx.x; double MyParSum = 0;
for (j=0; j*Nthreads < rg; j += Nthreads){
factor = (t+j)*Nthreads < rg ? Rd[(t+j)*Nthreads].parSum.ValDouble[1] : 0.0;
factor = blockReduceSum<double>(factor); __syncthreads();
MyParSum += factor;
}
if(t==0) *Qd = MyParSum;
}
}
__global__ void CalcTauKer(Replica* Rd, int Rinit, int R, int rg, double lnQ, double dB, unsigned long long rng_seed, unsigned long long initial_sequence) // calculation of numbers of copies for all replicas
{
int t = threadIdx.x; int b = blockIdx.x;
unsigned char br = blockIdx.y; // multispin replica index
int idx = t + Nthreads * b; double mu, mufloor;
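// nearest-integer resampling: the expected number of copies is
// tau_i = (Rinit/R) * exp(-dB*E_i) / Q; each replica receives floor(tau_i) offspring
// plus one more with probability tau_i - floor(tau_i)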
if (idx < rg) if (Rd[idx].isActive[br]){ // nearest integer resampling
mu = ((double)Rinit)/R*exp(-dB*(double)Rd[idx].IE[br] - lnQ);
mufloor = floor(mu);
RNGState localrng; hiprand_init(rng_seed,initial_sequence+(br+MSbits*idx),0,&localrng);
if(hiprand_uniform_double(&localrng) < (mu-mufloor))
Rd[idx].Roff[br] = mufloor + 1;
else Rd[idx].Roff[br] = mufloor; // number of copies
} else Rd[idx].Roff[br] = 0;
}
__global__ void CalcParSum(Replica* Rd, int rg, int CalcPart, int* Rnew)
{
if(CalcPart==0){ // first part of the calculation
unsigned int parS; int t = threadIdx.x; int b = blockIdx.x;
parS = Rd[b].Roff[t]; // (Rd[b].Roff[0] + Rd[b].Roff[1] + ... + Rd[b].Roff[MSbits-1]) is saved to global memory
#if MSbits < 32
parS = smallblockReduceSum<unsigned int>(parS);
#else
parS = blockReduceSum<unsigned int>(parS);
#endif
if(t==0) Rd[b].parSum.ValInt[MSbits] = parS;
} else if(CalcPart==1){ // second part of the calculation
unsigned int parS; int t = threadIdx.x; int b = blockIdx.x; int idx = t + b*Nthreads;
parS = (idx < rg) ? Rd[idx].parSum.ValInt[MSbits] : 0;
parS = blockReduceSum<unsigned int>(parS);
// sum of partial sums for replica groups b*Nthreads,b*Nthreads+1,...,(b+1)*Nthreads-1 is saved to global memory.
if(t==0) Rd[idx].parSum.ValInt[MSbits+1] = parS;
} else{ // third part of the calculation
unsigned int parS; int j, t = threadIdx.x, b = blockIdx.x;
unsigned char br = blockIdx.y; __shared__ unsigned int val;
int idx = t + Nthreads * b; unsigned int MyParSum = 0;
for (j = 0; j<b; j+=Nthreads){ // we sum of Roff for all blocks from 0 to (b-1) and for all multi-spin indices.
parS = (t+j < b) ? Rd[(t+j)*Nthreads].parSum.ValInt[MSbits+1] : 0;
parS = blockReduceSum<unsigned int>(parS);
if(t==0) val = parS; __syncthreads(); MyParSum += val;
}
if(idx < rg){
for(j=Nthreads*b;j<idx;j++) MyParSum+=Rd[j].parSum.ValInt[MSbits]; // we add parSum[MSbits] for current block threads from 0 to (t-1)
for(j=0;j<br;j++) MyParSum+=Rd[idx].Roff[j]; // we add Roff for j = 0,1,..., br-1.
Rd[idx].parSum.ValInt[br] = MyParSum; // we save partial sum
if(idx==(rg-1)) if(br==(MSbits-1)) *Rnew = MyParSum + Rd[idx].Roff[br]; // we save new population size
}
}
}
__global__ void resampleKer(Replica* Rd, Replica* RdNew, int rg) // renumeration and copying of the replicas (the main part of the resampling process)
{
int t = threadIdx.x + blockIdx.z*blockDim.x; // index of spin variable (from 0 -> N/2-1)
int bx = blockIdx.x; // represents index of group of replicas (j)
signed char by = blockIdx.y; // represents index of replica in group/word (k)
int it_k, it_j;
#if MSbits == 64
unsigned long long int mask = 0x1; mask <<= by; // mask for selecting spin from old population
unsigned long long int copy_sourceA = mask & Rd[bx].gA[t]; // selected spin from sublattice A
unsigned long long int copy_sourceB = mask & Rd[bx].gB[t]; // and B
#else
unsigned int mask = 0x1; mask <<= by; // mask for selecting spin from old population
unsigned int copy_sourceA = mask & Rd[bx].gA[t]; // selected spin from sublattice A
unsigned int copy_sourceB = mask & Rd[bx].gB[t]; // and B
#endif
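// RdNew was zero-initialised before this kernel, so a spin bit is written by XOR-ing the mask in;
// atomics are needed because several source replicas may write different bit planes of the same word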
for (int p = 0; p < Rd[bx].Roff[by]; ++p){
it_k = (Rd[bx].parSum.ValInt[by] + p) / rg;
it_j = (Rd[bx].parSum.ValInt[by] + p) % rg;
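// parSum.ValInt[by]+p is the global index of this offspring in the new population;
// it is split into a replica-group index it_j and a bit-plane index it_k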
#if MSbits == 8
mask = 0x1; mask <<= (it_k + ((t&3)<<3));
if(copy_sourceA!=0) atomicXor((unsigned int*)&(RdNew[it_j].gA[t-(t&3)]),mask);
if(copy_sourceB!=0) atomicXor((unsigned int*)&(RdNew[it_j].gB[t-(t&3)]),mask);
#elif MSbits == 16
mask = 0x1; mask <<= (it_k + ((t&1)<<4));
if(copy_sourceA!=0) atomicXor((unsigned int*)&(RdNew[it_j].gA[t-(t&1)]),mask);
if(copy_sourceB!=0) atomicXor((unsigned int*)&(RdNew[it_j].gB[t-(t&1)]),mask);
#elif MSbits == 32
mask = 0x1; mask <<= it_k;
if(copy_sourceA!=0) atomicXor((unsigned int*)&(RdNew[it_j].gA[t]),mask);
if(copy_sourceB!=0) atomicXor((unsigned int*)&(RdNew[it_j].gB[t]),mask);
#elif MSbits == 64
mask = 0x1; mask <<= it_k;
if(copy_sourceA!=0) atomicXor((unsigned long long int*)&(RdNew[it_j].gA[t]),mask);
if(copy_sourceB!=0) atomicXor((unsigned long long int*)&(RdNew[it_j].gB[t]),mask);
#endif
if(t==0) RdNew[it_j].isActive[it_k] = true;
else if(t==1) RdNew[it_j].IE[it_k] = Rd[bx].IE[by];
}
}
__global__ void CalcAverages(Replica* Repd, int rg, double* Averages) // calculation of observables via averaging over the population
{
int t = threadIdx.x, b = blockIdx.x, by = blockIdx.y; int idx = t + Nthreads * b;
double currE,currE2,currM,currM2,currM4;
if(idx<rg) if(Repd[idx].isActive[by]){
currE = Repd[idx].IE[by]; currM = Repd[idx].M[by]; if(currM<0) currM=-currM;
} else{ currE = 0; currM = 0;} else{ currE = 0; currM = 0;}
currE2 = currE*currE; currM2 = currM*currM; currM4 = currM2*currM2;
currE = blockReduceSum<double>(currE); if(t==0) atomicAdd(&Averages[0], currE); __syncthreads();
currE2 = blockReduceSum<double>(currE2); if(t==0) atomicAdd(&Averages[1], currE2); __syncthreads();
currM = blockReduceSum<double>(currM); if(t==0) atomicAdd(&Averages[2], currM); __syncthreads();
currM2 = blockReduceSum<double>(currM2); if(t==0) atomicAdd(&Averages[3], currM2); __syncthreads();
currM4 = blockReduceSum<double>(currM4); if(t==0) atomicAdd(&Averages[4], currM4);
}
#ifdef MHR
__global__ void UpdateShistE(Replica* Repd, int rg, int* ShistE) // adding energy histogram of the current temperature step for the MHR analysis
{
int t = threadIdx.x, b = blockIdx.x, by = blockIdx.y; int idx = t + Nthreads * b;
if(idx<rg) if(Repd[idx].isActive[by]){
atomicAdd(&ShistE[(2*N+Repd[idx].IE[by])/4],1);
}
}
#endif
#ifdef AdaptiveStep
__global__ void HistogramOverlap(Replica* Repd, int Rinit, int R, int rg, double lnQ, double dB, double* overlap) // calculating histogram overlap
{
double PartialOverlap;
int t = threadIdx.x, idx = threadIdx.x + Nthreads * blockIdx.x, by = blockIdx.y;
if(idx<rg && Repd[idx].isActive[by])
PartialOverlap = min(1.0,((double)Rinit)/R*exp(-dB*(double)Repd[idx].IE[by] - lnQ));
else PartialOverlap = 0;
PartialOverlap = blockReduceSum<double>(PartialOverlap);
if(t==0) atomicAdd(overlap,PartialOverlap);
}
double CalcOverlap(Replica* Rep_d, double dB, int R, double Emean){ // Calculates histogram overlap
double q, lnQ, ioverlaph;
int rg = (int)ceil(R/(float)MSbits);
int NblocksR = (int)ceil(rg/(double)Nthreads);
dim3 DimGridR(NblocksR,MSbits,1);
hipLaunchKernelGGL(( QKer) , dim3(rg), dim3(MSbits) , 0, 0, Rep_d, rg, dB, Emean, 0, Qd);
CUDAErrChk( hipPeekAtLastError() );
CUDAErrChk( hipDeviceSynchronize() );
hipLaunchKernelGGL(( QKer) , dim3(NblocksR), dim3(Nthreads) , 0, 0, Rep_d, rg, dB, Emean, 1, Qd);
CUDAErrChk( hipPeekAtLastError() );
CUDAErrChk( hipDeviceSynchronize() );
hipLaunchKernelGGL(( QKer) , dim3(1), dim3(Nthreads) , 0, 0, Rep_d, rg, dB, Emean, 2, Qd);
CUDAErrChk( hipPeekAtLastError() );
CUDAErrChk( hipDeviceSynchronize() );
CUDAErrChk( hipMemcpy(&q,Qd,sizeof(double),hipMemcpyDeviceToHost) );
lnQ = -dB * Emean + log(q) - log((double)R);
CUDAErrChk( hipMemset(ioverlapd, 0, sizeof(double)) );
hipLaunchKernelGGL(( HistogramOverlap), dim3(DimGridR),dim3(Nthreads), 0, 0, Rep_d, Rinit, R, rg, lnQ, dB, ioverlapd);
CUDAErrChk( hipPeekAtLastError() );
CUDAErrChk( hipDeviceSynchronize() );
CUDAErrChk( hipMemcpy(&ioverlaph,ioverlapd,sizeof(double),hipMemcpyDeviceToHost) );
return (double)ioverlaph/R;
}
#endif
char *optarg; int opterr = 1, optind = 1, optopt, optreset;
int getopt(int nargc, char * const nargv[], const char *ostr)
{
static char *place = (char*)""; const char *oli;
if (optreset || !*place) {
optreset = 0;
if (optind >= nargc || *(place = nargv[optind]) != '-') { place = (char*)""; return (-1); }
if (place[1] && *++place == '-') { ++optind; place = (char*)""; return (-1); }
}
if ((optopt = (int)*place++) == (int)':' || !(oli = strchr(ostr, optopt))) {
if (optopt == (int)'-') return (-1);
if (!*place) ++optind;
if (opterr && *ostr != ':') (void)printf("illegal option -- %c\n", optopt);
return ((int)'?');
}
if (*++oli != ':') { optarg = NULL; if (!*place) ++optind; }
else {
if (*place) optarg = place; else if (nargc <= ++optind) {
place = (char*)""; if (*ostr == ':') return ((int)':');
if (opterr) (void)printf("option requires an argument -- %c\n", optopt);
return ((int)'?');
}
else optarg = nargv[optind];
place = (char*)""; ++optind;
}
return (optopt);
}
void PrintParameterUsage(){
cout << " Usage: PAisingMSC [options]\n"
<< " Note: all of the options are optional. Default parameter values are listed in the head of the source code. \n"
<< " Possible command line options are:\n\n"
<< " -R Rinit ( Rinit = initial size of population of replicas )\n"
<< " -t EQsweeps ( EQsweeps = number of equilibration sweeps )\n"
<< " -d dBinit ( dBinit = inverse temperature step )\n"
<< " -f Bfin ( Bfin = final value of inverse temperature )\n"
<< " -M runs ( runs = number of population annealing algorithm independent runs )\n"
<< " -s RNGseed ( RNGseed = seed for random number generation )\n"
<< " -P OutputPrecision ( OutputPrecision = precision (number of digits) of the output )\n"
<< " -o dataDirectory ( dataDirectory = data directory name )\n";
}
int main(int argc, char** argv)
{
// data directory name + create
char dataDir[200]; unsigned long long rng_seed = RNGseed; int optdir = 0;
int optc, opti; double optf;
while ((optc = getopt (argc, argv, "R:t:d:f:M:s:P:o:?")) != -1) // Processing optional command line options
switch (optc)
{
case 'R': opti = atoi(optarg); if(opti) Rinit = opti; break; // -R Rinit
case 't': opti = atoi(optarg); EQsweeps = opti; break; // -t EQsweeps
case 'd': optf = atof(optarg); if(optf > 0.0) dBinit = optf; break; // -d dBinit
case 'f': optf = atof(optarg); if(optf > 0.0) Bfin = optf; break; // -f Bfin
case 'M': opti = atoi(optarg); if(opti) runs = opti; break; // -M runs
case 's': opti = atoi(optarg); if(opti) rng_seed = opti; break; // -s RNGseed
case 'P': opti = atoi(optarg); if(opti) OutputPrecision = opti; break; // -P OutputPrecision
case 'o': if(optarg[strlen(optarg)-1]=='/') sprintf(dataDir,"%s",optarg); // -o dataDir
else sprintf(dataDir,"%s/",optarg); optdir = 1; break;
case '?': PrintParameterUsage(); return 1;
}
if(optind < argc){
for (opti = optind; opti < argc; opti++) fprintf(stderr,"Non-option argument %s\n", argv[opti]);
return 1;
}
#ifdef AdaptiveStep
if(!optdir) sprintf(dataDir, "./dataMSC_L%d_R%d_EqSw%d/", L, Rinit, EQsweeps);
#else
if(!optdir) sprintf(dataDir, "./dataMSC_L%d_R%d_EqSw%d_dB%f/", L, Rinit, EQsweeps, dBinit);
#endif
#if defined(_WIN32)
_mkdir(dataDir);
#else
mkdir(dataDir, 0777);
#endif
int rmin=0, rmax=runs-1; unsigned long long initial_sequence = 0; int rg;
double B[nBmax], Binc[nBmax]; B[0]=Binc[0]=Binit; double totPop=0;
// creating data arrays for thermodynamic variables and errors
double E[nBmax]; double M[nBmax]; double M2[nBmax]; double M4[nBmax];
double C[nBmax];
double lnQ[nBmax]; // partition function ratio
double S[nBmax]; // entropy
double BF[nBmax]; // dimensionless free energy estimate
BF[0] = - N*log(2.0); // its value at infinite temperature
int R[nBmax]; // population size
int nB;
// CUDAErrChk( hipSetDevice(0) ); // uncomment to explicitly select device number in a setup with multiple cards
CUDAErrChk(hipDeviceSetCacheConfig(hipFuncCachePreferL1)); // prefer larger L1 cache and smaller shared memory
// GPU execution time
hipEvent_t start, stop; float Etime;
CUDAErrChk( hipEventCreate(&start) );
CUDAErrChk( hipEventCreate(&stop) );
// start evaluation time measurement
hipEventRecord(start, 0);
double *Averages; double Averages_h[5]; int* Ridev;
CUDAErrChk( hipMalloc((void**)&Averages,5*sizeof(double)) );
CUDAErrChk( hipMalloc((void**)&Qd,sizeof(double)) );
CUDAErrChk( hipMalloc((void**)&Ridev,sizeof(int)) );
CUDAErrChk( hipMalloc((void**)&ioverlapd,sizeof(double)) );
// random seed
cout <<"RNG initial seed: "<< rng_seed<<"\n";
R[0] = Rinit;
cout << "Memory use of one replica: " << sizeof(Replica) / 1024.0 / (double)MSbits << " kB \n";
cout << "Memory use of the entire population of " << R[0] << " replicas: "
<< ceil(R[0]/(double)MSbits)*sizeof(Replica) / 1024.0 / 1024.0 << " MB \n"; fflush(stdout);
// creating energy spectrum for multi-histogram reweighting
#ifdef MHR
int Ei[N+1];
for (int i = 0; i < N+1; ++i){
Ei[i] = 4*i - 2*N;
}
#endif
Replica* Rep_d;
unsigned int boltzGPU[boltzTableL]; // Boltzman factor table - host version
unsigned int* boltztext;
// memory allocation for Boltzmann factor table
CUDAErrChk( hipMalloc((void **)&boltztext, boltzTableL * sizeof(unsigned int)) );
// binding references (global & texture memory buffers)
CUDAErrChk( hipBindTexture(NULL,boltzT,boltztext,boltzTableL * sizeof(unsigned int)) );
int Ethreads = 1; while(Ethreads < EQthreads) Ethreads <<= 1;
for (int r = rmin; r <= rmax; ++r){
rg = (int)ceil(R[0]/(float)MSbits); // number of replica groups (R / MSbits)
double sumlnQ = 0.0; double q; double Emean = 0.0;
CUDAErrChk( hipMalloc((void **)&Rep_d,rg*sizeof(Replica)) );
int NblocksR = (int)ceil(rg/(float)Nthreads);
hipLaunchKernelGGL(( ReplicaInit) , dim3(rg), dim3(EQthreads) , 0, 0, Rep_d,rg,R[0],rng_seed,initial_sequence); initial_sequence+=rg*EQthreads;
CUDAErrChk( hipPeekAtLastError() );
CUDAErrChk( hipDeviceSynchronize() );
// compute energy of all replicas at zero temperature (for 1st resampling)
hipLaunchKernelGGL(( energyKer) , dim3(rg), dim3(Ethreads) , 0, 0, Rep_d);
CUDAErrChk( hipPeekAtLastError() );
CUDAErrChk( hipDeviceSynchronize() );
// array for summing the energy histograms over inverse temperatures
#ifdef MHR
int ShistE[N+1]; int* ShistEd;
CUDAErrChk( hipMalloc((void**)&ShistEd,(N+1)*sizeof(int)) );
CUDAErrChk( hipMemset(ShistEd,0,(N+1)*sizeof(int)) );
dim3 DimGridR(NblocksR,MSbits,1);
hipLaunchKernelGGL(( UpdateShistE), dim3(DimGridR),dim3(Nthreads), 0, 0, Rep_d, rg, ShistEd);
CUDAErrChk( hipPeekAtLastError() );
CUDAErrChk( hipDeviceSynchronize() );
#endif
// ------------------------------------------------------------------
// population annealing
// ------------------------------------------------------------------
int i=1, iprev=0; double deltaBeta=dBinit; B[i]=Binc[i]=B[iprev]+deltaBeta;
while(B[i]<=Bfin) {
// Boltzmann factor tabulation (only two are relevant: exp(-4*B);exp(-8*B))
boltzGPU[0] = ceil(4294967296.*exp(-4*B[i]));
boltzGPU[1] = ceil(4294967296.*exp(-8*B[i]));
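// the factors are stored as (probability * 2^32) so the acceptance test in checkKerALL
// reduces to a single comparison against a uniform 32-bit random integer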
// copying table to texture memory - boltztext is bounded with boltzT
CUDAErrChk( hipMemcpy(boltztext, boltzGPU, boltzTableL * sizeof(unsigned int),hipMemcpyHostToDevice) );
// compute the partition function ratio - Q
NblocksR = (int)ceil(rg/(float)Nthreads);
dim3 DimGridR(NblocksR,MSbits,1);
hipLaunchKernelGGL(( QKer) , dim3(rg), dim3(MSbits) , 0, 0, Rep_d, rg, B[i] - B[i-1], Emean, 0, Qd);
CUDAErrChk( hipPeekAtLastError() );
CUDAErrChk( hipDeviceSynchronize() );
hipLaunchKernelGGL(( QKer) , dim3(NblocksR), dim3(Nthreads) , 0, 0, Rep_d, rg, B[i] - B[i-1], Emean, 1, Qd);
CUDAErrChk( hipPeekAtLastError() );
CUDAErrChk( hipDeviceSynchronize() );
hipLaunchKernelGGL(( QKer) , dim3(1), dim3(Nthreads) , 0, 0, Rep_d, rg, B[i] - B[i-1], Emean, 2, Qd);
CUDAErrChk( hipPeekAtLastError() );
CUDAErrChk( hipDeviceSynchronize() );
CUDAErrChk( hipMemcpy(&q,Qd,sizeof(double),hipMemcpyDeviceToHost) );
lnQ[i] = -(B[i] - B[i-1])*Emean + log(q) -log((double)R[i-1]);
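// lnQ[i] = ln( (1/R) * sum_j exp(-(B[i]-B[i-1])*E_j) ); Emean was subtracted inside QKer
// for numerical stability and is restored here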
hipLaunchKernelGGL(( CalcTauKer) , dim3(DimGridR), dim3(Nthreads) , 0, 0, Rep_d, Rinit, R[i-1], rg, lnQ[i], B[i] - B[i-1],rng_seed,initial_sequence); initial_sequence+=rg*MSbits;
CUDAErrChk( hipPeekAtLastError() );
CUDAErrChk( hipDeviceSynchronize() );
// resampling new population
hipLaunchKernelGGL(( CalcParSum) , dim3(rg), dim3(MSbits) , 0, 0, Rep_d, rg, 0, Ridev);
CUDAErrChk( hipPeekAtLastError() );
CUDAErrChk( hipDeviceSynchronize() );
hipLaunchKernelGGL(( CalcParSum) , dim3(NblocksR), dim3(Nthreads) , 0, 0, Rep_d, rg, 1, Ridev);
CUDAErrChk( hipPeekAtLastError() );
CUDAErrChk( hipDeviceSynchronize() );
hipLaunchKernelGGL(( CalcParSum) , dim3(DimGridR), dim3(Nthreads) , 0, 0, Rep_d, rg, 2, Ridev);
CUDAErrChk( hipPeekAtLastError() );
CUDAErrChk( hipDeviceSynchronize() );
CUDAErrChk( hipMemcpy(&R[i], Ridev, sizeof(int),hipMemcpyDeviceToHost) );
dim3 DimGridRes(rg,MSbits,N/2/EQthreads); // resampleKer configuration with old value of rg
rg = (int)ceil(R[i]/(float)MSbits); // updated number of replica groups
DimGridR.x = NblocksR = (int)ceil(rg/(float)Nthreads); Replica* RepNew_d;
CUDAErrChk( hipMalloc((void**)&RepNew_d,rg*sizeof(Replica)) );
CUDAErrChk( hipMemset(RepNew_d,0,rg*sizeof(Replica)) );
CUDAErrChk( hipDeviceSynchronize() );
hipLaunchKernelGGL(( resampleKer) , dim3(DimGridRes), dim3(EQthreads) , 0, 0, Rep_d, RepNew_d, rg);
CUDAErrChk( hipPeekAtLastError() );
CUDAErrChk( hipDeviceSynchronize() );
Replica* RepDel = Rep_d;
Rep_d = RepNew_d;
CUDAErrChk( hipFree(RepDel) );
// equilibrate replicas for certain number of sweeps
hipLaunchKernelGGL(( checkKerALL) , dim3(rg), dim3(EQthreads) , 0, 0, Rep_d,rg,EQsweeps,rng_seed,initial_sequence); initial_sequence+=rg*EQthreads;
CUDAErrChk( hipPeekAtLastError() );
CUDAErrChk( hipDeviceSynchronize() );
// compute observables (E,M,O,F)
// compute energy and magnetization of all replicas
hipLaunchKernelGGL(( energyKer) , dim3(rg), dim3(Ethreads) , 0, 0, Rep_d);
CUDAErrChk( hipPeekAtLastError() );
CUDAErrChk( hipDeviceSynchronize() );
// saving results - energies
#ifdef EnergiesPopStore
Replica* Rep_h = (Replica*)malloc(rg*sizeof(Replica));
CUDAErrChk( hipMemcpy(Rep_h, Rep_d, rg*sizeof(Replica),hipMemcpyDeviceToHost) );
ofstream results;
char str[100];
char str2[100];
strcpy(str, dataDir);
sprintf(str2,"PA_energies_%d.dat",i);
strcat(str,str2);
results.open(str);
results.precision(OutputPrecision);
for (int j = 0; j < rg; ++j)
for (int l = 0; l < MSbits; ++l)
if(Rep_h[j].isActive[l]) results << Rep_h[j].IE[l] << " ";
results.close(); free(Rep_h);
#endif
#ifdef MHR
hipLaunchKernelGGL(( UpdateShistE), dim3(DimGridR),dim3(Nthreads), 0, 0, Rep_d, rg, ShistEd);
CUDAErrChk( hipPeekAtLastError() );
CUDAErrChk( hipDeviceSynchronize() );
#endif
CUDAErrChk( hipMemset(Averages, 0, 5*sizeof(double)) );
hipLaunchKernelGGL(( CalcAverages), dim3(DimGridR),dim3(Nthreads), 0, 0, Rep_d,rg,Averages);
CUDAErrChk( hipPeekAtLastError() );
CUDAErrChk( hipDeviceSynchronize() );
CUDAErrChk( hipMemcpy(Averages_h,Averages,5*sizeof(double),hipMemcpyDeviceToHost) );
E[i] = Emean = Averages_h[0] / R[i];
C[i] = (Averages_h[1] / R[i] - E[i]*E[i]) * B[i] * B[i];
M[i] = Averages_h[2] / R[i];
M2[i] = Averages_h[3] / R[i];
M4[i] = Averages_h[4] / R[i];
// dimensionless free energy
sumlnQ -= lnQ[i];
BF[i] = - N*log(2.0) + sumlnQ;
// entropy
S[i] = B[i]*E[i] - BF[i];
iprev=i; totPop+=R[i]; i++;
if(i>=nBmax){
#ifdef AdaptiveStep
fprintf(stderr,"Error: number of temperature steps exceeds nBmax=%d.\n Please consider increasing the population size or decreasing the value of MinOverlap or increasing the value of nBmax.\n",nBmax);
#else
fprintf(stderr,"Error: number of temperature steps exceeds nBmax=%d.\n Please consider increasing the inverse temperature step or increasing the value of nBmax.\n",nBmax);
#endif
return 1;
}
if (r==rmin){
#ifdef AdaptiveStep
double overlap, dBmin = 0, dBmax = deltaBeta, dBmean;
while(1){
overlap = CalcOverlap ( Rep_d, dBmax, R[iprev], Emean );
if ( (overlap >= MaxOverlap) && (B[iprev] + dBmax < Bfin) ) dBmax *= 1.1; else break;
}
if ( overlap >= MinOverlap ) dBmean = dBmax;
else while(1){ // obtaining optimal inverse temperature step with the bisection method
dBmean = 0.5 * (dBmin + dBmax);
overlap = CalcOverlap ( Rep_d, dBmean, R[iprev], Emean );
if ( overlap < MinOverlap ) dBmax = dBmean;
else if ( overlap >= MaxOverlap ) dBmin = dBmean;
else break;
}
if( (B[iprev] < Bfin) && (B[iprev] + dBmean > Bfin) ) deltaBeta = Bfin - B[iprev]; else deltaBeta = dBmean;
#endif
B[i] = Binc[i] = B[iprev] + deltaBeta;
} else B[i]=Binc[i];
}
CUDAErrChk( hipFree(Rep_d) );
nB=i;
// saving results
{
ofstream results;
char str[100];
char str2[100];
strcpy(str, dataDir);
sprintf(str2, "PA_results_run_%d.dat", r);
strcat(str,str2);
results.open(str);
results.precision(OutputPrecision);
for (int i = 0; i < nB; ++i) {
results << B[i] << " "
<< E[i] / N << " "
<< C[i] / N << " "
<< M[i] / N << " "
<< M2[i] / N / N << " "
<< M4[i] / N / N / N / N << " "
<< BF[i] / N << " "
<< S[i] / N << " "
<< R[i] << " "
<< lnQ[i] << "\n";
}
results.close();
}
// multi-histogam reweighting (MHR) analysis
#ifdef MHR
// declaring arrays used in MHR analysis
double lnOmega[N+1];
double E_MHR[nB*MHR_Niter];
double C_MHR[nB*MHR_Niter];
double BF_MHR[nB*MHR_Niter];
bool relTerm[N+1];
CUDAErrChk( hipMemcpy(ShistE,ShistEd,(N+1)*sizeof(int),hipMemcpyDeviceToHost) );
for (int l = 0; l < MHR_Niter; ++l){
// calculate lnOmega
double Sigma[nB];
double mSigma;
for (int k = 0; k < N+1; ++k){
// maxima of -S = BF - B*E
Sigma[0] = BF[0]-B[0]*Ei[k];
mSigma = Sigma[0];
for (int i = 1; i < nB; ++i){
Sigma[i] = BF[i]-B[i]*Ei[k];
if (mSigma < Sigma[i]){
mSigma = Sigma[i];
}
}
double sD = 0;
for (int i = 0; i < nB; ++i){
sD += R[i]*exp(Sigma[i]-mSigma);
}
if ((ShistE[k] == 0) || (sD == 0)){
relTerm[k] = false;
lnOmega[k] = 0;
} else {
relTerm[k] = true;
lnOmega[k] = log(ShistE[k]) - mSigma - log(sD);
}
}
// reweigting of observables
double expOm[N+1];
double Om[N+1];
double mOm;
for (int i = 0; i < nB; ++i){
// determine the maxima of the reweighting exponent
mOm = lnOmega[0] - B[i]*Ei[0];
for (int k = 0; k < N+1; ++k){
Om[k] = lnOmega[k] - B[i]*Ei[k];
if (mOm < Om[k]){
mOm = Om[k];
}
}
// calculate reweighting exponentials
double p = 0;
for (int k = 0; k < N+1; ++k){
expOm[k] = exp(Om[k] - mOm);
if (relTerm[k])
p += expOm[k];
}
double s = 0;
for (int k = 0; k < N+1; ++k){
if (relTerm[k])
s += Ei[k]*expOm[k];
}
E_MHR[i+l*nB] = s / p / N;
BF_MHR[i+l*nB] = - mOm - log(p);
BF[i] = BF_MHR[i+l*nB];
s = 0;
for (int k = 0; k < N+1; ++k){
if (relTerm[k])
s += pow(Ei[k]-E_MHR[i+l*nB]*N,2)*expOm[k];
}
C_MHR[i+l*nB] = B[i]*B[i] * s / p / N;
}
}
// saving results
{
ofstream results;
char MHRDataFile[100];
char str2[100];
strcpy(MHRDataFile, dataDir);
sprintf(str2,"PA_MHR_results_run_%d.dat",r);
strcat(MHRDataFile,str2);
results.open(MHRDataFile);
results.precision(OutputPrecision);
for (int i = 0; i < nB; ++i){
results << B[i] << " ";
for (int l = 0; l < MHR_Niter; ++l){
results << E_MHR[i+l*nB] << " ";
results << C_MHR[i+l*nB] << " ";
results << BF_MHR[i+l*nB] / N << " ";
}
results << "\n";
}
results.close();
}
CUDAErrChk( hipFree(ShistEd) );
#endif
}
CUDAErrChk( hipFree(Averages) );
CUDAErrChk( hipFree(Ridev) );
CUDAErrChk( hipFree(Qd) );
CUDAErrChk( hipFree(ioverlapd) );
CUDAErrChk( hipUnbindTexture(boltzT) );
CUDAErrChk( hipFree(boltztext));
CUDAErrChk( hipDeviceSynchronize() );
CUDAErrChk( hipEventRecord(stop, 0) );
CUDAErrChk( hipEventSynchronize(stop) );
CUDAErrChk( hipEventElapsedTime(&Etime, start, stop) );
cout << "Elapsed time: " << setprecision(8) << Etime/1000 << " s\n";
cout << "Time per spin-flip: " << setprecision(8) << Etime*1e6/EQsweeps/N/totPop << " ns\n";
CUDAErrChk( hipEventDestroy(start) );
CUDAErrChk( hipEventDestroy(stop) );
return 0;
}
| 8010ceeef33193f3a13552b2aecc621769b42133.cu | //
// PAising version 1.16. This program employs multi-spin coding.
// This program is introduced in the paper:
// L.Yu. Barash, M. Weigel, M. Borovsky, W. Janke, L.N. Shchur, GPU accelerated population annealing algorithm
// This program is licensed under a Creative Commons Attribution 4.0 International License:
// http://creativecommons.org/licenses/by/4.0/
//
// Use command line option -? to print list of available command line options.
// All of the command line options are optional.
//
#include <iostream>
#include <fstream>
#include <iomanip>
#include <curand_kernel.h>
#ifdef _WIN32 // this program is compatible with any of the Windows, Unix/Linux, MacOS environments
#include <direct.h>
#else
#include <sys/stat.h>
#endif
// #define MHR // uncomment/comment to enable/disable multi-histogram reweighting
// #define AdaptiveStep // uncomment/comment to enable/disable adaptive temperature step
// #define EnergiesPopStore // uncomment/comment to enable/disable storing energies at each T
#define L 64 // linear size of the system in x,y direction
#define Ldiv2 (L/2)
#define N (L*L)
#define RNGseed time(NULL) // Use 32-bit integer as a seed for random number generation, e.g., time(NULL)
typedef curandStatePhilox4_32_10_t RNGState;
#define MSbits 32 // Use 8, 16, 32 or 64 Multi-spin bits per word
unsigned int EQsweeps = 100; // number of equilibration sweeps
double Binit = 0; // initial inverse temperature
double Bfin = 1; // final inverse temperature
double dBinit = 0.005; // inverse temperature step
#ifdef AdaptiveStep
double MinOverlap = 0.85; // minimal value of acceptable overlap of energy histograms
double MaxOverlap = 0.87; // maximal value of acceptable overlap of energy histograms
#endif
int Rinit = 20000; // Initial size of population of replicas
int runs = 1; // number of population annealing algorithm independent runs
int OutputPrecision = 11; // precision (number of digits) of the output
const unsigned int AA = 1664525; // linear congruential generator parameters
const unsigned int CC = 1013904223;
#ifdef MHR
const short MHR_Niter = 1; // number of iterations for multi-histogram analysis (single iteration is usually sufficient)
#endif
const int boltzTableL = 2; // Boltzmann factor table length
const int nBmax = 10000; // number of temperature steps should not exceed nBmax
texture<unsigned int,1,cudaReadModeElementType> boltzT;
using namespace std;
#define EQthreads 128 // number of threads per block for the equilibration kernel
#define Nthreads 1024 // number of threads per block for the parallel reduction algorithm
// Use Nthreads=1024 for CUDA compute capability 2.0 and above; Nthreads=512 for old devices with CUDA compute capability 1.x.
double* Qd; double* ioverlapd;
#if MSbits == 8
#define MultiSpin signed char
#elif MSbits == 16
#define MultiSpin signed short
#elif MSbits == 32
#define MultiSpin signed int
#elif MSbits == 64
#define MultiSpin signed long long int
#endif
// struct Replica covers all information about the replica including its configuration, sublattice magnetizations,
// internal energy and number of replica's offspring
struct Replica{
MultiSpin gA[N/2]; // sublattice configurations with multipsin-coding = one value in array represents
MultiSpin gB[N/2]; // spins of 8 different replicas in the same site in lattice
int IE[MSbits]; // internal energy
int M[MSbits]; // magnetization
unsigned int Roff[MSbits]; // number of replica's offspring
union{double ValDouble[2]; unsigned int ValInt[MSbits+2];} parSum; // these variables are used for storing sums
bool isActive[MSbits]; // isActive[i] determines if the i-th replica is active
};
// CUDA error checking macro
#define CUDAErrChk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess) {
fprintf(stderr,"GPUassert: %s ; %s ; line %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
template <class sometype> __inline__ __device__ sometype smallblockReduceSum(sometype val) // use when blockDim.x < 32
{ // blockDim.x must be a power of 2
static __shared__ sometype shared[32];
shared[threadIdx.x] = val;
for (unsigned int stride = blockDim.x >> 1; stride > 0; stride >>= 1){
__syncthreads(); if (threadIdx.x < stride) shared[threadIdx.x] += shared[threadIdx.x+stride];
}
__syncthreads(); return shared[0];
}
#if (defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300)
template <class sometype> __inline__ __device__ sometype warpReduceSum(sometype val)
{
for (int offset = warpSize/2; offset > 0; offset /= 2) val += __shfl_down_sync(0xFFFFFFFF, val, offset);
return val;
}
template <class sometype> __inline__ __device__ sometype blockReduceSum(sometype val) // use when blockDim.x is divisible by 32
{
static __shared__ sometype shared[32]; // one needs to additionally synchronize threads after execution
int lane = threadIdx.x % warpSize; // in the case of multiple use of blockReduceSum in a single kernel
int wid = threadIdx.x / warpSize;
val = warpReduceSum(val);
if (lane==0) shared[wid]=val;
__syncthreads();
val = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : 0;
if (wid==0) val = warpReduceSum(val);
return val;
}
#else
template <class sometype> __inline__ __device__ sometype blockReduceSum(sometype val) // blockDim.x must be a power of 2
{
static __shared__ sometype shared[Nthreads];
shared[threadIdx.x] = val;
for (unsigned int stride = blockDim.x >> 1; stride > 0; stride >>= 1){
__syncthreads(); if (threadIdx.x < stride) shared[threadIdx.x] += shared[threadIdx.x+stride];
}
__syncthreads(); return shared[0];
}
#endif
#if (__CUDACC_VER_MAJOR__ < 8) || ( defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 600 )
__device__ double atomicAdd(double* address, double val) // allows to use atomicAdd operation for double precision floating point values
{
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
#if (defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 320 && MSbits == 64)
__device__ unsigned long long int atomicXor(unsigned long long int* address, unsigned long long int val) // allows to use atomicXor operation for 64-bit integers
{
unsigned int val1,val2; val1 = val & 0xFFFFFFFF; val2 = val>>32;
val1 = atomicXor((unsigned int*)address,val1);
val2 = atomicXor((unsigned int*)address+1,val2);
return (unsigned long long int)val1 + ((unsigned long long int)val2)<<32;
}
#endif
__global__ void ReplicaInit(Replica* Rd, int rg, int R, unsigned long long rng_seed, unsigned long long initial_sequence){ // initialization of spin lattices of all replicas
unsigned int B = blockIdx.x, t = threadIdx.x;
RNGState localrng; curand_init(rng_seed,initial_sequence+(t+B*EQthreads),0,&localrng);
for (unsigned int idx = t; idx < (N/2); idx += EQthreads){
#if MSbits == 8
Rd[B].gA[idx] = curand(&localrng) & 0xFF;
Rd[B].gB[idx] = curand(&localrng) & 0xFF;
#elif MSbits == 16
Rd[B].gA[idx] = curand(&localrng) & 0xFFFF;
Rd[B].gB[idx] = curand(&localrng) & 0xFFFF;
#elif MSbits == 32
Rd[B].gA[idx] = curand(&localrng);
Rd[B].gB[idx] = curand(&localrng);
#elif MSbits == 64
Rd[B].gA[idx] = (((unsigned long long int)curand(&localrng))<<32) + curand(&localrng) ;
Rd[B].gB[idx] = (((unsigned long long int)curand(&localrng))<<32) + curand(&localrng) ;
#endif
}
if(t < MSbits) if((B*MSbits+t)<R) Rd[B].isActive[t] = true; else Rd[B].isActive[t] = false;
}
// parallel spin update
__global__ void checkKerALL(Replica* Rd, int rg, unsigned int sweeps, unsigned long long rng_seed, unsigned long long initial_sequence) // equilibration process
{
MultiSpin mspin; unsigned int B = blockIdx.x, t = threadIdx.x, ran, idx, i1, i3, i4, tx, ty; // B is replica index
RNGState localrng; curand_init(rng_seed,initial_sequence+(t+blockIdx.x*EQthreads),0,&localrng);
for(int sweep=0; sweep<sweeps; sweep++){ // sweeps loop
// sublattice A
for (idx = t; idx < (N/2); idx += EQthreads){ // sublattice A
ty = idx / Ldiv2; tx = idx - ty * Ldiv2;
i1 = ty * Ldiv2 + ((ty&1) ? (tx + 1) : (tx + Ldiv2 - 1)) % Ldiv2;
i3 = ((ty + L - 1) % L) * Ldiv2 + tx; i4 = ((ty + 1) % L) * Ldiv2 + tx;
mspin = Rd[B].gA[idx];
// detecting anti-parallel orientations with NN (Ii = S ^ Ni)
MultiSpin I1 = mspin ^ Rd[B].gB[i1]; // left- or right-neighbour in B
MultiSpin I2 = mspin ^ Rd[B].gB[idx]; // right- or left-neighbour spins in the sublattice B
MultiSpin I3 = mspin ^ Rd[B].gB[i3]; // lower-neighbour spins in the sublattice B
MultiSpin I4 = mspin ^ Rd[B].gB[i4]; // upper-neighbour spins in the sublattice B
// performing summation of anti-parallel couplings
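			// bit-parallel 3-bit adder: for every replica (bit position) the binary digits sum2 sum1 sum0 encode the
			// number of anti-parallel couplings (0..4) of the considered spin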
MultiSpin x12 = I1 ^ I2;
MultiSpin x34 = I3 ^ I4;
MultiSpin a12 = I1 & I2;
MultiSpin a34 = I3 & I4;
MultiSpin sum0 = x12 ^ x34;
			MultiSpin sum1 = (x12 & x34) ^ a12 ^ a34;
MultiSpin sum2 = a12 & a34;
MultiSpin cond4 = 0;
MultiSpin cond8 = 0; MultiSpin imask=0x1; ran = curand(&localrng);
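			// cond4/cond8 mark, per replica, whether the random number accepts a flip with exactly one resp. zero
			// anti-parallel couplings: ran is compared against exp(-4*B) and exp(-8*B) scaled to 2^32 (boltzT texture),
			// and advanced by one LCG step (AA*ran+CC) for each subsequent bit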
for (unsigned char i = 0; i < MSbits; ++i){
cond4 |= (-(ran < tex1Dfetch(boltzT, 0))) & imask;
cond8 |= (-(ran < tex1Dfetch(boltzT, 1))) & imask;
imask <<= 1; ran = AA * ran + CC;
}
// acceptance mask
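			// accept unconditionally when at least two couplings are anti-parallel (dE <= 0); with exactly one (sum0 set)
			// require cond4, with none require cond8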
MultiSpin Acc = (sum1|sum2) | ( (~(sum1|sum2)) & ((sum0&cond4) | (~sum0&cond8)) );
// Metropolis update + store new configuration to global memory
Rd[B].gA[idx] = mspin ^ Acc;
}
__syncthreads();
// sublattice B
for (idx = t; idx < (N/2); idx += EQthreads){ // sublattice B
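			// same update as for sublattice A, with the roles of gA and gB exchanged and the row parity of the
			// horizontal neighbour mirrored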
ty = idx / Ldiv2; tx = idx - ty * Ldiv2;
i1 = ty * Ldiv2 + ((ty&1) ? (tx + Ldiv2 - 1) : (tx + 1)) % Ldiv2;
i3 = ((ty + L - 1) % L) * Ldiv2 + tx; i4 = ((ty + 1) % L) * Ldiv2 + tx;
mspin = Rd[B].gB[idx];
MultiSpin I1 = mspin ^ Rd[B].gA[i1]; // left- or right-neighbour in A
MultiSpin I2 = mspin ^ Rd[B].gA[idx];// right- or left-neighbour spins in the sublattice A
MultiSpin I3 = mspin ^ Rd[B].gA[i3]; // lower-neighbour spins in the sublattice A
MultiSpin I4 = mspin ^ Rd[B].gA[i4]; // upper-neighbour spins in the sublattice A
MultiSpin x12 = I1 ^ I2;
MultiSpin x34 = I3 ^ I4;
MultiSpin a12 = I1 & I2;
MultiSpin a34 = I3 & I4;
MultiSpin sum0 = x12 ^ x34;
			MultiSpin sum1 = (x12 & x34) ^ a12 ^ a34;
MultiSpin sum2 = a12 & a34;
MultiSpin cond4 = 0;
MultiSpin cond8 = 0; MultiSpin imask=0x1; ran = curand(&localrng);
for (unsigned char i = 0; i < MSbits; ++i){
cond4 |= (-(ran < tex1Dfetch(boltzT, 0))) & imask;
cond8 |= (-(ran < tex1Dfetch(boltzT, 1))) & imask;
imask <<= 1; ran = AA * ran + CC;
}
MultiSpin Acc = (sum1|sum2) | ( (~(sum1|sum2)) & ((sum0&cond4) | (~sum0&cond8)) );
Rd[B].gB[idx] = mspin ^ Acc;
}
__syncthreads();
}
}
__global__ void energyKer(Replica* Rd) // calculation of energy and magnetization for each replica
{
int e, m; unsigned int t = threadIdx.x, idx, iL, iU, B = blockIdx.x, tx, ty;
MultiSpin sum0, sum1, sum2, sA, sB, Ai2, Bi2, Ai4, Bi4;
for (idx = t; idx < (N/2); idx += EQthreads){
if(t < EQthreads){
sA = Rd[B].gA[idx]; sB = Rd[B].gB[idx];
ty = idx / Ldiv2; tx = idx - ty * Ldiv2;
iL = ty * Ldiv2 + (tx + Ldiv2 - 1) % Ldiv2;
iU = ((ty + 1) % L) * Ldiv2 + tx;
if(ty&1){ Ai2 = sB; Bi2 = Rd[B].gA[iL]; }
else{ Ai2 = Rd[B].gB[iL]; Bi2 = sA; }
Ai4 = Rd[B].gB[iU]; Bi4 = Rd[B].gA[iU];
// detecting anti-parallel orientations
MultiSpin I1 = sA ^ Ai2;
MultiSpin I2 = sA ^ Ai4;
MultiSpin I3 = sB ^ Bi2;
MultiSpin I4 = sB ^ Bi4;
// performing summation of anti-parallel couplings
MultiSpin x12 = I1 ^ I2;
MultiSpin x34 = I3 ^ I4;
MultiSpin a12 = I1 & I2;
MultiSpin a34 = I3 & I4;
sum0 = x12 ^ x34;
			sum1 = (x12 & x34) ^ a12 ^ a34;
sum2 = a12 & a34;
}
// calculating energy contributions for replicas
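		// for bit (replica) i: e = 2*nAP - 4 is the energy of the four bonds owned by this pair of A/B sites
		// (nAP anti-parallel couplings), m adds the two spins mapped from {0,1} to {-1,+1}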
for (unsigned char i = 0; i < MSbits; ++i){
if(t < EQthreads){
e = 2*((int)(sum0&0x1) + 2*(int)(sum1&0x1) + 4*(int)(sum2&0x1)) - 4;
m = 2*((int)(sA&0x1) + (int)(sB&0x1)) - 2;
} else e = m = 0;
e = blockReduceSum<int>(e); __syncthreads();
m = blockReduceSum<int>(m); __syncthreads();
if (t == 0){
if (idx==t){
Rd[B].IE[i] = e;
Rd[B].M[i] = m;
}else{
Rd[B].IE[i] += e;
Rd[B].M[i] += m;
}
}
// bit shift operation => moving to next replica in bit string
sum0 >>= 1; sum1 >>= 1; sum2 >>= 1;
sA >>= 1; sB >>= 1;
}
}
}
__global__ void QKer(Replica* Rd, int rg, double dB, double Emean, int CalcPart, double* Qd) // calculation of partition function ratio
{
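	// Q = (1/R) * sum over active replicas of exp(-dB*E), evaluated in three passes with Emean factored out for
	// numerical stability: (0) reduce within each replica group (one block per group, one thread per bit),
	// (1) reduce the group results within blocks of Nthreads groups, (2) sum the per-block partial sums in a single block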
if(CalcPart==0){ // first part of the calculation
double factor; int idx = blockIdx.x; int br = threadIdx.x; // summation of exponential
factor = Rd[idx].isActive[br] ? exp(-dB*(Rd[idx].IE[br]-Emean)) : 0.0 ; // Boltzmann-like factors
#if MSbits < 32
factor = smallblockReduceSum<double>(factor);
#else
factor = blockReduceSum<double>(factor);
#endif
if (br == 0) Rd[idx].parSum.ValDouble[0] = factor; // is saved to global memory
} else if(CalcPart==1){ // second part of the calculation
double factor; int t = threadIdx.x; int b = blockIdx.x;
int idx = t + Nthreads * b;
factor = (idx < rg) ? Rd[idx].parSum.ValDouble[0]: 0.0;
factor = blockReduceSum<double>(factor);
if(t == 0 ) Rd[idx].parSum.ValDouble[1] = factor; // sum for all threads in current block is saved to global memory
} else{ // third part of the calculation, summation of the partial sums
double factor; int j, t = threadIdx.x; double MyParSum = 0;
for (j=0; j*Nthreads < rg; j += Nthreads){
factor = (t+j)*Nthreads < rg ? Rd[(t+j)*Nthreads].parSum.ValDouble[1] : 0.0;
factor = blockReduceSum<double>(factor); __syncthreads();
MyParSum += factor;
}
if(t==0) *Qd = MyParSum;
}
}
__global__ void CalcTauKer(Replica* Rd, int Rinit, int R, int rg, double lnQ, double dB, unsigned long long rng_seed, unsigned long long initial_sequence) // calculation of numbers of copies for all replicas
{
int t = threadIdx.x; int b = blockIdx.x;
unsigned char br = blockIdx.y; // multispin replica index
int idx = t + Nthreads * b; double mu, mufloor;
if (idx < rg) if (Rd[idx].isActive[br]){ // nearest integer resampling
mu = ((double)Rinit)/R*exp(-dB*(double)Rd[idx].IE[br] - lnQ);
mufloor = floor(mu);
RNGState localrng; curand_init(rng_seed,initial_sequence+(br+MSbits*idx),0,&localrng);
if(curand_uniform_double(&localrng) < (mu-mufloor))
Rd[idx].Roff[br] = mufloor + 1;
else Rd[idx].Roff[br] = mufloor; // number of copies
} else Rd[idx].Roff[br] = 0;
}
__global__ void CalcParSum(Replica* Rd, int rg, int CalcPart, int* Rnew)
{
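	// three-pass prefix sum over the copy numbers Roff: pass 0 sums Roff within each replica group, pass 1 over blocks
	// of Nthreads groups, pass 2 assembles for every (group, bit) the total number of copies of all preceding replicas,
	// which becomes the destination offset of its first copy; the last replica also stores the new population size Rnew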
if(CalcPart==0){ // first part of the calculation
unsigned int parS; int t = threadIdx.x; int b = blockIdx.x;
parS = Rd[b].Roff[t]; // (Rd[b].Roff[0] + Rd[b].Roff[1] + ... + Rd[b].Roff[MSbits-1]) is saved to global memory
#if MSbits < 32
parS = smallblockReduceSum<unsigned int>(parS);
#else
parS = blockReduceSum<unsigned int>(parS);
#endif
if(t==0) Rd[b].parSum.ValInt[MSbits] = parS;
} else if(CalcPart==1){ // second part of the calculation
unsigned int parS; int t = threadIdx.x; int b = blockIdx.x; int idx = t + b*Nthreads;
parS = (idx < rg) ? Rd[idx].parSum.ValInt[MSbits] : 0;
parS = blockReduceSum<unsigned int>(parS);
// sum of partial sums for replica groups b*Nthreads,b*Nthreads+1,...,(b+1)*Nthreads-1 is saved to global memory.
if(t==0) Rd[idx].parSum.ValInt[MSbits+1] = parS;
} else{ // third part of the calculation
unsigned int parS; int j, t = threadIdx.x, b = blockIdx.x;
unsigned char br = blockIdx.y; __shared__ unsigned int val;
int idx = t + Nthreads * b; unsigned int MyParSum = 0;
		for (j = 0; j<b; j+=Nthreads){ // we sum Roff over all blocks from 0 to (b-1) and over all multi-spin indices.
parS = (t+j < b) ? Rd[(t+j)*Nthreads].parSum.ValInt[MSbits+1] : 0;
parS = blockReduceSum<unsigned int>(parS);
if(t==0) val = parS; __syncthreads(); MyParSum += val;
}
if(idx < rg){
for(j=Nthreads*b;j<idx;j++) MyParSum+=Rd[j].parSum.ValInt[MSbits]; // we add parSum[MSbits] for current block threads from 0 to (t-1)
for(j=0;j<br;j++) MyParSum+=Rd[idx].Roff[j]; // we add Roff for j = 0,1,..., br-1.
Rd[idx].parSum.ValInt[br] = MyParSum; // we save partial sum
if(idx==(rg-1)) if(br==(MSbits-1)) *Rnew = MyParSum + Rd[idx].Roff[br]; // we save new population size
}
}
}
__global__ void resampleKer(Replica* Rd, Replica* RdNew, int rg) // renumbering and copying of the replicas (the main part of the resampling process)
{
int t = threadIdx.x + blockIdx.z*blockDim.x; // index of spin variable (from 0 -> N/2-1)
int bx = blockIdx.x; // represents index of group of replicas (j)
signed char by = blockIdx.y; // represents index of replica in group/word (k)
int it_k, it_j;
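	// the p-th copy of replica (bx, by) goes to replica group it_j, bit it_k of the new population: the global copy
	// index parSum.ValInt[by]+p is split as it_j = index % rg (group) and it_k = index / rg (bit); since RdNew is
	// zero-initialised, spins are written by setting individual bits with atomicXor on 32-bit (64-bit for MSbits == 64) words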
#if MSbits == 64
unsigned long long int mask = 0x1; mask <<= by; // mask for selecting spin from old population
unsigned long long int copy_sourceA = mask & Rd[bx].gA[t]; // selected spin from sublattice A
unsigned long long int copy_sourceB = mask & Rd[bx].gB[t]; // and B
#else
unsigned int mask = 0x1; mask <<= by; // mask for selecting spin from old population
unsigned int copy_sourceA = mask & Rd[bx].gA[t]; // selected spin from sublattice A
unsigned int copy_sourceB = mask & Rd[bx].gB[t]; // and B
#endif
for (int p = 0; p < Rd[bx].Roff[by]; ++p){
it_k = (Rd[bx].parSum.ValInt[by] + p) / rg;
it_j = (Rd[bx].parSum.ValInt[by] + p) % rg;
#if MSbits == 8
mask = 0x1; mask <<= (it_k + ((t&3)<<3));
if(copy_sourceA!=0) atomicXor((unsigned int*)&(RdNew[it_j].gA[t-(t&3)]),mask);
if(copy_sourceB!=0) atomicXor((unsigned int*)&(RdNew[it_j].gB[t-(t&3)]),mask);
#elif MSbits == 16
mask = 0x1; mask <<= (it_k + ((t&1)<<4));
if(copy_sourceA!=0) atomicXor((unsigned int*)&(RdNew[it_j].gA[t-(t&1)]),mask);
if(copy_sourceB!=0) atomicXor((unsigned int*)&(RdNew[it_j].gB[t-(t&1)]),mask);
#elif MSbits == 32
mask = 0x1; mask <<= it_k;
if(copy_sourceA!=0) atomicXor((unsigned int*)&(RdNew[it_j].gA[t]),mask);
if(copy_sourceB!=0) atomicXor((unsigned int*)&(RdNew[it_j].gB[t]),mask);
#elif MSbits == 64
mask = 0x1; mask <<= it_k;
if(copy_sourceA!=0) atomicXor((unsigned long long int*)&(RdNew[it_j].gA[t]),mask);
if(copy_sourceB!=0) atomicXor((unsigned long long int*)&(RdNew[it_j].gB[t]),mask);
#endif
if(t==0) RdNew[it_j].isActive[it_k] = true;
else if(t==1) RdNew[it_j].IE[it_k] = Rd[bx].IE[by];
}
}
__global__ void CalcAverages(Replica* Repd, int rg, double* Averages) // calculation of observables via averaging over the population
{
int t = threadIdx.x, b = blockIdx.x, by = blockIdx.y; int idx = t + Nthreads * b;
double currE,currE2,currM,currM2,currM4;
	if(idx<rg && Repd[idx].isActive[by]){
		currE = Repd[idx].IE[by]; currM = Repd[idx].M[by]; if(currM<0) currM=-currM;
	} else{ currE = 0; currM = 0;}
currE2 = currE*currE; currM2 = currM*currM; currM4 = currM2*currM2;
currE = blockReduceSum<double>(currE); if(t==0) atomicAdd(&Averages[0], currE); __syncthreads();
currE2 = blockReduceSum<double>(currE2); if(t==0) atomicAdd(&Averages[1], currE2); __syncthreads();
currM = blockReduceSum<double>(currM); if(t==0) atomicAdd(&Averages[2], currM); __syncthreads();
currM2 = blockReduceSum<double>(currM2); if(t==0) atomicAdd(&Averages[3], currM2); __syncthreads();
currM4 = blockReduceSum<double>(currM4); if(t==0) atomicAdd(&Averages[4], currM4);
}
#ifdef MHR
__global__ void UpdateShistE(Replica* Repd, int rg, int* ShistE) // adds the energy histogram of the current inverse temperature step to the cumulative histogram used in the MHR analysis
{
int t = threadIdx.x, b = blockIdx.x, by = blockIdx.y; int idx = t + Nthreads * b;
if(idx<rg) if(Repd[idx].isActive[by]){
atomicAdd(&ShistE[(2*N+Repd[idx].IE[by])/4],1);
}
}
#endif
#ifdef AdaptiveStep
__global__ void HistogramOverlap(Replica* Repd, int Rinit, int R, int rg, double lnQ, double dB, double* overlap) // calculating histogram overlap
{
double PartialOverlap;
int t = threadIdx.x, idx = threadIdx.x + Nthreads * blockIdx.x, by = blockIdx.y;
if(idx<rg && Repd[idx].isActive[by])
PartialOverlap = min(1.0,((double)Rinit)/R*exp(-dB*(double)Repd[idx].IE[by] - lnQ));
else PartialOverlap = 0;
PartialOverlap = blockReduceSum<double>(PartialOverlap);
if(t==0) atomicAdd(overlap,PartialOverlap);
}
double CalcOverlap(Replica* Rep_d, double dB, int R, double Emean){ // Calculates histogram overlap
double q, lnQ, ioverlaph;
int rg = (int)ceil(R/(float)MSbits);
int NblocksR = (int)ceil(rg/(double)Nthreads);
dim3 DimGridR(NblocksR,MSbits,1);
QKer <<< rg, MSbits >>> (Rep_d, rg, dB, Emean, 0, Qd);
CUDAErrChk( cudaPeekAtLastError() );
CUDAErrChk( cudaDeviceSynchronize() );
QKer <<< NblocksR, Nthreads >>> (Rep_d, rg, dB, Emean, 1, Qd);
CUDAErrChk( cudaPeekAtLastError() );
CUDAErrChk( cudaDeviceSynchronize() );
QKer <<< 1, Nthreads >>> (Rep_d, rg, dB, Emean, 2, Qd);
CUDAErrChk( cudaPeekAtLastError() );
CUDAErrChk( cudaDeviceSynchronize() );
CUDAErrChk( cudaMemcpy(&q,Qd,sizeof(double),cudaMemcpyDeviceToHost) );
lnQ = -dB * Emean + log(q) - log((double)R);
CUDAErrChk( cudaMemset(ioverlapd, 0, sizeof(double)) );
HistogramOverlap<<<DimGridR,Nthreads>>>(Rep_d, Rinit, R, rg, lnQ, dB, ioverlapd);
CUDAErrChk( cudaPeekAtLastError() );
CUDAErrChk( cudaDeviceSynchronize() );
CUDAErrChk( cudaMemcpy(&ioverlaph,ioverlapd,sizeof(double),cudaMemcpyDeviceToHost) );
return (double)ioverlaph/R;
}
#endif
char *optarg; int opterr = 1, optind = 1, optopt, optreset;
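// minimal re-implementation of POSIX getopt() for command-line parsing, used on platforms where it is not provided (e.g. when building with MSVC)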
int getopt(int nargc, char * const nargv[], const char *ostr)
{
static char *place = (char*)""; const char *oli;
if (optreset || !*place) {
optreset = 0;
if (optind >= nargc || *(place = nargv[optind]) != '-') { place = (char*)""; return (-1); }
if (place[1] && *++place == '-') { ++optind; place = (char*)""; return (-1); }
}
if ((optopt = (int)*place++) == (int)':' || !(oli = strchr(ostr, optopt))) {
if (optopt == (int)'-') return (-1);
if (!*place) ++optind;
if (opterr && *ostr != ':') (void)printf("illegal option -- %c\n", optopt);
return ((int)'?');
}
if (*++oli != ':') { optarg = NULL; if (!*place) ++optind; }
else {
if (*place) optarg = place; else if (nargc <= ++optind) {
place = (char*)""; if (*ostr == ':') return ((int)':');
if (opterr) (void)printf("option requires an argument -- %c\n", optopt);
return ((int)'?');
}
else optarg = nargv[optind];
place = (char*)""; ++optind;
}
return (optopt);
}
void PrintParameterUsage(){
cout << " Usage: PAisingMSC [options]\n"
<< " Note: all of the options are optional. Default parameter values are listed in the head of the source code. \n"
<< " Possible command line options are:\n\n"
<< " -R Rinit ( Rinit = initial size of population of replicas )\n"
<< " -t EQsweeps ( EQsweeps = number of equilibration sweeps )\n"
<< " -d dBinit ( dBinit = inverse temperature step )\n"
<< " -f Bfin ( Bfin = final value of inverse temperature )\n"
<< " -M runs ( runs = number of population annealing algorithm independent runs )\n"
<< " -s RNGseed ( RNGseed = seed for random number generation )\n"
<< " -P OutputPrecision ( OutputPrecision = precision (number of digits) of the output )\n"
<< " -o dataDirectory ( dataDirectory = data directory name )\n";
}
int main(int argc, char** argv)
{
// data directory name + create
char dataDir[200]; unsigned long long rng_seed = RNGseed; int optdir = 0;
int optc, opti; double optf;
while ((optc = getopt (argc, argv, "R:t:d:f:M:s:P:o:?")) != -1) // Processing optional command line options
switch (optc)
{
case 'R': opti = atoi(optarg); if(opti) Rinit = opti; break; // -R Rinit
case 't': opti = atoi(optarg); EQsweeps = opti; break; // -t EQsweeps
case 'd': optf = atof(optarg); if(optf > 0.0) dBinit = optf; break; // -d dBinit
case 'f': optf = atof(optarg); if(optf > 0.0) Bfin = optf; break; // -f Bfin
case 'M': opti = atoi(optarg); if(opti) runs = opti; break; // -M runs
case 's': opti = atoi(optarg); if(opti) rng_seed = opti; break; // -s RNGseed
case 'P': opti = atoi(optarg); if(opti) OutputPrecision = opti; break; // -P OutputPrecision
case 'o': if(optarg[strlen(optarg)-1]=='/') sprintf(dataDir,"%s",optarg); // -o dataDir
else sprintf(dataDir,"%s/",optarg); optdir = 1; break;
case '?': PrintParameterUsage(); return 1;
}
if(optind < argc){
for (opti = optind; opti < argc; opti++) fprintf(stderr,"Non-option argument %s\n", argv[opti]);
return 1;
}
#ifdef AdaptiveStep
if(!optdir) sprintf(dataDir, "./dataMSC_L%d_R%d_EqSw%d/", L, Rinit, EQsweeps);
#else
if(!optdir) sprintf(dataDir, "./dataMSC_L%d_R%d_EqSw%d_dB%f/", L, Rinit, EQsweeps, dBinit);
#endif
#if defined(_WIN32)
_mkdir(dataDir);
#else
mkdir(dataDir, 0777);
#endif
int rmin=0, rmax=runs-1; unsigned long long initial_sequence = 0; int rg;
double B[nBmax], Binc[nBmax]; B[0]=Binc[0]=Binit; double totPop=0;
// creating data arrays for thermodynamic variables and errors
double E[nBmax]; double M[nBmax]; double M2[nBmax]; double M4[nBmax];
double C[nBmax];
double lnQ[nBmax]; // partition function ratio
double S[nBmax]; // entropy
double BF[nBmax]; // dimensionless free energy estimate
BF[0] = - N*log(2.0); // its value at infinite temperature
int R[nBmax]; // population size
int nB;
// CUDAErrChk( cudaSetDevice(0) ); // uncomment to explicitly select device number in a setup with multiple cards
CUDAErrChk(cudaDeviceSetCacheConfig(cudaFuncCachePreferL1)); // prefer larger L1 cache and smaller shared memory
// GPU execution time
cudaEvent_t start, stop; float Etime;
CUDAErrChk( cudaEventCreate(&start) );
CUDAErrChk( cudaEventCreate(&stop) );
// start evaluation time measurement
cudaEventRecord(start, 0);
double *Averages; double Averages_h[5]; int* Ridev;
CUDAErrChk( cudaMalloc((void**)&Averages,5*sizeof(double)) );
CUDAErrChk( cudaMalloc((void**)&Qd,sizeof(double)) );
CUDAErrChk( cudaMalloc((void**)&Ridev,sizeof(int)) );
CUDAErrChk( cudaMalloc((void**)&ioverlapd,sizeof(double)) );
// random seed
cout <<"RNG initial seed: "<< rng_seed<<"\n";
R[0] = Rinit;
cout << "Memory use of one replica: " << sizeof(Replica) / 1024.0 / (double)MSbits << " kB \n";
cout << "Memory use of the entire population of " << R[0] << " replicas: "
<< ceil(R[0]/(double)MSbits)*sizeof(Replica) / 1024.0 / 1024.0 << " MB \n"; fflush(stdout);
// creating energy spectrum for multi-histogram reweighting
#ifdef MHR
int Ei[N+1];
for (int i = 0; i < N+1; ++i){
Ei[i] = 4*i - 2*N;
}
#endif
Replica* Rep_d;
	unsigned int boltzGPU[boltzTableL]; // Boltzmann factor table - host version
unsigned int* boltztext;
// memory allocation for Boltzmann factor table
CUDAErrChk( cudaMalloc((void **)&boltztext, boltzTableL * sizeof(unsigned int)) );
// binding references (global & texture memory buffers)
CUDAErrChk( cudaBindTexture(NULL,boltzT,boltztext,boltzTableL * sizeof(unsigned int)) );
int Ethreads = 1; while(Ethreads < EQthreads) Ethreads <<= 1;
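	// Ethreads: smallest power of two >= EQthreads, used as the energyKer block size (blockReduceSum assumes a power-of-two blockDim.x)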
for (int r = rmin; r <= rmax; ++r){
rg = (int)ceil(R[0]/(float)MSbits); // number of replica groups (R / MSbits)
double sumlnQ = 0.0; double q; double Emean = 0.0;
CUDAErrChk( cudaMalloc((void **)&Rep_d,rg*sizeof(Replica)) );
int NblocksR = (int)ceil(rg/(float)Nthreads);
ReplicaInit <<< rg, EQthreads >>> (Rep_d,rg,R[0],rng_seed,initial_sequence); initial_sequence+=rg*EQthreads;
CUDAErrChk( cudaPeekAtLastError() );
CUDAErrChk( cudaDeviceSynchronize() );
// compute energy of all replicas at zero temperature (for 1st resampling)
energyKer <<< rg, Ethreads >>> (Rep_d);
CUDAErrChk( cudaPeekAtLastError() );
CUDAErrChk( cudaDeviceSynchronize() );
// array for summing the energy histograms over inverse temperatures
#ifdef MHR
int ShistE[N+1]; int* ShistEd;
CUDAErrChk( cudaMalloc((void**)&ShistEd,(N+1)*sizeof(int)) );
CUDAErrChk( cudaMemset(ShistEd,0,(N+1)*sizeof(int)) );
dim3 DimGridR(NblocksR,MSbits,1);
UpdateShistE<<<DimGridR,Nthreads>>> (Rep_d, rg, ShistEd);
CUDAErrChk( cudaPeekAtLastError() );
CUDAErrChk( cudaDeviceSynchronize() );
#endif
// ------------------------------------------------------------------
// population annealing
// ------------------------------------------------------------------
int i=1, iprev=0; double deltaBeta=dBinit; B[i]=Binc[i]=B[iprev]+deltaBeta;
while(B[i]<=Bfin) {
// Boltzmann factor tabulation (only two are relevant: exp(-4*B);exp(-8*B))
boltzGPU[0] = ceil(4294967296.*exp(-4*B[i]));
boltzGPU[1] = ceil(4294967296.*exp(-8*B[i]));
			// copying table to texture memory - boltztext is bound to the texture reference boltzT
CUDAErrChk( cudaMemcpy(boltztext, boltzGPU, boltzTableL * sizeof(unsigned int),cudaMemcpyHostToDevice) );
// compute the partition function ratio - Q
NblocksR = (int)ceil(rg/(float)Nthreads);
dim3 DimGridR(NblocksR,MSbits,1);
QKer <<< rg, MSbits >>> (Rep_d, rg, B[i] - B[i-1], Emean, 0, Qd);
CUDAErrChk( cudaPeekAtLastError() );
CUDAErrChk( cudaDeviceSynchronize() );
QKer <<< NblocksR, Nthreads >>> (Rep_d, rg, B[i] - B[i-1], Emean, 1, Qd);
CUDAErrChk( cudaPeekAtLastError() );
CUDAErrChk( cudaDeviceSynchronize() );
QKer <<< 1, Nthreads >>> (Rep_d, rg, B[i] - B[i-1], Emean, 2, Qd);
CUDAErrChk( cudaPeekAtLastError() );
CUDAErrChk( cudaDeviceSynchronize() );
CUDAErrChk( cudaMemcpy(&q,Qd,sizeof(double),cudaMemcpyDeviceToHost) );
lnQ[i] = -(B[i] - B[i-1])*Emean + log(q) -log((double)R[i-1]);
CalcTauKer <<< DimGridR, Nthreads >>> (Rep_d, Rinit, R[i-1], rg, lnQ[i], B[i] - B[i-1],rng_seed,initial_sequence); initial_sequence+=rg*MSbits;
CUDAErrChk( cudaPeekAtLastError() );
CUDAErrChk( cudaDeviceSynchronize() );
// resampling new population
CalcParSum <<< rg, MSbits >>> (Rep_d, rg, 0, Ridev);
CUDAErrChk( cudaPeekAtLastError() );
CUDAErrChk( cudaDeviceSynchronize() );
CalcParSum <<< NblocksR, Nthreads >>> (Rep_d, rg, 1, Ridev);
CUDAErrChk( cudaPeekAtLastError() );
CUDAErrChk( cudaDeviceSynchronize() );
CalcParSum <<< DimGridR, Nthreads >>> (Rep_d, rg, 2, Ridev);
CUDAErrChk( cudaPeekAtLastError() );
CUDAErrChk( cudaDeviceSynchronize() );
CUDAErrChk( cudaMemcpy(&R[i], Ridev, sizeof(int),cudaMemcpyDeviceToHost) );
dim3 DimGridRes(rg,MSbits,N/2/EQthreads); // resampleKer configuration with old value of rg
rg = (int)ceil(R[i]/(float)MSbits); // updated number of replica groups
DimGridR.x = NblocksR = (int)ceil(rg/(float)Nthreads); Replica* RepNew_d;
CUDAErrChk( cudaMalloc((void**)&RepNew_d,rg*sizeof(Replica)) );
CUDAErrChk( cudaMemset(RepNew_d,0,rg*sizeof(Replica)) );
CUDAErrChk( cudaDeviceSynchronize() );
resampleKer <<< DimGridRes, EQthreads >>> (Rep_d, RepNew_d, rg);
CUDAErrChk( cudaPeekAtLastError() );
CUDAErrChk( cudaDeviceSynchronize() );
Replica* RepDel = Rep_d;
Rep_d = RepNew_d;
CUDAErrChk( cudaFree(RepDel) );
			// equilibrate replicas for the given number of sweeps
checkKerALL <<< rg, EQthreads >>> (Rep_d,rg,EQsweeps,rng_seed,initial_sequence); initial_sequence+=rg*EQthreads;
CUDAErrChk( cudaPeekAtLastError() );
CUDAErrChk( cudaDeviceSynchronize() );
// compute observables (E,M,O,F)
// compute energy and magnetization of all replicas
energyKer <<< rg, Ethreads >>> (Rep_d);
CUDAErrChk( cudaPeekAtLastError() );
CUDAErrChk( cudaDeviceSynchronize() );
// saving results - energies
#ifdef EnergiesPopStore
Replica* Rep_h = (Replica*)malloc(rg*sizeof(Replica));
CUDAErrChk( cudaMemcpy(Rep_h, Rep_d, rg*sizeof(Replica),cudaMemcpyDeviceToHost) );
ofstream results;
char str[100];
char str2[100];
strcpy(str, dataDir);
sprintf(str2,"PA_energies_%d.dat",i);
strcat(str,str2);
results.open(str);
results.precision(OutputPrecision);
for (int j = 0; j < rg; ++j)
for (int l = 0; l < MSbits; ++l)
if(Rep_h[j].isActive[l]) results << Rep_h[j].IE[l] << " ";
results.close(); free(Rep_h);
#endif
#ifdef MHR
UpdateShistE<<<DimGridR,Nthreads>>>(Rep_d, rg, ShistEd);
CUDAErrChk( cudaPeekAtLastError() );
CUDAErrChk( cudaDeviceSynchronize() );
#endif
CUDAErrChk( cudaMemset(Averages, 0, 5*sizeof(double)) );
CalcAverages<<<DimGridR,Nthreads>>>(Rep_d,rg,Averages);
CUDAErrChk( cudaPeekAtLastError() );
CUDAErrChk( cudaDeviceSynchronize() );
CUDAErrChk( cudaMemcpy(Averages_h,Averages,5*sizeof(double),cudaMemcpyDeviceToHost) );
E[i] = Emean = Averages_h[0] / R[i];
C[i] = (Averages_h[1] / R[i] - E[i]*E[i]) * B[i] * B[i];
M[i] = Averages_h[2] / R[i];
M2[i] = Averages_h[3] / R[i];
M4[i] = Averages_h[4] / R[i];
// dimensionless free energy
sumlnQ -= lnQ[i];
BF[i] = - N*log(2.0) + sumlnQ;
// entropy
S[i] = B[i]*E[i] - BF[i];
iprev=i; totPop+=R[i]; i++;
if(i>=nBmax){
#ifdef AdaptiveStep
fprintf(stderr,"Error: number of temperature steps exceeds nBmax=%d.\n Please consider increasing the population size or decreasing the value of MinOverlap or increasing the value of nBmax.\n",nBmax);
#else
fprintf(stderr,"Error: number of temperature steps exceeds nBmax=%d.\n Please consider increasing the inverse temperature step or increasing the value of nBmax.\n",nBmax);
#endif
return 1;
}
if (r==rmin){
#ifdef AdaptiveStep
double overlap, dBmin = 0, dBmax = deltaBeta, dBmean;
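			// first expand dBmax geometrically until the histogram overlap drops below MaxOverlap (or Bfin is reached),
			// then bisect between dBmin and dBmax until the overlap lies in [MinOverlap, MaxOverlap)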
while(1){
overlap = CalcOverlap ( Rep_d, dBmax, R[iprev], Emean );
if ( (overlap >= MaxOverlap) && (B[iprev] + dBmax < Bfin) ) dBmax *= 1.1; else break;
}
if ( overlap >= MinOverlap ) dBmean = dBmax;
else while(1){ // obtaining optimal inverse temperature step with the bisection method
dBmean = 0.5 * (dBmin + dBmax);
overlap = CalcOverlap ( Rep_d, dBmean, R[iprev], Emean );
if ( overlap < MinOverlap ) dBmax = dBmean;
else if ( overlap >= MaxOverlap ) dBmin = dBmean;
else break;
}
if( (B[iprev] < Bfin) && (B[iprev] + dBmean > Bfin) ) deltaBeta = Bfin - B[iprev]; else deltaBeta = dBmean;
#endif
B[i] = Binc[i] = B[iprev] + deltaBeta;
} else B[i]=Binc[i];
}
CUDAErrChk( cudaFree(Rep_d) );
nB=i;
// saving results
{
ofstream results;
char str[100];
char str2[100];
strcpy(str, dataDir);
sprintf(str2, "PA_results_run_%d.dat", r);
strcat(str,str2);
results.open(str);
results.precision(OutputPrecision);
for (int i = 0; i < nB; ++i) {
results << B[i] << " "
<< E[i] / N << " "
<< C[i] / N << " "
<< M[i] / N << " "
<< M2[i] / N / N << " "
<< M4[i] / N / N / N / N << " "
<< BF[i] / N << " "
<< S[i] / N << " "
<< R[i] << " "
<< lnQ[i] << "\n";
}
results.close();
}
	// multi-histogram reweighting (MHR) analysis
#ifdef MHR
// declaring arrays used in MHR analysis
double lnOmega[N+1];
double E_MHR[nB*MHR_Niter];
double C_MHR[nB*MHR_Niter];
double BF_MHR[nB*MHR_Niter];
bool relTerm[N+1];
CUDAErrChk( cudaMemcpy(ShistE,ShistEd,(N+1)*sizeof(int),cudaMemcpyDeviceToHost) );
for (int l = 0; l < MHR_Niter; ++l){
// calculate lnOmega
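			// lnOmega[k] = ln(H(E_k)) - ln( sum_i R_i * exp(BF_i - B_i*E_k) ): multi-histogram estimate of the log density
			// of states; the maximum exponent mSigma is factored out before exponentiating to avoid overflow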
double Sigma[nB];
double mSigma;
for (int k = 0; k < N+1; ++k){
// maxima of -S = BF - B*E
Sigma[0] = BF[0]-B[0]*Ei[k];
mSigma = Sigma[0];
for (int i = 1; i < nB; ++i){
Sigma[i] = BF[i]-B[i]*Ei[k];
if (mSigma < Sigma[i]){
mSigma = Sigma[i];
}
}
double sD = 0;
for (int i = 0; i < nB; ++i){
sD += R[i]*exp(Sigma[i]-mSigma);
}
if ((ShistE[k] == 0) || (sD == 0)){
relTerm[k] = false;
lnOmega[k] = 0;
} else {
relTerm[k] = true;
lnOmega[k] = log(ShistE[k]) - mSigma - log(sD);
}
}
		// reweighting of observables
double expOm[N+1];
double Om[N+1];
double mOm;
for (int i = 0; i < nB; ++i){
// determine the maxima of the reweighting exponent
mOm = lnOmega[0] - B[i]*Ei[0];
for (int k = 0; k < N+1; ++k){
Om[k] = lnOmega[k] - B[i]*Ei[k];
if (mOm < Om[k]){
mOm = Om[k];
}
}
// calculate reweighting exponentials
double p = 0;
for (int k = 0; k < N+1; ++k){
expOm[k] = exp(Om[k] - mOm);
if (relTerm[k])
p += expOm[k];
}
double s = 0;
for (int k = 0; k < N+1; ++k){
if (relTerm[k])
s += Ei[k]*expOm[k];
}
E_MHR[i+l*nB] = s / p / N;
BF_MHR[i+l*nB] = - mOm - log(p);
BF[i] = BF_MHR[i+l*nB];
s = 0;
for (int k = 0; k < N+1; ++k){
if (relTerm[k])
s += pow(Ei[k]-E_MHR[i+l*nB]*N,2)*expOm[k];
}
C_MHR[i+l*nB] = B[i]*B[i] * s / p / N;
}
}
// saving results
{
ofstream results;
char MHRDataFile[100];
char str2[100];
strcpy(MHRDataFile, dataDir);
sprintf(str2,"PA_MHR_results_run_%d.dat",r);
strcat(MHRDataFile,str2);
results.open(MHRDataFile);
results.precision(OutputPrecision);
for (int i = 0; i < nB; ++i){
results << B[i] << " ";
for (int l = 0; l < MHR_Niter; ++l){
results << E_MHR[i+l*nB] << " ";
results << C_MHR[i+l*nB] << " ";
results << BF_MHR[i+l*nB] / N << " ";
}
results << "\n";
}
results.close();
}
CUDAErrChk( cudaFree(ShistEd) );
#endif
}
CUDAErrChk( cudaFree(Averages) );
CUDAErrChk( cudaFree(Ridev) );
CUDAErrChk( cudaFree(Qd) );
CUDAErrChk( cudaFree(ioverlapd) );
CUDAErrChk( cudaUnbindTexture(boltzT) );
CUDAErrChk( cudaFree(boltztext));
CUDAErrChk( cudaDeviceSynchronize() );
CUDAErrChk( cudaEventRecord(stop, 0) );
CUDAErrChk( cudaEventSynchronize(stop) );
CUDAErrChk( cudaEventElapsedTime(&Etime, start, stop) );
cout << "Elapsed time: " << setprecision(8) << Etime/1000 << " s\n";
cout << "Time per spin-flip: " << setprecision(8) << Etime*1e6/EQsweeps/N/totPop << " ns\n";
CUDAErrChk( cudaEventDestroy(start) );
CUDAErrChk( cudaEventDestroy(stop) );
return 0;
}
|
c10f52f8965c6c13cc2f18c797b7138de1e9a4e2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "j2d9pt-512-8-128_kernel.hu"
__device__ double __sbref_wrap(double *sb, size_t index) { return sb[index]; }
__global__ void kernel0_8(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 480;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_5_3;
double __reg_5_4;
double __reg_6_0;
double __reg_6_1;
double __reg_6_2;
double __reg_6_3;
double __reg_6_4;
double __reg_7_0;
double __reg_7_1;
double __reg_7_2;
double __reg_7_3;
double __reg_7_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __storeValid = __writeValid8;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = ((((((((((7.1f * (__REGREF(__a, 0))) + (5.1f * (__REGREF(__b, 0)))) + (9.2f * (__SBREF(__c_sb, -2)))) + (12.1f * (__SBREF(__c_sb, -1)))) + (15.f * (__REGREF(__c, 0)))) + (12.2f * (__SBREF(__c_sb, 1)))) + (9.1f * (__SBREF(__c_sb, 2)))) + (5.2f * (__REGREF(__d, 0)))) + (7.2f * (__REGREF(__e, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC6(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC7(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
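// Register-pipelined prologue: the kernel fuses __side0Len = 8 time steps, streaming rows through the per-stage registers
// __reg_t_* while the double-buffered shared-memory row __c_sb supplies the +/-2 halo along c2. The branch below seeds the
// pipeline: the block owning the c1 = 0 boundary keeps its first two rows unchanged (loaded into __reg_7_0/__reg_7_1),
// while interior blocks redundantly recompute the overlapped halo rows before the first __STORE.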
if (__c1Id == 0)
{
__LOAD(__reg_7_0, 0);
__LOAD(__reg_7_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_7_0, __reg_7_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_7_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_7_0, __reg_7_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_7_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_7_0, __reg_7_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_7_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_7_0, __reg_7_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_7_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_7_0, __reg_7_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_7_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_7_0, __reg_7_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_7_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_7_0, __reg_7_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_7_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(5, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(6, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(7, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(8, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(10, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(11, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(12, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 29);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(13, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, 31);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(15, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_2, 32);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_4, 29);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, 31);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 32);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
}
__c_sb = __c_sb_double + __blockSize * 0;
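/* Steady state and epilogue (descriptive comment, added for readability of the generated code):
   the last block along c1 (__c1Id == __side1Num - 1) runs the unrolled streaming loop and then
   peels the remaining rows through the tail cases (__h + 0 .. __h + 4 == rows left); interior
   blocks keep streaming up to __side1LenOl, returning early once the overlapped range is done. */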
if (__c1Id == __side1Num - 1)
{
for (__h = 33; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 16, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 16, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 16, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1, __reg_0_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2);
__STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2, __reg_0_3);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3, __reg_0_4);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3);
__STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3, __reg_0_4);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, __h + 2);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4, __reg_0_0);
__STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4);
__STORE(__h + 0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4, __reg_0_0);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, __h + 2);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, __h + 3);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0);
__STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0, __reg_0_1);
__STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h + 0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_0_0);
__STORE(__h + 1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_0_0, __reg_0_1);
}
}
else
{
for (__h = 33; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 16, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 16, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 16, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 16, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 16, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 16, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__h++;
}
}
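/* kernel0_7: AN5D-style register-streaming stencil kernel (added comment; behaviour inferred from
   the constants below). It fuses __side0Len = 7 time steps of the radius-2 star stencil per sweep.
   Each thread streams along c1 (rows), holding a 5-row window per time level in registers
   (__reg_k_0 .. __reg_k_4) and exchanging the c2 (column) neighbours through the double-buffered
   shared-memory line __c_sb_double. */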
__global__ void kernel0_7(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 484;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_5_3;
double __reg_5_4;
double __reg_6_0;
double __reg_6_1;
double __reg_6_2;
double __reg_6_3;
double __reg_6_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __storeValid = __writeValid7;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
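/* Helper macros (added descriptive comments): __LOAD reads one row of the current time copy of A
   into a register; __CALCEXPR applies the weighted radius-2 cross stencil, taking the c1 neighbours
   from the register window and the c2 neighbours from the shared-memory line, normalised by 118;
   __CALCk advances time level k only for threads whose halo is still valid (otherwise it passes the
   centre value through); __STORE evaluates the final (7th) time level and writes it to the other
   time-parity copy of A via __DEST. */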
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = ((((((((((7.1f * (__REGREF(__a, 0))) + (5.1f * (__REGREF(__b, 0)))) + (9.2f * (__SBREF(__c_sb, -2)))) + (12.1f * (__SBREF(__c_sb, -1)))) + (15.f * (__REGREF(__c, 0)))) + (12.2f * (__SBREF(__c_sb, 1)))) + (9.1f * (__SBREF(__c_sb, 2)))) + (5.2f * (__REGREF(__d, 0)))) + (7.2f * (__REGREF(__e, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC6(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
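/* Prologue (added comment): the first block along c1 (__c1Id == 0) keeps the two boundary rows in
   __reg_6_0/__reg_6_1 and reuses them while priming the register pipeline; interior blocks instead
   load a full halo of rows and pipeline normally before entering the main streaming loop. */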
if (__c1Id == 0)
{
__LOAD(__reg_6_0, 0);
__LOAD(__reg_6_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_6_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_6_0, __reg_6_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_6_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_6_0, __reg_6_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_6_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_6_0, __reg_6_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_6_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_6_0, __reg_6_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_6_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_6_0, __reg_6_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_6_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(5, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(6, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(8, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(9, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(10, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(11, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(13, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
}
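// Reset the shared-memory working pointer to the buffer half the streaming
// loops below expect after the prologue's __DB_SWITCH toggles.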
__c_sb = __c_sb_double + __blockSize * 1;
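// The last tile along c1 streams rows in chunks of five until fewer than five
// remain, then drains the pipeline in the epilogue cases below; every other
// tile takes the else branch and streams its full overlapped extent
// (__side1LenOl rows).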
if (__c1Id == __side1Num - 1)
{
for (__h = 29; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 14, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 14, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 14, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
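// Pipeline drain: depending on how many rows (0..4) remain past the last full
// chunk, the outstanding stages are finished with the already-loaded bottom
// rows fed directly into the deeper stages, and the last results are stored.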
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3, __reg_0_4);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4);
__STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4, __reg_0_0);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0);
__STORE(__h + 0, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0, __reg_0_1);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, __h + 3);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2);
__STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h + 0, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1);
__STORE(__h + 1, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1, __reg_0_2);
}
}
else
{
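// Tiles that do not own the bottom of the range: stream until the per-tile row
// budget (__side1LenOl) is exhausted; the guarded single-row steps after the
// loop cover a budget that is not a multiple of five.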
for (__h = 29; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 14, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 14, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 14, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 14, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 14, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 14, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__h++;
}
}
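// kernel0_6: six time steps of the radius-2 star stencil fused per sweep
// (__side0Len = 6). Rows (c1) stream through per-thread registers, columns (c2)
// are exchanged through a double-buffered shared-memory line, and A holds two
// time planes selected by the parity of c0.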
__global__ void kernel0_6(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_5_3;
double __reg_5_4;
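// Double-buffered shared-memory line: two halves of __blockSize doubles,
// toggled by __DB_SWITCH so each stage can publish its centre row to the
// column neighbours (c2 +/- 1, +/- 2) with a single __syncthreads per stage.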
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
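// Validity masks: __loadValid guards global loads in the lateral halo,
// __writeValidN shrinks the valid column range by one halo (2 columns) per
// fused step, and only threads still valid after six steps (__storeValid)
// write results back.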
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __storeValid = __writeValid6;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = ((((((((((7.1f * (__REGREF(__a, 0))) + (5.1f * (__REGREF(__b, 0)))) + (9.2f * (__SBREF(__c_sb, -2)))) + (12.1f * (__SBREF(__c_sb, -1)))) + (15.f * (__REGREF(__c, 0)))) + (12.2f * (__SBREF(__c_sb, 1)))) + (9.1f * (__SBREF(__c_sb, 2)))) + (5.2f * (__REGREF(__d, 0)))) + (7.2f * (__REGREF(__e, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
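// __LOAD fetches one row of the current time plane into a register; __CALCk
// applies the stencil for the k-th fused step where __writeValidk holds and
// otherwise passes the centre value through; __STORE applies the sixth and
// final step and writes the result straight to the other time plane.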
if (__c1Id == 0)
{
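// Tile owning the top boundary: rows 0 and 1 stay in __reg_5_0/__reg_5_1 and
// are injected unchanged into the deeper stages, so the boundary rows bypass
// the earlier fused steps; the first result row is stored at offset 2.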
__LOAD(__reg_5_0, 0);
__LOAD(__reg_5_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_5_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_5_0, __reg_5_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_5_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_5_0, __reg_5_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_5_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_5_0, __reg_5_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_5_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_5_0, __reg_5_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_5_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(6, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(7, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(8, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(9, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(11, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
}
else
{
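// Prologue for tiles away from the top boundary: preload 25 rows (h = 0..24)
// to fill all six pipeline stages before the first result row is stored at
// offset 12.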
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
}
__c_sb = __c_sb_double + __blockSize * 0;
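// Last tile along c1: stream rows in chunks of five, then drain the remaining
// stages in the epilogue, feeding the bottom halo rows straight into the
// deeper stages, mirroring the top-boundary handling above.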
if (__c1Id == __side1Num - 1)
{
for (__h = 25; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 12, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 12, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 12, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__h++;
}
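// Pipeline drain for the 0..4 rows left past the last full chunk of five.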
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0);
__STORE(__h - 1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, __h + 2);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1);
__STORE(__h + 0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, __h + 2);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, __h + 3);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
__STORE(__h - 1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h + 0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2);
__STORE(__h + 1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3);
}
}
else
{
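// Interior tiles along c1: stream the full overlapped extent. The body is unrolled
// five-fold so the five-register row window rotates back to its starting assignment
// each iteration; every pass loads one new row and stores the row that has passed
// through all fused stages (12 rows behind the current load for this kernel).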
for (__h = 25; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 12, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 12, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 12, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 12, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 12, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 12, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__h++;
}
}
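// kernel0_5: one batch of 5 fused time steps (__side0Len = 5) of the radius-2 star
// stencil, using the AN5D-style register/shared-memory pipeline. Each thread owns one
// column (c2) of a 128-row by 492-column tile (plus halo overlap) and streams down the
// rows (c1), keeping a five-row window per stage in registers while exchanging column
// neighbours through shared memory.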
__global__ void kernel0_5(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __storeValid = __writeValid5;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
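// Helper macros: __LOAD reads one row of the current time plane (selected by c0 % 2)
// into a register; __CALCEXPR applies the weighted radius-2 star stencil, taking row
// neighbours from registers and column neighbours from shared memory; __CALCk runs
// stage k only where the column is far enough from the tile edge (__writeValidk),
// otherwise passing the centre value through; __STORE applies the final stage and
// writes the result into the other time plane.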
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = ((((((((((7.1f * (__REGREF(__a, 0))) + (5.1f * (__REGREF(__b, 0)))) + (9.2f * (__SBREF(__c_sb, -2)))) + (12.1f * (__SBREF(__c_sb, -1)))) + (15.f * (__REGREF(__c, 0)))) + (12.2f * (__SBREF(__c_sb, 1)))) + (9.1f * (__SBREF(__c_sb, 2)))) + (5.2f * (__REGREF(__d, 0)))) + (7.2f * (__REGREF(__e, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
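// Pipeline priming. The first tile along c1 leaves the two top boundary rows unchanged
// and begins storing at row 2; all other tiles load 21 halo/overlap rows to fill the
// five-stage pipeline before their first store at row 10.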
if (__c1Id == 0)
{
__LOAD(__reg_4_0, 0);
__LOAD(__reg_4_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_4_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_4_0, __reg_4_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_4_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_4_0, __reg_4_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_4_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_4_0, __reg_4_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_4_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(5, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(6, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(7, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(9, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
}
__c_sb = __c_sb_double + __blockSize * 1;
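// Main streaming phase: the last tile along c1 drains the pipeline against the bottom
// boundary, while interior tiles run until the overlapped row extent is exhausted.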
if (__c1Id == __side1Num - 1)
{
for (__h = 21; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
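// Drain the remaining 0 to 4 rows of the last tile: the final boundary rows are reused
// in place of further loads and the outstanding stages are flushed.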
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, __h + 2);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__STORE(__h + 0, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, __h + 2);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, __h + 3);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h + 0, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__STORE(__h + 1, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
}
}
else
{
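// Interior tiles: unrolled five-fold row streaming; each pass stores the row that has
// cleared all five fused stages (10 rows behind the current load).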
for (__h = 21; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
}
}
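// kernel0_4: the 4-step variant of the same pipeline (__side0Len = 4, three register
// stages plus the store stage) on a 496-column tile, presumably invoked by the host
// when fewer than five time steps remain in a batch.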
__global__ void kernel0_4(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __storeValid = __writeValid4;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = ((((((((((7.1f * (__REGREF(__a, 0))) + (5.1f * (__REGREF(__b, 0)))) + (9.2f * (__SBREF(__c_sb, -2)))) + (12.1f * (__SBREF(__c_sb, -1)))) + (15.f * (__REGREF(__c, 0)))) + (12.2f * (__SBREF(__c_sb, 1)))) + (9.1f * (__SBREF(__c_sb, 2)))) + (5.2f * (__REGREF(__d, 0)))) + (7.2f * (__REGREF(__e, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_3_0, 0);
__LOAD(__reg_3_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_3_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_3_0, __reg_3_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_3_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_3_0, __reg_3_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_3_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(5, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(7, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
}
__c_sb = __c_sb_double + __blockSize * 0;
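    /* Steady state (editor's note): the double-buffer phase is reset after the
     * prologue. The last c1 tile (__c1Id == __side1Num - 1) takes the remainder
     * path below, peeling the bottom-boundary rows case by case; all other tiles
     * run the plain loop, unrolled by 5 to match the rotating register window. */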
if (__c1Id == __side1Num - 1)
{
for (__h = 17; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, __h + 2);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__STORE(__h + 0, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, __h + 2);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, __h + 3);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h + 0, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__STORE(__h + 1, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
}
}
else
{
for (__h = 17; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__h++;
}
}
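/*
 * Editor's note (assumption, not part of the generated output): the kernel0_N
 * variants in this file look like AN5D-style temporally blocked versions of the
 * same 9-point stencil update (the AN5D_TYPE macro and the staged __writeValidN
 * guards point that way). __side0Len = N is the number of time steps fused per
 * launch; every fused step costs one more halo layer of overlap, which is why
 * __side2Len shrinks as N grows: 508 for kernel0_1, 504 for kernel0_2, 500 for
 * kernel0_3 below, and 480 for the 8-step variant in the companion file.
 */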
__global__ void kernel0_3(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __storeValid = __writeValid3;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = ((((((((((7.1f * (__REGREF(__a, 0))) + (5.1f * (__REGREF(__b, 0)))) + (9.2f * (__SBREF(__c_sb, -2)))) + (12.1f * (__SBREF(__c_sb, -1)))) + (15.f * (__REGREF(__c, 0)))) + (12.2f * (__SBREF(__c_sb, 1)))) + (9.1f * (__SBREF(__c_sb, 2)))) + (5.2f * (__REGREF(__d, 0)))) + (7.2f * (__REGREF(__e, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_2_0, 0);
__LOAD(__reg_2_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_2_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_2_0, __reg_2_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_2_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(5, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
}
__c_sb = __c_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, __h + 2);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__STORE(__h + 0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, __h + 2);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, __h + 3);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h + 0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__STORE(__h + 1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
}
}
else
{
for (__h = 13; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__h++;
}
}
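/*
 * Editor's note (assumption): each pipeline stage k keeps a rotating window of
 * five registers __reg_k_0..__reg_k_4, one per row of the 5-row vertical
 * footprint (2 * __halo1 + 1). __LOAD streams row __h into the window, __CALCk
 * advances a value by one time step when __writeValidk holds (otherwise it
 * passes the centre value through), and __STORE writes the fully advanced row
 * back with the latency visible in the "__h - 6" / "__h - 8" offsets
 * (2 * __side0Len rows behind the load front).
 */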
__global__ void kernel0_2(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __storeValid = __writeValid2;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = ((((((((((7.1f * (__REGREF(__a, 0))) + (5.1f * (__REGREF(__b, 0)))) + (9.2f * (__SBREF(__c_sb, -2)))) + (12.1f * (__SBREF(__c_sb, -1)))) + (15.f * (__REGREF(__c, 0)))) + (12.2f * (__SBREF(__c_sb, 1)))) + (9.1f * (__SBREF(__c_sb, 2)))) + (5.2f * (__REGREF(__d, 0)))) + (7.2f * (__REGREF(__e, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_1_0, 0);
__LOAD(__reg_1_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_1_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
}
__c_sb = __c_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__STORE(__h + 0, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, __h + 3);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(__h + 0, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__STORE(__h + 1, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
}
}
else
{
for (__h = 9; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__h++;
}
}
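/*
 * Editor's note (assumption): horizontal (c2) neighbours are exchanged through
 * the double-buffered shared array __c_sb_double. __DB_SWITCH flips between the
 * two halves so that consecutive __CALCSETUP calls do not overwrite values a
 * neighbouring thread may still read via __SBREF at offsets -2..+2, keeping one
 * __syncthreads() per setup sufficient.
 */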
__global__ void kernel0_1(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __storeValid = __writeValid1;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = ((((((((((7.1f * (__REGREF(__a, 0))) + (5.1f * (__REGREF(__b, 0)))) + (9.2f * (__SBREF(__c_sb, -2)))) + (12.1f * (__SBREF(__c_sb, -1)))) + (15.f * (__REGREF(__c, 0)))) + (12.2f * (__SBREF(__c_sb, 1)))) + (9.1f * (__SBREF(__c_sb, 2)))) + (5.2f * (__REGREF(__d, 0)))) + (7.2f * (__REGREF(__e, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__STORE(2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__STORE(2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
}
__c_sb = __c_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_0, __h);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__h++;
__LOAD(__reg_0_1, __h);
__STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__h++;
__LOAD(__reg_0_2, __h);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
__LOAD(__reg_0_3, __h);
__STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__h++;
__LOAD(__reg_0_4, __h);
__STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, __h + 2);
__STORE(__h + 0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, __h + 2);
__STORE(__h + 0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, __h + 3);
__STORE(__h + 1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
}
}
else
{
for (__h = 5; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_0, __h);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__h++;
__LOAD(__reg_0_1, __h);
__STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__h++;
__LOAD(__reg_0_2, __h);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
__LOAD(__reg_0_3, __h);
__STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__h++;
__LOAD(__reg_0_4, __h);
__STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__h++;
}
}
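/*
 * Editor's sketch (not part of the AN5D output): the real host launcher lives
 * in the generated kernel header, which is not shown in this dump, so the
 * function below is a hypothetical illustration only. It mirrors the grid/block
 * derivation that kernel0_1 above performs internally: one thread per
 * overlapped column (__side2LenOl) and one block per (c1 tile, c2 tile) pair.
 * The name launch_kernel0_1_example and the 1-D launch shape are assumptions,
 * not the generator's actual interface.
 */
static void launch_kernel0_1_example(double *A_dev, int dimsize, int timestep, int c0)
{
  const unsigned halo1 = 2, halo2 = 2;             // stencil radius per dimension
  const unsigned side0Len = 1;                      // kernel0_1 fuses a single time step
  const unsigned side1Len = 128, side2Len = 508;    // tile sizes along c1 (rows) and c2 (columns)
  const unsigned c1Len = dimsize - 2 * halo1;       // interior rows
  const unsigned c2Len = dimsize - 2 * halo2;       // interior columns
  const unsigned side2LenOl = side2Len + 2 * halo2 * side0Len;   // threads per block
  const unsigned side1Num = (c1Len + side1Len - 1) / side1Len;   // tiles along c1
  const unsigned side2Num = (c2Len + side2Len - 1) / side2Len;   // tiles along c2
  dim3 block(side2LenOl, 1, 1);
  dim3 grid(side1Num * side2Num, 1, 1);             // blockIdx.x encodes (c1 tile, c2 tile)
  kernel0_1<<<grid, block>>>(A_dev, dimsize, timestep, c0);
}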
c10f52f8965c6c13cc2f18c797b7138de1e9a4e2.cu
#include "j2d9pt-512-8-128_kernel.hu"
__device__ double __sbref_wrap(double *sb, size_t index) { return sb[index]; }
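/*
 * Editor's note (assumption): kernel0_8 below is the deepest fusion variant in
 * this file (__side0Len = 8). It streams eight register pipeline stages
 * __reg_0_* .. __reg_7_* and therefore uses the narrowest tile,
 * __side2Len = 480, so that __side2LenOl = 480 + 2*2*8 = 512 threads per block
 * still covers the eight halo layers on each side.
 */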
__global__ void kernel0_8(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 480;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_5_3;
double __reg_5_4;
double __reg_6_0;
double __reg_6_1;
double __reg_6_2;
double __reg_6_3;
double __reg_6_4;
double __reg_7_0;
double __reg_7_1;
double __reg_7_2;
double __reg_7_3;
double __reg_7_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __storeValid = __writeValid8;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = ((((((((((7.1f * (__REGREF(__a, 0))) + (5.1f * (__REGREF(__b, 0)))) + (9.2f * (__SBREF(__c_sb, -2)))) + (12.1f * (__SBREF(__c_sb, -1)))) + (15.f * (__REGREF(__c, 0)))) + (12.2f * (__SBREF(__c_sb, 1)))) + (9.1f * (__SBREF(__c_sb, 2)))) + (5.2f * (__REGREF(__d, 0)))) + (7.2f * (__REGREF(__e, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC6(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC7(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
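// First tile along c1: prime the 8-stage pipeline on rows 0..32. Rows 0 and 1
// are loaded once into stage-7 registers and reused as the fixed top halo at
// every stage; the first row actually stored is row 2.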
if (__c1Id == 0)
{
__LOAD(__reg_7_0, 0);
__LOAD(__reg_7_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_7_0, __reg_7_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_7_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_7_0, __reg_7_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_7_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_7_0, __reg_7_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_7_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_7_0, __reg_7_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_7_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_7_0, __reg_7_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_7_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_7_0, __reg_7_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_7_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_7_0, __reg_7_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_7_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(5, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(6, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(7, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(8, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(10, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(11, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(12, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 29);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(13, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, 31);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(15, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_2, 32);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
}
else
{
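// Tiles other than the first: load rows 0..32 of the tile and fill the pipeline
// without boundary special-casing; the first row stored is row 16.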
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_4, 29);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, 31);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 32);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
}
__c_sb = __c_sb_double + __blockSize * 0;
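// Last tile along c1: stream rows until at most 4 remain beyond the loop bound,
// then drain the pipeline in one of the epilogue branches below, which reuse the
// final loaded rows as the fixed bottom halo.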
if (__c1Id == __side1Num - 1)
{
for (__h = 33; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 16, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 16, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 16, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__h++;
}
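// Drain: pick the branch matching how many rows (0..4) remain before the bottom
// edge of the domain, flushing the outstanding pipeline stages.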
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1, __reg_0_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2);
__STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2, __reg_0_3);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3, __reg_0_4);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3);
__STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3, __reg_0_4);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, __h + 2);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4, __reg_0_0);
__STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4);
__STORE(__h + 0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4, __reg_0_0);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, __h + 2);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, __h + 3);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0);
__STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0, __reg_0_1);
__STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h + 0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_0_0);
__STORE(__h + 1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_0_0, __reg_0_1);
}
}
else
{
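// Interior tiles: steady-state streaming, 5 rows per iteration; each __STORE
// lags the most recent __LOAD by 16 rows (8 fused steps x halo of 2).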
for (__h = 33; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 16, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 16, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 16, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 16, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 16, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 16, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__h++;
}
}
__global__ void kernel0_7(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 484;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_5_3;
double __reg_5_4;
double __reg_6_0;
double __reg_6_1;
double __reg_6_2;
double __reg_6_3;
double __reg_6_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __storeValid = __writeValid7;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = ((((((((((7.1f * (__REGREF(__a, 0))) + (5.1f * (__REGREF(__b, 0)))) + (9.2f * (__SBREF(__c_sb, -2)))) + (12.1f * (__SBREF(__c_sb, -1)))) + (15.f * (__REGREF(__c, 0)))) + (12.2f * (__SBREF(__c_sb, 1)))) + (9.1f * (__SBREF(__c_sb, 2)))) + (5.2f * (__REGREF(__d, 0)))) + (7.2f * (__REGREF(__e, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC6(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_6_0, 0);
__LOAD(__reg_6_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_6_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_6_0, __reg_6_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_6_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_6_0, __reg_6_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_6_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_6_0, __reg_6_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_6_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_6_0, __reg_6_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_6_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_6_0, __reg_6_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_6_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(5, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(6, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(8, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(9, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(10, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(11, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(13, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
}
__c_sb = __c_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 29; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 14, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 14, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 14, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3, __reg_0_4);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4);
__STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4, __reg_0_0);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0);
__STORE(__h + 0, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0, __reg_0_1);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, __h + 3);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2);
__STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h + 0, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1);
__STORE(__h + 1, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1, __reg_0_2);
}
}
else
{
for (__h = 29; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 14, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 14, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 14, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 14, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 14, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 14, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__h++;
}
}
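// kernel0_6: fuses __side0Len = 6 time steps of the radius-2 stencil per launch.
// Each thread owns one c2 column; rows are streamed along c1 through the __reg_k_* register pipelines
// (levels 1-5 via __CALC1..__CALC5, the sixth update applied directly by __STORE), while c2 neighbours
// are exchanged through the double-buffered shared-memory row __c_sb.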
__global__ void kernel0_6(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_5_3;
double __reg_5_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __storeValid = __writeValid6;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
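// Stencil body: c1 neighbours come from the register pipeline (__a, __b, __d, __e); the +/-1 and +/-2
// c2 neighbours are read from the shared-memory row __c_sb. The single-precision coefficient literals
// are promoted to double before the accumulation.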
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = ((((((((((7.1f * (__REGREF(__a, 0))) + (5.1f * (__REGREF(__b, 0)))) + (9.2f * (__SBREF(__c_sb, -2)))) + (12.1f * (__SBREF(__c_sb, -1)))) + (15.f * (__REGREF(__c, 0)))) + (12.2f * (__SBREF(__c_sb, 1)))) + (9.1f * (__SBREF(__c_sb, 2)))) + (5.2f * (__REGREF(__d, 0)))) + (7.2f * (__REGREF(__e, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
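// __CALCk advances a value to time level k only for threads at least k halo widths inside the c2 tile
// (__writeValidk); other threads pass the centre value through so deeper levels keep consistent inputs.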
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
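// First c1 tile: rows 0 and 1 (the top halo of the domain) are loaded once into __reg_5_0/__reg_5_1 and
// reused unchanged as the leading arguments of every pipeline level while the prologue fills the pipeline
// up to the first __STORE.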
if (__c1Id == 0)
{
__LOAD(__reg_5_0, 0);
__LOAD(__reg_5_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_5_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_5_0, __reg_5_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_5_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_5_0, __reg_5_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_5_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_5_0, __reg_5_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_5_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_5_0, __reg_5_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_5_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(6, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(7, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(8, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(9, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(11, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
}
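// Interior c1 tiles: plain pipeline warm-up over the tile's leading overlap rows, no boundary special-casing.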
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
}
__c_sb = __c_sb_double + __blockSize * 0;
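// Last c1 tile: stream five rows per iteration, then select the epilogue that matches the number of rows
// remaining so the pipeline is flushed against the bottom boundary.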
if (__c1Id == __side1Num - 1)
{
for (__h = 25; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 12, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 12, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 12, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0);
__STORE(__h - 1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, __h + 2);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1);
__STORE(__h + 0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, __h + 2);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, __h + 3);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
__STORE(__h - 1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h + 0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2);
__STORE(__h + 1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3);
}
}
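// Interior c1 tiles: five-row steady-state loop followed by a row-at-a-time tail that exits at the end of
// the overlapped tile.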
else
{
for (__h = 25; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 12, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 12, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 12, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 12, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 12, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 12, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__h++;
}
}
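// kernel0_5: same streaming scheme as kernel0_6 with __side0Len = 5 fused time steps
// (__CALC1..__CALC4 plus the final update in __STORE); the shallower pipeline needs less c2 overlap,
// so __side2Len grows to 492 while the 512-wide thread block stays the same.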
__global__ void kernel0_5(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __storeValid = __writeValid5;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = ((((((((((7.1f * (__REGREF(__a, 0))) + (5.1f * (__REGREF(__b, 0)))) + (9.2f * (__SBREF(__c_sb, -2)))) + (12.1f * (__SBREF(__c_sb, -1)))) + (15.f * (__REGREF(__c, 0)))) + (12.2f * (__SBREF(__c_sb, 1)))) + (9.1f * (__SBREF(__c_sb, 2)))) + (5.2f * (__REGREF(__d, 0)))) + (7.2f * (__REGREF(__e, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
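// Same tile prologue structure as kernel0_6: the first c1 tile reuses the top-halo rows (here kept in
// __reg_4_0/__reg_4_1) at every level; interior tiles warm the pipeline without boundary handling.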
if (__c1Id == 0)
{
__LOAD(__reg_4_0, 0);
__LOAD(__reg_4_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_4_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_4_0, __reg_4_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_4_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_4_0, __reg_4_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_4_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_4_0, __reg_4_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_4_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(5, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(6, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(7, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(9, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
}
__c_sb = __c_sb_double + __blockSize * 1;
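/* Steady state: the last c1 block (__c1Id == __side1Num - 1) streams rows until the
   logical tile end and then drains the register pipeline through the specialized
   epilogues below (one per possible remainder); interior blocks keep streaming until
   __side1LenOl and simply return once the overlapped tile is exhausted. */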
if (__c1Id == __side1Num - 1)
{
for (__h = 21; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, __h + 2);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__STORE(__h + 0, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, __h + 2);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, __h + 3);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h + 0, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__STORE(__h + 1, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
}
}
else
{
for (__h = 21; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
}
}
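/* kernel0_4: AN5D-generated temporally blocked 2D stencil kernel fusing
   __side0Len = 4 time steps per sweep. Each thread owns one c2 column and streams
   c1 rows through a five-register pipeline per time level, exchanging column
   neighbours through the double-buffered shared array __c_sb_double. The first
   three time levels are applied by __CALC1..__CALC3; the fourth is applied by
   __STORE, which writes directly into the destination time plane of A. */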
__global__ void kernel0_4(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;      /* stencil radius along c1 (rows)    */
const AN5D_TYPE __halo2 = 2;      /* stencil radius along c2 (columns) */
const AN5D_TYPE __side0Len = 4;   /* time steps fused by this kernel   */
const AN5D_TYPE __side1Len = 128; /* tile size along c1                */
const AN5D_TYPE __side2Len = 496; /* tile size along c2; one thread per column */
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
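/* Two shared-memory banks of __blockSize doubles each; __DB_SWITCH() flips __c_sb
   between them so that a row still being read for one time level is not overwritten
   by the next level's writes. */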
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
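/* Column-validity predicates: __loadValid guards out-of-range global loads, each
   __writeValidN trims one halo width per fused time level, and only threads that
   remain valid through all levels (__storeValid) write results back to A. */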
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __storeValid = __writeValid4;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
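/* __CALCEXPR applies one weighted radius-2 star stencil:
   out = (7.1*a + 5.1*b + 9.2*c[-2] + 12.1*c[-1] + 15*c + 12.2*c[+1] + 9.1*c[+2]
          + 5.2*d + 7.2*e) / 118,
   where a..e are the five c1 rows held in registers and the c2 offsets are read
   from the shared-memory buffer __c_sb. */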
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = ((((((((((7.1f * (__REGREF(__a, 0))) + (5.1f * (__REGREF(__b, 0)))) + (9.2f * (__SBREF(__c_sb, -2)))) + (12.1f * (__SBREF(__c_sb, -1)))) + (15.f * (__REGREF(__c, 0)))) + (12.2f * (__SBREF(__c_sb, 1)))) + (9.1f * (__SBREF(__c_sb, 2)))) + (5.2f * (__REGREF(__d, 0)))) + (7.2f * (__REGREF(__e, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
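/* Pipeline warm-up: blocks at the lower c1 boundary (__c1Id == 0) keep raw rows 0
   and 1 as the un-updated halo for every fused time level and can start storing at
   row 2, whereas interior blocks first stream through __halo1 * __side0Len overlap
   rows before their first valid __STORE. */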
if (__c1Id == 0)
{
__LOAD(__reg_3_0, 0);
__LOAD(__reg_3_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_3_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_3_0, __reg_3_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_3_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_3_0, __reg_3_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_3_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(5, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(7, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
}
__c_sb = __c_sb_double + __blockSize * 0;
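/* Steady state and drain for kernel0_4: same structure as above, with stores
   trailing the loads by 8 rows (__h - 8) instead of 10. */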
if (__c1Id == __side1Num - 1)
{
for (__h = 17; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, __h + 2);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__STORE(__h + 0, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, __h + 2);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, __h + 3);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h + 0, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__STORE(__h + 1, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
}
}
else
{
for (__h = 17; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__h++;
}
}
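/* kernel0_3: same register-streaming scheme as kernel0_4, but fusing
   __side0Len = 3 time steps (two __CALC stages plus the final __STORE), so the
   overlap consumed along c1 and c2 shrinks accordingly. */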
__global__ void kernel0_3(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;      /* stencil radius along c1 (rows)    */
const AN5D_TYPE __halo2 = 2;      /* stencil radius along c2 (columns) */
const AN5D_TYPE __side0Len = 3;   /* time steps fused by this kernel   */
const AN5D_TYPE __side1Len = 128; /* tile size along c1                */
const AN5D_TYPE __side2Len = 500; /* tile size along c2; one thread per column */
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __storeValid = __writeValid3;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = ((((((((((7.1f * (__REGREF(__a, 0))) + (5.1f * (__REGREF(__b, 0)))) + (9.2f * (__SBREF(__c_sb, -2)))) + (12.1f * (__SBREF(__c_sb, -1)))) + (15.f * (__REGREF(__c, 0)))) + (12.2f * (__SBREF(__c_sb, 1)))) + (9.1f * (__SBREF(__c_sb, 2)))) + (5.2f * (__REGREF(__d, 0)))) + (7.2f * (__REGREF(__e, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_2_0, 0);
__LOAD(__reg_2_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_2_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_2_0, __reg_2_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_2_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(5, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
}
__c_sb = __c_sb_double + __blockSize * 1;
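/* Steady state and drain for kernel0_3: stores trail the loads by 6 rows (__h - 6). */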
if (__c1Id == __side1Num - 1)
{
for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, __h + 2);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__STORE(__h + 0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, __h + 2);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, __h + 3);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h + 0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__STORE(__h + 1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
}
}
else
{
for (__h = 13; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__h++;
}
}
__global__ void kernel0_2(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __storeValid = __writeValid2;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = ((((((((((7.1f * (__REGREF(__a, 0))) + (5.1f * (__REGREF(__b, 0)))) + (9.2f * (__SBREF(__c_sb, -2)))) + (12.1f * (__SBREF(__c_sb, -1)))) + (15.f * (__REGREF(__c, 0)))) + (12.2f * (__SBREF(__c_sb, 1)))) + (9.1f * (__SBREF(__c_sb, 2)))) + (5.2f * (__REGREF(__d, 0)))) + (7.2f * (__REGREF(__e, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
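/* The AN5D-generated streaming scheme below appears to work as follows: rows of the
   c1 dimension are kept in a rotating window of five registers per pipeline stage;
   each __LOAD brings in one new row, __CALC1 applies the radius-2 cross stencil once
   (the first of the two fused time steps, __side0Len == 2), and __STORE applies it a
   second time while writing the result, with shared memory (__c_sb) supplying the
   +-1/+-2 neighbours along c2. */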
if (__c1Id == 0)
{
__LOAD(__reg_1_0, 0);
__LOAD(__reg_1_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_1_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
}
__c_sb = __c_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__STORE(__h + 0, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, __h + 3);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(__h + 0, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__STORE(__h + 1, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
}
}
else
{
for (__h = 9; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__h++;
}
}
__global__ void kernel0_1(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __storeValid = __writeValid1;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = ((((((((((7.1f * (__REGREF(__a, 0))) + (5.1f * (__REGREF(__b, 0)))) + (9.2f * (__SBREF(__c_sb, -2)))) + (12.1f * (__SBREF(__c_sb, -1)))) + (15.f * (__REGREF(__c, 0)))) + (12.2f * (__SBREF(__c_sb, 1)))) + (9.1f * (__SBREF(__c_sb, 2)))) + (5.2f * (__REGREF(__d, 0)))) + (7.2f * (__REGREF(__e, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
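/* kernel0_1 fuses only a single time step (__side0Len == 1): rows are streamed
   through five registers and each __STORE applies the radius-2 cross stencil once,
   writing directly into the output buffer selected by (c0 + 1) % 2. */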
if (__c1Id == 0)
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__STORE(2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__STORE(2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
}
__c_sb = __c_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_0, __h);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__h++;
__LOAD(__reg_0_1, __h);
__STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__h++;
__LOAD(__reg_0_2, __h);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
__LOAD(__reg_0_3, __h);
__STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__h++;
__LOAD(__reg_0_4, __h);
__STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, __h + 2);
__STORE(__h + 0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, __h + 2);
__STORE(__h + 0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, __h + 3);
__STORE(__h + 1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
}
}
else
{
for (__h = 5; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_0, __h);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__h++;
__LOAD(__reg_0_1, __h);
__STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__h++;
__LOAD(__reg_0_2, __h);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
__LOAD(__reg_0_3, __h);
__STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__h++;
__LOAD(__reg_0_4, __h);
__STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__h++;
}
}
|
35d456a442091dca1bd326f2e49ed77f7956e5d8.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
#ifdef UNIX
#include <stdint.h>
#include <unistd.h>
#endif
#include "mex.h"
#include "matrix.h"
// CUDA
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "rocblas.h"
#include "cudaKernels.h"
#include "cudaCommon.h"
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
// At least 2 arguments expected
// Input and result
if ((nrhs!=3) && ((nlhs != 1) && (nrhs != 2)))
mexErrMsgTxt("Wrong number of arguments: 1lhs + 2rhs or 3rhs acceptable");
/* mex parameters are:
0 Source data
1 Destination data
2 Coefficients [4]
*/
ArrayMetadata amd;
// Sort out source/dest stuff
double **srcArray = getGPUSourcePointers(prhs, &amd, 0,0);
double **dstArray;
double *opCoeffs;
if(nrhs == 3) {
dstArray = getGPUSourcePointers(prhs, &amd, 1, 1);
opCoeffs = mxGetPr(prhs[2]);
} else {
//dstArray = create LHS
// creating the destination array for the LHS output is not supported in this path
opCoeffs = mxGetPr(prhs[1]);
}
// Get some control variables sorted out
int *dims = amd.dim;
dim3 gridsize;
gridsize.x = dims[0]*dims[2]/64;
gridsize.y = dims[1]/8;
gridsize.z = 1;
dim3 blocksize; blocksize.x = blocksize.y = 10; blocksize.z = 1;
int nx = dims[0];
int ny = dims[1];
int nz = dims[2]/8 - 1;
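// SymmetricOperatorKernel is declared in cudaKernels.h (not shown here); it is
// assumed to apply the four symmetric coefficients in opCoeffs to srcArray and
// write the result into dstArray over an nx-by-ny-by-nz block.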
hipLaunchKernelGGL(( SymmetricOperatorKernel), dim3(gridsize), dim3(blocksize), 0, 0, srcArray[0], dstArray[0], nx, ny, nz, opCoeffs[0], opCoeffs[1], opCoeffs[2], opCoeffs[3]);
}
| 35d456a442091dca1bd326f2e49ed77f7956e5d8.cu | #include <stdio.h>
#include <string.h>
#include <stdarg.h>
#ifdef UNIX
#include <stdint.h>
#include <unistd.h>
#endif
#include "mex.h"
#include "matrix.h"
// CUDA
#include "cuda.h"
#include "cuda_runtime.h"
#include "cublas.h"
#include "cudaKernels.h"
#include "cudaCommon.h"
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
// At least 2 arguments expected
// Input and result
if ((nrhs!=3) && ((nlhs != 1) && (nrhs != 2)))
mexErrMsgTxt("Wrong number of arguments: 1lhs + 2rhs or 3rhs acceptable");
/* mex parameters are:
0 Source data
1 Destination data
2 Coefficients [4]
*/
ArrayMetadata amd;
// Sort out source/dest stuff
double **srcArray = getGPUSourcePointers(prhs, &amd, 0,0);
double **dstArray;
double *opCoeffs;
if(nrhs == 3) {
dstArray = getGPUSourcePointers(prhs, &amd, 1, 1);
opCoeffs = mxGetPr(prhs[2]);
} else {
//dstArray = create LHS
// creating the destination array for the LHS output is not supported in this path
opCoeffs = mxGetPr(prhs[1]);
}
// Get some control variables sorted out
int *dims = amd.dim;
dim3 gridsize;
gridsize.x = dims[0]*dims[2]/64;
gridsize.y = dims[1]/8;
gridsize.z = 1;
dim3 blocksize; blocksize.x = blocksize.y = 10; blocksize.z = 1;
int nx = dims[0];
int ny = dims[1];
int nz = dims[2]/8 - 1;
SymmetricOperatorKernel<<<gridsize, blocksize>>>(srcArray[0], dstArray[0], nx, ny, nz, opCoeffs[0], opCoeffs[1], opCoeffs[2], opCoeffs[3]);
}
|
07e5d29ee2c601bcaefba08f2fa53d98e95cde29.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <stdarg.h>
#include <stddef.h>
#include <stdbool.h>
extern "C" {
#include "util.h"
#include "mmio_wrapper.h"
#include "parameters.h"
#include "second.h"
}
#define NUM_THREADS 256
#define NUM_BLOCKS 32
// Macros to simplify kernels
#define THREAD_ID threadIdx.x+blockIdx.x*blockDim.x
#define THREAD_COUNT gridDim.x*blockDim.x
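// THREAD_ID/THREAD_COUNT support grid-stride loops: each thread starts at its
// global index and advances by the total number of launched threads, so kernels
// written as "for (i = THREAD_ID; i < n; i += THREAD_COUNT)" handle any vector
// length independently of the launch configuration.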
// tolerance and maximum iterations
#define epsilon 1e-10
#define IMAX 1000
// Error function
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
double * init_source_term(int n, double h){
double * f;
int i;
f = (double*) malloc(n*sizeof(double));
for(i = 0; i < n; i++) {
f[i] = (double)i * -2. * M_PI * M_PI * sin(10.*M_PI*i*h) * sin(10.*M_PI*i*h);
}
return f;
}
// use to keep the partial result in the dot product
double* partial_result;
/*
* Kernel function to compute the partial dot product
* The partial result is stored in partial.
*/
__global__ void vecdot_partial(int n, double* vec1, double* vec2, double* partial)
{
__shared__ double tmp[NUM_THREADS];
tmp[threadIdx.x] = 0;
for (int i=THREAD_ID; i<n; i+=THREAD_COUNT)
{
tmp[threadIdx.x] += vec1[i]*vec2[i];
}
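// Block-level tree reduction: each pass halves the number of active threads, and the
// __syncthreads() barrier makes the previous pass's partial sums visible before they
// are combined. Thread 0 ends up holding this block's partial dot product.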
for (int i=blockDim.x/2;i>=1;i = i/2)
{
__syncthreads();
if (threadIdx.x < i)
{
tmp[threadIdx.x] += tmp[i + threadIdx.x];
}
}
if (threadIdx.x == 0)
{
partial[blockIdx.x] = tmp[0];
}
}
/*
 * Kernel function that reduces the output of the vecdot_partial kernel to a single value.
* The result is stored in result.
*/
__global__ void vecdot_reduce(double* partial, double* result)
{
__shared__ double tmp[NUM_BLOCKS];
if (threadIdx.x < NUM_BLOCKS)
tmp[threadIdx.x] = partial[threadIdx.x];
else
tmp[threadIdx.x] = 0;
for (int i=blockDim.x/2;i>=1;i = i/2) {
__syncthreads();
if (threadIdx.x < i)
tmp[threadIdx.x] += tmp[i + threadIdx.x];
}
if (threadIdx.x == 0)
*result = tmp[0];
}
/*
* Function to perform the dot product
*/
void dot(int n, double* vec1, double* vec2, double* result)
{
dim3 BlockDim(NUM_THREADS);
dim3 GridDim(NUM_BLOCKS);
// call the kernel function
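// Two-stage reduction: the first kernel leaves one partial sum per block in
// partial_result (NUM_BLOCKS values), and the second kernel reduces those in a
// single block so that *result holds the full dot product.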
hipLaunchKernelGGL(( vecdot_partial), dim3(GridDim),dim3(BlockDim), 0, 0, n, vec1, vec2, partial_result);
hipLaunchKernelGGL(( vecdot_reduce), dim3(1),dim3(NUM_BLOCKS), 0, 0, partial_result, result);
}
/*
 * Kernel function to perform a matrix-vector multiplication
*/
__global__ void mat_vec_mul_kernel(double *device_Mat, double *device_Vect,int matRowSize, int vlength, double *device_ResVect)
{
int tidx = blockIdx.x*blockDim.x + threadIdx.x;
int tidy = blockIdx.y*blockDim.y + threadIdx.y;
int tindex=tidx+gridDim.x*NUM_BLOCKS*tidy;
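// Flatten the 2D launch into a row index: with dimGrid = (1, BlocksPerGrid) and
// NUM_BLOCKS x NUM_BLOCKS thread blocks, tidx + gridDim.x*NUM_BLOCKS*tidy assigns
// one thread per matrix row (guarded by tindex < matRowSize below).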
if(tindex<matRowSize)
{
int i;int m=tindex*vlength;
device_ResVect[tindex]=0.00;
for(i=0;i<vlength;i++)
device_ResVect[tindex]+=device_Mat[m+i]*device_Vect[i];
}
__syncthreads();
}
/*function to launch kernel*/
void mat_vec_mul(double *device_Mat, double *device_Vect,int matRowSize, int vlength,double *device_ResVect)
{
int max=NUM_BLOCKS*NUM_BLOCKS;
int BlocksPerGrid=matRowSize/max+1;
dim3 dimBlock(NUM_BLOCKS,NUM_BLOCKS);
if(matRowSize%max==0)BlocksPerGrid--;
dim3 dimGrid(1,BlocksPerGrid);
hipLaunchKernelGGL(( mat_vec_mul_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, device_Mat,device_Vect,matRowSize,vlength,device_ResVect);
}
/*
* Compute a simple scalar division on the device
*/
__global__ void scalardiv(double* num, double* den, double* result)
{
if(threadIdx.x==0 && blockIdx.x==0)
*result = (*num)/(*den);
}
/*
* Computes r= a*x+y for vectors x and y, and scalar a.
* n is the size of the vector
*/
__global__ void axpy(int n, double* a, double* x, double* y, double* r)
{
for (int i=THREAD_ID; i<n; i+=THREAD_COUNT)
r[i] = y[i] + (*a)*x[i];
}
/*
 * Computes y = y - a*x for vectors x and y, and scalar a.
* n is the size of the vector
*/
__global__ void ymax(int n, double* a, double* x, double* y)
{
for (int i=THREAD_ID; i<n; i+=THREAD_COUNT)
y[i] = y[i] - (*a)*x[i];
}
int main ( int argc, char **argv ) {
double * host_A;
double * host_x;
double * host_b;
double t1,t2;
int m_rows,n_cols;
struct size_m sA;
double h;
if (argc < 2)
{
fprintf(stderr, "Usage: %s [martix-market-filename]\n", argv[0]);
exit(1);
}
else
{
host_A = read_mat(argv[1]);
sA = get_size(argv[1]);
printf("Matrix loaded from file %s\n",argv[1]);
printf("Rows = %d \n",sA.m);
printf("Cols = %d \n",sA.n);
}
m_rows = sA.m;
n_cols = sA.n;
h = 1./(double)n_cols;
host_b = init_source_term(n_cols,h);
// allocate initial guess :
host_x = (double*) malloc(n_cols * sizeof(double));
for (int i = 0; i < n_cols; ++i) {
host_x[i] = 0.0;
}
// Allocate memory for the CG solver
double *gpu_A,*gpu_x,*gpu_b;
double *gpu_r,*gpu_p,*gpu_Ap;
double *gpu_alpha , *gpu_beta , *gpu_r_sqrd_old , *gpu_r_sqrd_new;
int iter = 0;
double snew ;
gpuErrchk(hipMalloc((void**)& gpu_A, m_rows*n_cols* sizeof(double)));
gpuErrchk(hipMalloc((void**)& gpu_x, n_cols* sizeof(double)));
gpuErrchk(hipMalloc((void**)& gpu_b, n_cols* sizeof(double)));
gpuErrchk(hipMalloc((void**)& gpu_r, n_cols* sizeof(double)));
gpuErrchk(hipMalloc((void**)& gpu_p, n_cols* sizeof(double)));
gpuErrchk(hipMalloc((void**)& gpu_Ap, m_rows* sizeof(double)));
gpuErrchk(hipMalloc((void**)& gpu_alpha, sizeof(double)));
gpuErrchk(hipMalloc((void**)& gpu_beta, sizeof(double)));
gpuErrchk(hipMalloc((void**)& gpu_r_sqrd_old, sizeof(double)));
gpuErrchk(hipMalloc((void**)& gpu_r_sqrd_new, sizeof(double)));
gpuErrchk(hipMalloc((void**)& partial_result, sizeof(double)*NUM_BLOCKS));
// Initialise variables on the GPU:
gpuErrchk(hipMemcpy(gpu_A, host_A, m_rows*n_cols*sizeof(double) ,hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(gpu_x,host_x, n_cols*sizeof(double) ,hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(gpu_b, host_b, n_cols*sizeof(double) ,hipMemcpyHostToDevice));
// Dimensions of blocks and grid on the GPU :
dim3 BlockDim(NUM_THREADS);
dim3 GridDim(NUM_BLOCKS);
//start the timer
t1 = second();
// begin the cg algorithm
// r=b
gpuErrchk(hipMemcpy(gpu_r, host_b, n_cols*sizeof(double) ,hipMemcpyHostToDevice));
// p=r
gpuErrchk(hipMemcpy(gpu_p, gpu_r, n_cols*sizeof(double) ,hipMemcpyDeviceToDevice));
// rsold = r'*r
dot(n_cols, gpu_r ,gpu_r, gpu_r_sqrd_old);
gpuErrchk(hipMemcpy(&snew, gpu_r_sqrd_old, sizeof(double) ,hipMemcpyDeviceToHost));
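// Unpreconditioned conjugate gradient recurrences used in the loop below:
//   alpha = rsold / (p' A p),  x += alpha p,  r -= alpha A p,
//   beta  = rsnew / rsold,     p  = r + beta p,   until ||r||^2 < epsilon^2.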
for (int j = 0; j < IMAX ; ++j) {
// compute Ap
mat_vec_mul(gpu_A,gpu_p,m_rows,n_cols,gpu_Ap);
// compute p.Ap
dot(n_cols,gpu_p,gpu_Ap,gpu_alpha);
// alpha = rsold / (p' * Ap);
hipLaunchKernelGGL(( scalardiv), dim3(1),dim3(1), 0, 0, gpu_r_sqrd_old, gpu_alpha, gpu_alpha);
// x = x + alpha * p;
hipLaunchKernelGGL(( axpy), dim3(GridDim),dim3(BlockDim), 0, 0, n_cols, gpu_alpha, gpu_p, gpu_x, gpu_x);
// r = r - alpha * Ap;
hipLaunchKernelGGL(( ymax), dim3(GridDim),dim3(BlockDim), 0, 0, n_cols, gpu_alpha, gpu_Ap , gpu_r);
//rsnew = r.r;
dot(n_cols,gpu_r,gpu_r,gpu_r_sqrd_new);
// transfer rsnew from device to host to compare with the given tolerance
hipMemcpy(&snew, gpu_r_sqrd_new, sizeof(double) ,hipMemcpyDeviceToHost);
// Convergence test
if(snew < epsilon*epsilon)
{
printf("\t[STEP %d] residual = %E\n",j,sqrt(snew));
break;
}
// p = r + (rsnew / rsold) * p;
hipLaunchKernelGGL(( scalardiv), dim3(1),dim3(1), 0, 0, gpu_r_sqrd_new, gpu_r_sqrd_old, gpu_beta);
hipLaunchKernelGGL(( axpy), dim3(GridDim),dim3(BlockDim), 0, 0, n_cols, gpu_beta, gpu_p, gpu_r, gpu_p);
//rsold = rsnew;
hipMemcpy(gpu_r_sqrd_old, gpu_r_sqrd_new, sizeof(double) ,hipMemcpyDeviceToDevice);
}
// Get back the results from the device to the host
hipMemcpy(host_x, gpu_x, n_cols*sizeof(double) ,hipMemcpyDeviceToHost);
// stop the timer
t2 = second();
printf("Time for GPU CG (dense solver) = %f [s]\n",(t2-t1));
// free device memory
hipFree(gpu_A);
hipFree(gpu_x);
hipFree(gpu_b);
hipFree(gpu_r);
hipFree(gpu_p);
hipFree(gpu_Ap);
hipFree(gpu_alpha);
hipFree(gpu_beta);
hipFree(gpu_r_sqrd_old);
hipFree(gpu_r_sqrd_new);
// free host memory
free(host_A);
free(host_b);
free(host_x);
return 0;
}
| 07e5d29ee2c601bcaefba08f2fa53d98e95cde29.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <stdarg.h>
#include <stddef.h>
#include <stdbool.h>
extern "C" {
#include "util.h"
#include "mmio_wrapper.h"
#include "parameters.h"
#include "second.h"
}
#define NUM_THREADS 256
#define NUM_BLOCKS 32
// Macros to simplify kernels
#define THREAD_ID threadIdx.x+blockIdx.x*blockDim.x
#define THREAD_COUNT gridDim.x*blockDim.x
// tolerance and maximum iterations
#define epsilon 1e-10
#define IMAX 1000
// Error function
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
double * init_source_term(int n, double h){
double * f;
int i;
f = (double*) malloc(n*sizeof(double));
for(i = 0; i < n; i++) {
f[i] = (double)i * -2. * M_PI * M_PI * sin(10.*M_PI*i*h) * sin(10.*M_PI*i*h);
}
return f;
}
// use to keep the partial result in the dot product
double* partial_result;
/*
* Kernel function to compute the partial dot product
* The partial result is stored in partial.
*/
__global__ void vecdot_partial(int n, double* vec1, double* vec2, double* partial)
{
__shared__ double tmp[NUM_THREADS];
tmp[threadIdx.x] = 0;
for (int i=THREAD_ID; i<n; i+=THREAD_COUNT)
{
tmp[threadIdx.x] += vec1[i]*vec2[i];
}
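// Block-level tree reduction: each pass halves the number of active threads, and the
// __syncthreads() barrier makes the previous pass's partial sums visible before they
// are combined. Thread 0 ends up holding this block's partial dot product.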
for (int i=blockDim.x/2;i>=1;i = i/2)
{
__syncthreads();
if (threadIdx.x < i)
{
tmp[threadIdx.x] += tmp[i + threadIdx.x];
}
}
if (threadIdx.x == 0)
{
partial[blockIdx.x] = tmp[0];
}
}
/*
 * Kernel function that reduces the output of the vecdot_partial kernel to a single value.
* The result is stored in result.
*/
__global__ void vecdot_reduce(double* partial, double* result)
{
__shared__ double tmp[NUM_BLOCKS];
if (threadIdx.x < NUM_BLOCKS)
tmp[threadIdx.x] = partial[threadIdx.x];
else
tmp[threadIdx.x] = 0;
for (int i=blockDim.x/2;i>=1;i = i/2) {
__syncthreads();
if (threadIdx.x < i)
tmp[threadIdx.x] += tmp[i + threadIdx.x];
}
if (threadIdx.x == 0)
*result = tmp[0];
}
/*
* Function to perform the dot product
*/
void dot(int n, double* vec1, double* vec2, double* result)
{
dim3 BlockDim(NUM_THREADS);
dim3 GridDim(NUM_BLOCKS);
// call the kernel function
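// Two-stage reduction: the first kernel leaves one partial sum per block in
// partial_result (NUM_BLOCKS values), and the second kernel reduces those in a
// single block so that *result holds the full dot product.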
vecdot_partial<<<GridDim,BlockDim>>>(n, vec1, vec2, partial_result);
vecdot_reduce<<<1,NUM_BLOCKS>>>(partial_result, result);
}
/*
 * Kernel function to perform a matrix-vector multiplication
*/
__global__ void mat_vec_mul_kernel(double *device_Mat, double *device_Vect,int matRowSize, int vlength, double *device_ResVect)
{
int tidx = blockIdx.x*blockDim.x + threadIdx.x;
int tidy = blockIdx.y*blockDim.y + threadIdx.y;
int tindex=tidx+gridDim.x*NUM_BLOCKS*tidy;
if(tindex<matRowSize)
{
int i;int m=tindex*vlength;
device_ResVect[tindex]=0.00;
for(i=0;i<vlength;i++)
device_ResVect[tindex]+=device_Mat[m+i]*device_Vect[i];
}
__syncthreads();
}
/*function to launch kernel*/
void mat_vec_mul(double *device_Mat, double *device_Vect,int matRowSize, int vlength,double *device_ResVect)
{
int max=NUM_BLOCKS*NUM_BLOCKS;
int BlocksPerGrid=matRowSize/max+1;
dim3 dimBlock(NUM_BLOCKS,NUM_BLOCKS);
if(matRowSize%max==0)BlocksPerGrid--;
dim3 dimGrid(1,BlocksPerGrid);
mat_vec_mul_kernel<<<dimGrid,dimBlock>>>(device_Mat,device_Vect,matRowSize,vlength,device_ResVect);
}
/*
* Compute a simple scalar division on the device
*/
__global__ void scalardiv(double* num, double* den, double* result)
{
if(threadIdx.x==0 && blockIdx.x==0)
*result = (*num)/(*den);
}
/*
* Computes r= a*x+y for vectors x and y, and scalar a.
* n is the size of the vector
*/
__global__ void axpy(int n, double* a, double* x, double* y, double* r)
{
for (int i=THREAD_ID; i<n; i+=THREAD_COUNT)
r[i] = y[i] + (*a)*x[i];
}
/*
 * Computes y = y - a*x for vectors x and y, and scalar a.
* n is the size of the vector
*/
__global__ void ymax(int n, double* a, double* x, double* y)
{
for (int i=THREAD_ID; i<n; i+=THREAD_COUNT)
y[i] = y[i] - (*a)*x[i];
}
int main ( int argc, char **argv ) {
double * host_A;
double * host_x;
double * host_b;
double t1,t2;
int m_rows,n_cols;
struct size_m sA;
double h;
if (argc < 2)
{
fprintf(stderr, "Usage: %s [martix-market-filename]\n", argv[0]);
exit(1);
}
else
{
host_A = read_mat(argv[1]);
sA = get_size(argv[1]);
printf("Matrix loaded from file %s\n",argv[1]);
printf("Rows = %d \n",sA.m);
printf("Cols = %d \n",sA.n);
}
m_rows = sA.m;
n_cols = sA.n;
h = 1./(double)n_cols;
host_b = init_source_term(n_cols,h);
// allocate initial guess :
host_x = (double*) malloc(n_cols * sizeof(double));
for (int i = 0; i < n_cols; ++i) {
host_x[i] = 0.0;
}
// Allocate memory for the CG solver
double *gpu_A,*gpu_x,*gpu_b;
double *gpu_r,*gpu_p,*gpu_Ap;
double *gpu_alpha , *gpu_beta , *gpu_r_sqrd_old , *gpu_r_sqrd_new;
int iter = 0;
double snew ;
gpuErrchk(cudaMalloc((void**)& gpu_A, m_rows*n_cols* sizeof(double)));
gpuErrchk(cudaMalloc((void**)& gpu_x, n_cols* sizeof(double)));
gpuErrchk(cudaMalloc((void**)& gpu_b, n_cols* sizeof(double)));
gpuErrchk(cudaMalloc((void**)& gpu_r, n_cols* sizeof(double)));
gpuErrchk(cudaMalloc((void**)& gpu_p, n_cols* sizeof(double)));
gpuErrchk(cudaMalloc((void**)& gpu_Ap, m_rows* sizeof(double)));
gpuErrchk(cudaMalloc((void**)& gpu_alpha, sizeof(double)));
gpuErrchk(cudaMalloc((void**)& gpu_beta, sizeof(double)));
gpuErrchk(cudaMalloc((void**)& gpu_r_sqrd_old, sizeof(double)));
gpuErrchk(cudaMalloc((void**)& gpu_r_sqrd_new, sizeof(double)));
gpuErrchk(cudaMalloc((void**)& partial_result, sizeof(double)*NUM_BLOCKS));
// Initialise variables on the GPU:
gpuErrchk(cudaMemcpy(gpu_A, host_A, m_rows*n_cols*sizeof(double) ,cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(gpu_x,host_x, n_cols*sizeof(double) ,cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(gpu_b, host_b, n_cols*sizeof(double) ,cudaMemcpyHostToDevice));
// Dimensions of blocks and grid on the GPU :
dim3 BlockDim(NUM_THREADS);
dim3 GridDim(NUM_BLOCKS);
//start the timer
t1 = second();
// begin the cg algorithm
// r=b
gpuErrchk(cudaMemcpy(gpu_r, host_b, n_cols*sizeof(double) ,cudaMemcpyHostToDevice));
// p=r
gpuErrchk(cudaMemcpy(gpu_p, gpu_r, n_cols*sizeof(double) ,cudaMemcpyDeviceToDevice));
// rsold = r'*r
dot(n_cols, gpu_r ,gpu_r, gpu_r_sqrd_old);
gpuErrchk(cudaMemcpy(&snew, gpu_r_sqrd_old, sizeof(double) ,cudaMemcpyDeviceToHost));
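// Unpreconditioned conjugate gradient recurrences used in the loop below:
//   alpha = rsold / (p' A p),  x += alpha p,  r -= alpha A p,
//   beta  = rsnew / rsold,     p  = r + beta p,   until ||r||^2 < epsilon^2.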
for (int j = 0; j < IMAX ; ++j) {
// compute Ap
mat_vec_mul(gpu_A,gpu_p,m_rows,n_cols,gpu_Ap);
// compute p.Ap
dot(n_cols,gpu_p,gpu_Ap,gpu_alpha);
// alpha = rsold / (p' * Ap);
scalardiv<<<1,1>>>(gpu_r_sqrd_old, gpu_alpha, gpu_alpha);
// x = x + alpha * p;
axpy<<<GridDim,BlockDim>>>(n_cols, gpu_alpha, gpu_p, gpu_x, gpu_x);
// r = r - alpha * Ap;
ymax<<<GridDim,BlockDim>>>(n_cols, gpu_alpha, gpu_Ap , gpu_r);
//rsnew = r.r;
dot(n_cols,gpu_r,gpu_r,gpu_r_sqrd_new);
// transfer rsnew from device to host to compare with the given tolerance
cudaMemcpy(&snew, gpu_r_sqrd_new, sizeof(double) ,cudaMemcpyDeviceToHost);
// Convergence test
if(snew < epsilon*epsilon)
{
printf("\t[STEP %d] residual = %E\n",j,sqrt(snew));
break;
}
// p = r + (rsnew / rsold) * p;
scalardiv<<<1,1>>>(gpu_r_sqrd_new, gpu_r_sqrd_old, gpu_beta);
axpy<<<GridDim,BlockDim>>>(n_cols, gpu_beta, gpu_p, gpu_r, gpu_p);
//rsold = rsnew;
cudaMemcpy(gpu_r_sqrd_old, gpu_r_sqrd_new, sizeof(double) ,cudaMemcpyDeviceToDevice);
}
// Get back the results from the device to the host
cudaMemcpy(host_x, gpu_x, n_cols*sizeof(double) ,cudaMemcpyDeviceToHost);
// stop the timer
t2 = second();
printf("Time for GPU CG (dense solver) = %f [s]\n",(t2-t1));
// free device memory
cudaFree(gpu_A);
cudaFree(gpu_x);
cudaFree(gpu_b);
cudaFree(gpu_r);
cudaFree(gpu_p);
cudaFree(gpu_Ap);
cudaFree(gpu_alpha);
cudaFree(gpu_beta);
cudaFree(gpu_r_sqrd_old);
cudaFree(gpu_r_sqrd_new);
// free host memory
free(host_A);
free(host_b);
free(host_x);
return 0;
}
|
873ebfe245166e79bfa47ea3d642d52d69391571.hip | // !!! This is a file automatically generated by hipify!!!
/*-------------------------------------------------------------------------
*
* CUDA function for backrpojection using FDK weigts for CBCT
*
*
* CODE by Ander Biguri & Sepideh Hatamikia
* ---------------------------------------------------------------------------
* ---------------------------------------------------------------------------
* Copyright (c) 2015, University of Bath and CERN- European Organization for
* Nuclear Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------------
*
* Contact: [email protected]
* Codes : https://github.com/CERN/TIGRE
* ---------------------------------------------------------------------------
*/
#define PI_2 1.57079632679489661923
#include <algorithm>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include "voxel_backprojection.hpp"
#include "voxel_backprojection_parallel.hpp"
#include "voxel_backprojection_spherical.hpp"
#include "voxel_backprojection_parallel_spherical.hpp"
#include <stdio.h>
#include <math.h>
// https://stackoverflow.com/questions/16282136/is-there-a-cuda-equivalent-of-perror
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
printf("%s \n",msg);\
printf("CBCT:CUDA:Atb",hipGetErrorString(__err));\
hipDeviceReset();\
exit(__err);\
} \
} while (0)
#define MAXTREADS 1024
/*GEOMETRY DEFINITION
*
* Detector plane, behind
* |-----------------------------|
* | |
* | |
* | |
* | |
* | +--------+ |
* | / /| |
* A Z | / / |*D |
* | | +--------+ | |
* | | | | | |
* | | | *O | + |
* *--->y | | | / |
* / | | |/ |
* V X | +--------+ |
* |-----------------------------|
*
* *S
*
*
*
*
*
**/
texture<float, hipTextureType3D , hipReadModeElementType> tex;
__global__ void kernelPixelBackprojection_parallel_spherical(const Geometry geo,
float* image,
const int indAlpha,
const float COR,
const float DSD,
const float DSO,
const Point3D deltaX,
const Point3D deltaY,
const Point3D deltaZ,
const Point3D xyzOrigin,
const Point3D xyzOffset,
const Point3D uv0Offset,
Point3D source){
unsigned long indY = blockIdx.y * blockDim.y + threadIdx.y;
unsigned long indX = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long indZ = blockIdx.z * blockDim.z + threadIdx.z;
//Make sure we dont go out of bounds
unsigned long long idx =indZ*geo.nVoxelX*geo.nVoxelY+indY*geo.nVoxelX + indX;
if (indX>=geo.nVoxelX | indY>=geo.nVoxelY |indZ>=geo.nVoxelZ)
return;
// Geometric transformations:
//Source, scaled XYZ coordinates
// "XYZ" in the scaled coordinate system of the current point. The iamge is rotated with the projection angles.
Point3D P;
P.x=(xyzOrigin.x+indX*deltaX.x+indY*deltaY.x+indZ*deltaZ.x);
P.y=(xyzOrigin.y+indX*deltaX.y+indY*deltaY.y+indZ*deltaZ.y)-COR/geo.dDetecU;
P.z=(xyzOrigin.z+indX*deltaX.z+indY*deltaY.z+indZ*deltaZ.z);
source.y=P.y;
source.z=P.z;
// This is the vector defining the line from the source to the Voxel
float vectX,vectY,vectZ;
vectX=(P.x -source.x);
vectY=(P.y -source.y);
vectZ=(P.z -source.z);
// Get the coordinates in the detector UV where the mid point of the voxel is projected.
float t=(DSO-DSD /*-DDO*/ - source.x)/vectX;
float y,z;
y=vectY*t+source.y;
z=vectZ*t+source.z;
float u,v;
u=y+geo.nDetecU/2;
v=z+geo.nDetecV/2;
float weigth;
float realx,realy;
realx=-geo.sVoxelX/2+geo.dVoxelX/2 +indX*geo.dVoxelX +xyzOffset.x;
realy=-geo.sVoxelY/2+geo.dVoxelY/2 +indY*geo.dVoxelY +xyzOffset.y+COR;
weigth=(DSO+realy*sin(geo.alpha)-realx*cos(geo.alpha))/DSO; //TODO: This is wrong for spherical
weigth=1/(weigth*weigth);
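// This implements the FDK-style 1/U^2 distance weighting, where
// U = (DSO + y*sin(alpha) - x*cos(alpha))/DSO is the source-to-voxel distance along the
// central ray normalised by DSO; as the TODO above notes, this weight is only strictly
// valid for a circular (non-spherical) trajectory.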
// Get the value at the computed (u,v) and multiply by the corresponding weight.
image[idx]+=tex3D(tex, v ,
u ,
indAlpha+0.5)
*weigth;
// image[idx]=v;
}
int voxel_backprojection_parallel_spherical(float const * const projections, Geometry geo, float* result,float const * const angles,int nangles){
// mexPrintf("In fucntion COR %p \n",geo.COR);
// mexPrintf("In fucntion offOrig %p \n",geo.offOrigX);
// return 0;
/*
* Allocate texture memory on the device
*/
// copy data to CUDA memory
hipArray *d_projectiondata = 0;
const hipExtent extent = make_hipExtent(geo.nDetecU,geo.nDetecV,nangles);
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
hipMalloc3DArray(&d_projectiondata, &channelDesc, extent);
cudaCheckErrors("hipMalloc3D error 3D tex");
hipMemcpy3DParms copyParams = { 0 };
copyParams.srcPtr = make_hipPitchedPtr((void*)projections, extent.width*sizeof(float), extent.width, extent.height);
copyParams.dstArray = d_projectiondata;
copyParams.extent = extent;
copyParams.kind = hipMemcpyHostToDevice;
hipMemcpy3D(&copyParams);
cudaCheckErrors("hipMemcpy3D fail");
// Configure texture options
tex.normalized = false;
tex.filterMode = hipFilterModeLinear;
tex.addressMode[0] = hipAddressModeBorder;
tex.addressMode[1] = hipAddressModeBorder;
tex.addressMode[2] = hipAddressModeBorder;
hipBindTextureToArray(tex, d_projectiondata, channelDesc);
cudaCheckErrors("3D texture memory bind fail");
// Allocate result image memory
size_t num_bytes = geo.nVoxelX*geo.nVoxelY*geo.nVoxelZ * sizeof(float);
float* dimage;
hipMalloc((void**)&dimage, num_bytes);
hipMemset(dimage,0,num_bytes);
cudaCheckErrors("hipMalloc fail");
// If we are going to time
bool timekernel=false;
hipEvent_t start, stop;
float elapsedTime;
if (timekernel){
hipEventCreate(&start);
hipEventRecord(start,0);
}
int divx,divy,divz;
// empirical
divx=32;
divy=32;
divz=1;
dim3 grid((geo.nVoxelX+divx-1)/divx,
(geo.nVoxelY+divy-1)/divy,
(geo.nVoxelZ+divz-1)/divz);
dim3 block(divx,divy,divz);
Point3D deltaX,deltaY,deltaZ,xyzOrigin, offOrig, offDetec,source;
for (unsigned int i=0;i<nangles;i++){
geo.alpha=-angles[i*3];
geo.theta=-angles[i*3+1];
geo.psi =-angles[i*3+2];
computeDeltasCubeSphericalParallel(geo,i,&xyzOrigin,&deltaX,&deltaY,&deltaZ,&source);
offOrig.x=geo.offOrigX[i];
offOrig.y=geo.offOrigY[i];
offDetec.x=geo.offDetecU[i];
offDetec.y=geo.offDetecV[i];
hipLaunchKernelGGL(( kernelPixelBackprojection_parallel_spherical), dim3(grid),dim3(block), 0, 0, geo,dimage,i,geo.COR[i],geo.DSD[i],geo.DSO[i],deltaX,deltaY,deltaZ,xyzOrigin,offOrig,offDetec,source);
cudaCheckErrors("Kernel fail");
}
if (timekernel){
hipEventCreate(&stop);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start,stop);
printf("%f\n" ,elapsedTime);
cudaCheckErrors("cuda Timing fail");
}
hipMemcpy(result, dimage, num_bytes, hipMemcpyDeviceToHost);
cudaCheckErrors("hipMemcpy result fail");
hipUnbindTexture(tex);
cudaCheckErrors("Unbind fail");
hipFree(dimage);
hipFreeArray(d_projectiondata);
cudaCheckErrors("hipFree d_imagedata fail");
//hipDeviceReset();
return 0;
}
void computeDeltasCubeSphericalParallel(Geometry geo, int i, Point3D* xyzorigin, Point3D* deltaX, Point3D* deltaY, Point3D* deltaZ,Point3D *S){
Point3D P, Px,Py,Pz;
// Get coords of Img(0,0,0)
P.x=-(geo.sVoxelX/2-geo.dVoxelX/2)+geo.offOrigX[i];
P.y=-(geo.sVoxelY/2-geo.dVoxelY/2)+geo.offOrigY[i];
P.z=-(geo.sVoxelZ/2-geo.dVoxelZ/2)+geo.offOrigZ[i];
// Get coords from the next voxel in each direction
Px.x=P.x+geo.dVoxelX; Py.x=P.x; Pz.x=P.x;
Px.y=P.y; Py.y=P.y+geo.dVoxelY; Pz.y=P.y;
Px.z=P.z; Py.z=P.z; Pz.z=P.z+geo.dVoxelZ;
// Rotate image around X axis (this is equivalent to rotating the source and detector) RZ RY RZ
eulerZYZT(geo,&P);
eulerZYZT(geo,&Px);
eulerZYZT(geo,&Py);
eulerZYZT(geo,&Pz);
//detector offset
P.z =P.z-geo.offDetecV[i]; P.y =P.y-geo.offDetecU[i];
Px.z =Px.z-geo.offDetecV[i]; Px.y =Px.y-geo.offDetecU[i];
Py.z =Py.z-geo.offDetecV[i]; Py.y =Py.y-geo.offDetecU[i];
Pz.z =Pz.z-geo.offDetecV[i]; Pz.y =Pz.y-geo.offDetecU[i];
//Detector Roll pitch Yaw
//
//
// first, we need to offset everything so (0,0,0) is the center of the detector
// Only X is required for that
// P.x=P.x+(geo.DSD-geo.DSO);
// Px.x=Px.x+(geo.DSD-geo.DSO);
// Py.x=Py.x+(geo.DSD-geo.DSO);
// Pz.x=Pz.x+(geo.DSD-geo.DSO);
// rollPitchYawT(geo,i,&P);
// rollPitchYawT(geo,i,&Px);
// rollPitchYawT(geo,i,&Py);
// rollPitchYawT(geo,i,&Pz);
//
// P.x=P.x-(geo.DSD-geo.DSO);
// Px.x=Px.x-(geo.DSD-geo.DSO);
// Py.x=Py.x-(geo.DSD-geo.DSO);
// Pz.x=Pz.x-(geo.DSD-geo.DSO);
// Done for P, now source
// Point3D source;
// source.x=geo.DSD; //allready offseted for rotation
// source.y=-geo.offDetecU[i];
// source.z=-geo.offDetecV[i];
// rollPitchYawT(geo,i,&source);
// source.x=source.x-(geo.DSD-geo.DSO);// source.y=source.y-auxOff.y; source.z=source.z-auxOff.z;
// mexPrintf("%f,%f,%f\n",source.x,source.y,source.z);
// Scale coords so detector pixels are 1x1
Point3D source;
source.x=geo.DSO[i]; // already offset for rotation
source.y=-geo.offDetecU[i];
source.z=-geo.offDetecV[i];
P.z =P.z /geo.dDetecV; P.y =P.y/geo.dDetecU;
Px.z=Px.z/geo.dDetecV; Px.y=Px.y/geo.dDetecU;
Py.z=Py.z/geo.dDetecV; Py.y=Py.y/geo.dDetecU;
Pz.z=Pz.z/geo.dDetecV; Pz.y=Pz.y/geo.dDetecU;
source.z=source.z/geo.dDetecV; source.y=source.y/geo.dDetecU;
// get deltas of the changes in voxels
deltaX->x=Px.x-P.x; deltaX->y=Px.y-P.y; deltaX->z=Px.z-P.z;
deltaY->x=Py.x-P.x; deltaY->y=Py.y-P.y; deltaY->z=Py.z-P.z;
deltaZ->x=Pz.x-P.x; deltaZ->y=Pz.y-P.y; deltaZ->z=Pz.z-P.z;
*xyzorigin=P;
*S=source;
}
| 873ebfe245166e79bfa47ea3d642d52d69391571.cu | /*-------------------------------------------------------------------------
*
* CUDA function for backrpojection using FDK weigts for CBCT
*
*
* CODE by Ander Biguri & Sepideh Hatamikia
* ---------------------------------------------------------------------------
* ---------------------------------------------------------------------------
* Copyright (c) 2015, University of Bath and CERN- European Organization for
* Nuclear Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------------
*
* Contact: [email protected]
* Codes : https://github.com/CERN/TIGRE
* ---------------------------------------------------------------------------
*/
#define PI_2 1.57079632679489661923
#include <algorithm>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include "voxel_backprojection.hpp"
#include "voxel_backprojection_parallel.hpp"
#include "voxel_backprojection_spherical.hpp"
#include "voxel_backprojection_parallel_spherical.hpp"
#include <stdio.h>
#include <math.h>
// https://stackoverflow.com/questions/16282136/is-there-a-cuda-equivalent-of-perror
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
printf("%s \n",msg);\
printf("CBCT:CUDA:Atb",cudaGetErrorString(__err));\
cudaDeviceReset();\
exit(__err);\
} \
} while (0)
#define MAXTREADS 1024
/*GEOMETRY DEFINITION
*
* Detector plane, behind
* |-----------------------------|
* | |
* | |
* | |
* | |
* | +--------+ |
* | / /| |
* A Z | / / |*D |
* | | +--------+ | |
* | | | | | |
* | | | *O | + |
* *--->y | | | / |
* / | | |/ |
* V X | +--------+ |
* |-----------------------------|
*
* *S
*
*
*
*
*
**/
texture<float, cudaTextureType3D , cudaReadModeElementType> tex;
__global__ void kernelPixelBackprojection_parallel_spherical(const Geometry geo,
float* image,
const int indAlpha,
const float COR,
const float DSD,
const float DSO,
const Point3D deltaX,
const Point3D deltaY,
const Point3D deltaZ,
const Point3D xyzOrigin,
const Point3D xyzOffset,
const Point3D uv0Offset,
Point3D source){
unsigned long indY = blockIdx.y * blockDim.y + threadIdx.y;
unsigned long indX = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long indZ = blockIdx.z * blockDim.z + threadIdx.z;
//Make sure we dont go out of bounds
unsigned long long idx =indZ*geo.nVoxelX*geo.nVoxelY+indY*geo.nVoxelX + indX;
if (indX>=geo.nVoxelX | indY>=geo.nVoxelY |indZ>=geo.nVoxelZ)
return;
// Geometric transformations:
//Source, scaled XYZ coordinates
// "XYZ" in the scaled coordinate system of the current point. The iamge is rotated with the projection angles.
Point3D P;
P.x=(xyzOrigin.x+indX*deltaX.x+indY*deltaY.x+indZ*deltaZ.x);
P.y=(xyzOrigin.y+indX*deltaX.y+indY*deltaY.y+indZ*deltaZ.y)-COR/geo.dDetecU;
P.z=(xyzOrigin.z+indX*deltaX.z+indY*deltaY.z+indZ*deltaZ.z);
source.y=P.y;
source.z=P.z;
// This is the vector defining the line from the source to the Voxel
float vectX,vectY,vectZ;
vectX=(P.x -source.x);
vectY=(P.y -source.y);
vectZ=(P.z -source.z);
// Get the coordinates in the detector UV where the mid point of the voxel is projected.
float t=(DSO-DSD /*-DDO*/ - source.x)/vectX;
float y,z;
y=vectY*t+source.y;
z=vectZ*t+source.z;
float u,v;
u=y+geo.nDetecU/2;
v=z+geo.nDetecV/2;
float weigth;
float realx,realy;
realx=-geo.sVoxelX/2+geo.dVoxelX/2 +indX*geo.dVoxelX +xyzOffset.x;
realy=-geo.sVoxelY/2+geo.dVoxelY/2 +indY*geo.dVoxelY +xyzOffset.y+COR;
weigth=(DSO+realy*sin(geo.alpha)-realx*cos(geo.alpha))/DSO; //TODO: This is wrong for spherical
weigth=1/(weigth*weigth);
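// This implements the FDK-style 1/U^2 distance weighting, where
// U = (DSO + y*sin(alpha) - x*cos(alpha))/DSO is the source-to-voxel distance along the
// central ray normalised by DSO; as the TODO above notes, this weight is only strictly
// valid for a circular (non-spherical) trajectory.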
// Get the value at the computed (u,v) and multiply by the corresponding weight.
image[idx]+=tex3D(tex, v ,
u ,
indAlpha+0.5)
*weigth;
// image[idx]=v;
}
int voxel_backprojection_parallel_spherical(float const * const projections, Geometry geo, float* result,float const * const angles,int nangles){
// mexPrintf("In fucntion COR %p \n",geo.COR);
// mexPrintf("In fucntion offOrig %p \n",geo.offOrigX);
// return 0;
/*
* Allocate texture memory on the device
*/
// copy data to CUDA memory
cudaArray *d_projectiondata = 0;
const cudaExtent extent = make_cudaExtent(geo.nDetecU,geo.nDetecV,nangles);
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
cudaMalloc3DArray(&d_projectiondata, &channelDesc, extent);
cudaCheckErrors("cudaMalloc3D error 3D tex");
cudaMemcpy3DParms copyParams = { 0 };
copyParams.srcPtr = make_cudaPitchedPtr((void*)projections, extent.width*sizeof(float), extent.width, extent.height);
copyParams.dstArray = d_projectiondata;
copyParams.extent = extent;
copyParams.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&copyParams);
cudaCheckErrors("cudaMemcpy3D fail");
// Configure texture options
tex.normalized = false;
tex.filterMode = cudaFilterModeLinear;
tex.addressMode[0] = cudaAddressModeBorder;
tex.addressMode[1] = cudaAddressModeBorder;
tex.addressMode[2] = cudaAddressModeBorder;
cudaBindTextureToArray(tex, d_projectiondata, channelDesc);
cudaCheckErrors("3D texture memory bind fail");
// Allocate result image memory
size_t num_bytes = geo.nVoxelX*geo.nVoxelY*geo.nVoxelZ * sizeof(float);
float* dimage;
cudaMalloc((void**)&dimage, num_bytes);
cudaMemset(dimage,0,num_bytes);
cudaCheckErrors("cudaMalloc fail");
// If we are going to time
bool timekernel=false;
cudaEvent_t start, stop;
float elapsedTime;
if (timekernel){
cudaEventCreate(&start);
cudaEventRecord(start,0);
}
int divx,divy,divz;
// empirical
divx=32;
divy=32;
divz=1;
dim3 grid((geo.nVoxelX+divx-1)/divx,
(geo.nVoxelY+divy-1)/divy,
(geo.nVoxelZ+divz-1)/divz);
dim3 block(divx,divy,divz);
Point3D deltaX,deltaY,deltaZ,xyzOrigin, offOrig, offDetec,source;
for (unsigned int i=0;i<nangles;i++){
geo.alpha=-angles[i*3];
geo.theta=-angles[i*3+1];
geo.psi =-angles[i*3+2];
computeDeltasCubeSphericalParallel(geo,i,&xyzOrigin,&deltaX,&deltaY,&deltaZ,&source);
offOrig.x=geo.offOrigX[i];
offOrig.y=geo.offOrigY[i];
offDetec.x=geo.offDetecU[i];
offDetec.y=geo.offDetecV[i];
kernelPixelBackprojection_parallel_spherical<<<grid,block>>>(geo,dimage,i,geo.COR[i],geo.DSD[i],geo.DSO[i],deltaX,deltaY,deltaZ,xyzOrigin,offOrig,offDetec,source);
cudaCheckErrors("Kernel fail");
}
if (timekernel){
cudaEventCreate(&stop);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start,stop);
printf("%f\n" ,elapsedTime);
cudaCheckErrors("cuda Timing fail");
}
cudaMemcpy(result, dimage, num_bytes, cudaMemcpyDeviceToHost);
cudaCheckErrors("cudaMemcpy result fail");
cudaUnbindTexture(tex);
cudaCheckErrors("Unbind fail");
cudaFree(dimage);
cudaFreeArray(d_projectiondata);
cudaCheckErrors("cudaFree d_imagedata fail");
//cudaDeviceReset();
return 0;
}
void computeDeltasCubeSphericalParallel(Geometry geo, int i, Point3D* xyzorigin, Point3D* deltaX, Point3D* deltaY, Point3D* deltaZ,Point3D *S){
Point3D P, Px,Py,Pz;
// Get coords of Img(0,0,0)
P.x=-(geo.sVoxelX/2-geo.dVoxelX/2)+geo.offOrigX[i];
P.y=-(geo.sVoxelY/2-geo.dVoxelY/2)+geo.offOrigY[i];
P.z=-(geo.sVoxelZ/2-geo.dVoxelZ/2)+geo.offOrigZ[i];
// Get coords from the next voxel in each direction
Px.x=P.x+geo.dVoxelX; Py.x=P.x; Pz.x=P.x;
Px.y=P.y; Py.y=P.y+geo.dVoxelY; Pz.y=P.y;
Px.z=P.z; Py.z=P.z; Pz.z=P.z+geo.dVoxelZ;
// Rotate image around X axis (this is equivalent to rotating the source and detector) RZ RY RZ
eulerZYZT(geo,&P);
eulerZYZT(geo,&Px);
eulerZYZT(geo,&Py);
eulerZYZT(geo,&Pz);
//detector offset
P.z =P.z-geo.offDetecV[i]; P.y =P.y-geo.offDetecU[i];
Px.z =Px.z-geo.offDetecV[i]; Px.y =Px.y-geo.offDetecU[i];
Py.z =Py.z-geo.offDetecV[i]; Py.y =Py.y-geo.offDetecU[i];
Pz.z =Pz.z-geo.offDetecV[i]; Pz.y =Pz.y-geo.offDetecU[i];
//Detector Roll pitch Yaw
//
//
// first, we need to offset everything so (0,0,0) is the center of the detector
// Only X is required for that
// P.x=P.x+(geo.DSD-geo.DSO);
// Px.x=Px.x+(geo.DSD-geo.DSO);
// Py.x=Py.x+(geo.DSD-geo.DSO);
// Pz.x=Pz.x+(geo.DSD-geo.DSO);
// rollPitchYawT(geo,i,&P);
// rollPitchYawT(geo,i,&Px);
// rollPitchYawT(geo,i,&Py);
// rollPitchYawT(geo,i,&Pz);
//
// P.x=P.x-(geo.DSD-geo.DSO);
// Px.x=Px.x-(geo.DSD-geo.DSO);
// Py.x=Py.x-(geo.DSD-geo.DSO);
// Pz.x=Pz.x-(geo.DSD-geo.DSO);
// Done for P, now source
// Point3D source;
// source.x=geo.DSD; //allready offseted for rotation
// source.y=-geo.offDetecU[i];
// source.z=-geo.offDetecV[i];
// rollPitchYawT(geo,i,&source);
// source.x=source.x-(geo.DSD-geo.DSO);// source.y=source.y-auxOff.y; source.z=source.z-auxOff.z;
// mexPrintf("%f,%f,%f\n",source.x,source.y,source.z);
// Scale coords so detector pixels are 1x1
Point3D source;
source.x=geo.DSO[i]; //already offset for rotation
source.y=-geo.offDetecU[i];
source.z=-geo.offDetecV[i];
P.z =P.z /geo.dDetecV; P.y =P.y/geo.dDetecU;
Px.z=Px.z/geo.dDetecV; Px.y=Px.y/geo.dDetecU;
Py.z=Py.z/geo.dDetecV; Py.y=Py.y/geo.dDetecU;
Pz.z=Pz.z/geo.dDetecV; Pz.y=Pz.y/geo.dDetecU;
source.z=source.z/geo.dDetecV; source.y=source.y/geo.dDetecU;
// get deltas of the changes in voxels
deltaX->x=Px.x-P.x; deltaX->y=Px.y-P.y; deltaX->z=Px.z-P.z;
deltaY->x=Py.x-P.x; deltaY->y=Py.y-P.y; deltaY->z=Py.z-P.z;
deltaZ->x=Pz.x-P.x; deltaZ->y=Pz.y-P.y; deltaZ->z=Pz.z-P.z;
*xyzorigin=P;
*S=source;
}
|
73e8932972025ee430778eccd42a66c9226aa385.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <unordered_map>
#include "defines.h"
#include "kernels.h"
namespace SCAMP {
constexpr int DIAGS_PER_THREAD = 4;
constexpr int BLOCKSZ_SP = 512;
constexpr int BLOCKSZ_DP = 256;
constexpr int BLOCKSPERSM = 2;
constexpr int TILE_HEIGHT_SP = 200;
constexpr int TILE_HEIGHT_DP = 200;
template <typename T>
struct SCAMPKernelInputArgs {
SCAMPKernelInputArgs(const T *__restrict__ cov_, const T *__restrict__ dfa_,
const T *__restrict__ dfb_, const T *__restrict__ dga_,
const T *__restrict__ dgb_,
const T *__restrict__ normsa_,
const T *__restrict__ normsb_, uint32_t n_x_,
uint32_t n_y_, int32_t exclusion_lower_,
int32_t exclusion_upper_, OptionalArgs opt_)
: cov(cov_),
dfa(dfa_),
dfb(dfb_),
dga(dga_),
dgb(dgb_),
normsa(normsa_),
normsb(normsb_),
n_x(n_x_),
n_y(n_y_),
exclusion_lower(exclusion_lower_),
exclusion_upper(exclusion_upper_),
opt(opt_) {}
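  // Convenience constructor used by the launch helpers at the bottom of this
  // file: `transpose` swaps the roles of the A and B series (and of n_x/n_y)
  // so the same kernel can work on the transposed (lower-triangular) tile,
  // while `ab_join` selects the exclusion zone for an AB-join rather than a
  // self-join.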
SCAMPKernelInputArgs(Tile *t, bool transpose, bool ab_join) {
cov = t->QT();
dfa = transpose ? t->dfb() : t->dfa();
dfb = transpose ? t->dfa() : t->dfb();
dga = transpose ? t->dgb() : t->dga();
dgb = transpose ? t->dga() : t->dgb();
normsa = transpose ? t->normsb() : t->normsa();
normsb = transpose ? t->normsa() : t->normsb();
n_x = transpose ? t->get_tile_height() : t->get_tile_width();
n_y = transpose ? t->get_tile_width() : t->get_tile_height();
n_x = n_x - t->info()->mp_window + 1;
n_y = n_y - t->info()->mp_window + 1;
std::pair<int, int> exclusion =
ab_join ? t->get_exclusion_for_ab_join(!transpose)
: t->get_exclusion_for_self_join(!transpose);
exclusion_lower = exclusion.first;
exclusion_upper = exclusion.second;
opt = t->info()->opt_args;
}
const T *__restrict__ cov;
const T *__restrict__ dfa;
const T *__restrict__ dfb;
const T *__restrict__ dga;
const T *__restrict__ dgb;
const T *__restrict__ normsa;
const T *__restrict__ normsb;
const T *__restrict__ extras[3];
uint32_t n_x;
uint32_t n_y;
int32_t exclusion_lower;
int32_t exclusion_upper;
OptionalArgs opt;
};
template <typename DATA_TYPE, typename PROFILE_DATA_TYPE, SCAMPProfileType type>
struct SCAMPSmem {
__device__ SCAMPSmem(char *smem, bool compute_rows, bool compute_columns,
int tile_width, int tile_height, int extra_operands) {
constexpr int data_size = sizeof(DATA_TYPE);
constexpr int profile_size = sizeof(PROFILE_DATA_TYPE);
int curr_byte = 0;
df_col = (DATA_TYPE *)(smem);
curr_byte += tile_width * data_size;
dg_col = (DATA_TYPE *)(smem + curr_byte);
curr_byte += tile_width * data_size;
inorm_col = (DATA_TYPE *)(smem + curr_byte);
curr_byte += tile_width * data_size;
df_row = (DATA_TYPE *)(smem + curr_byte);
curr_byte += tile_height * data_size;
dg_row = (DATA_TYPE *)(smem + curr_byte);
curr_byte += tile_height * data_size;
inorm_row = (DATA_TYPE *)(smem + curr_byte);
curr_byte += tile_height * data_size;
if (compute_columns) {
local_mp_col = (PROFILE_DATA_TYPE *)(smem + curr_byte);
curr_byte += tile_width * profile_size;
} else {
local_mp_col = nullptr;
}
if (compute_rows) {
local_mp_row = (PROFILE_DATA_TYPE *)(smem + curr_byte);
curr_byte += tile_height * profile_size;
} else {
local_mp_row = nullptr;
}
}
DATA_TYPE *__restrict__ df_col;
DATA_TYPE *__restrict__ dg_col;
DATA_TYPE *__restrict__ inorm_col;
DATA_TYPE *__restrict__ df_row;
DATA_TYPE *__restrict__ dg_row;
DATA_TYPE *__restrict__ inorm_row;
PROFILE_DATA_TYPE *__restrict__ local_mp_col;
PROFILE_DATA_TYPE *__restrict__ local_mp_row;
};
template <typename ACCUM_TYPE>
struct SCAMPThreadInfo {
ACCUM_TYPE cov1;
ACCUM_TYPE cov2;
ACCUM_TYPE cov3;
ACCUM_TYPE cov4;
uint32_t local_row;
uint32_t local_col;
uint32_t global_row;
uint32_t global_col;
};
enum SCAMPAtomicType { ATOMIC_BLOCK, ATOMIC_GLOBAL, ATOMIC_SYSTEM };
#if __CUDA_ARCH__ < 600
// Double atomicAdd is not implemented in hardware before Pascal, providing a
// software implementation here
__device__ double atomicAdd(double *address, double val) {
unsigned long long int *address_as_ull = (unsigned long long int *)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val + __longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
template <typename T, SCAMPAtomicType type>
__device__ inline T do_atomicCAS(T *address, T v1, T v2) {
#if __CUDA_ARCH__ < 600
return atomicCAS(address, v1, v2);
#else
switch (type) {
case ATOMIC_BLOCK:
return atomicCAS_block(address, v1, v2);
case ATOMIC_GLOBAL:
return atomicCAS(address, v1, v2);
case ATOMIC_SYSTEM:
return atomicCAS_system(address, v1, v2);
}
// Should never happen
return 0;
#endif
}
template <typename T, SCAMPAtomicType type>
__device__ inline uint32_t do_atomicAdd(T *address, T amount) {
#if __CUDA_ARCH__ < 600
return atomicAdd(address, amount);
#else
switch (type) {
case ATOMIC_BLOCK:
return atomicAdd_block(address, amount);
case ATOMIC_GLOBAL:
return atomicAdd(address, amount);
case ATOMIC_SYSTEM:
return atomicAdd_system(address, amount);
}
// Should never happen
return 0;
#endif
}
template <typename T, SCAMPAtomicType type>
__device__ __forceinline__ T do_atomicMax(T *address, T other) {
#if __CUDA_ARCH__ < 600
return atomicMax(address, other);
#else
switch (type) {
case ATOMIC_BLOCK:
return atomicMax_block(address, other);
case ATOMIC_GLOBAL:
return atomicMax(address, other);
case ATOMIC_SYSTEM:
return atomicMax_system(address, other);
}
// Should never happen
return 0;
#endif
}
template <typename T, SCAMPAtomicType type>
__device__ __forceinline__ T do_atomicMin(T *address, T other) {
#if __CUDA_ARCH__ < 600
  return atomicMin(address, other);
#else
switch (type) {
case ATOMIC_BLOCK:
return atomicMin_block(address, other);
case ATOMIC_GLOBAL:
return atomicMin(address, other);
case ATOMIC_SYSTEM:
return atomicMin_system(address, other);
}
// Should never happen
return 0;
#endif
}
template <SCAMPAtomicType type>
__device__ __forceinline__ float fAtomicMax(float *addr, float value) {
float old;
old = (value >= 0) ? __int_as_float(do_atomicMax<int, type>(
(int *)addr, __float_as_int(value)))
: __uint_as_float(do_atomicMin<unsigned int, type>(
(unsigned int *)addr, __float_as_uint(value)));
return old;
}
template <SCAMPAtomicType type>
__device__ __forceinline__ float fAtomicMax_check(float *addr, float value,
float check) {
if (value < check) {
return check;
}
return fAtomicMax<type>(addr, value);
}
// Atomically updates the MP/idxs using a single 64-bit integer. We lose a small
// amount of precision in the output; if we did not do this, we would be unable
// to atomically update both the matrix profile and the indexes without using a
// critical section and dedicated locks.
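// Note: mp_entry (defined in defines.h, not shown here) is assumed to be a
// union overlaying a uint64_t with {float floats[2]; uint32_t ints[2];}, so
// the profile value lives in the low 32 bits and the index in the high 32
// bits; one 64-bit CAS then updates both fields consistently.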
template <SCAMPAtomicType type>
__device__ inline void MPatomicMax(volatile uint64_t *address, float val,
unsigned int idx) {
mp_entry loc, loctest;
loc.floats[0] = val;
loc.ints[1] = idx;
loctest.ulong = *address;
while (loctest.floats[0] < val) {
loctest.ulong = do_atomicCAS<unsigned long long int, type>(
(unsigned long long int *)address, loctest.ulong, loc.ulong);
}
}
// As above, but checks a previously read value before attempting another read
// This allows us to exploit vectorized loads of the matrix profile
__device__ inline void MPatomicMax_check(
volatile uint64_t *__restrict__ address, float val, unsigned int idx,
float curr_val) {
if (val > curr_val) {
mp_entry loc, loctest;
loc.floats[0] = val;
loc.ints[1] = idx;
loctest.ulong = *address;
while (loctest.floats[0] < val) {
loctest.ulong = do_atomicCAS<unsigned long long int, ATOMIC_BLOCK>(
(unsigned long long int *)address, loctest.ulong, loc.ulong);
}
}
}
// Gets the max of 4 values (avoids returning NaN if any of d1-d4 are NaN)
template <typename T>
__device__ inline T max4(const T &d1, const T &d2, const T &d3, const T &d4) {
float ret = -2;
if (d1 > ret) {
ret = d1;
}
if (d2 > ret) {
ret = d2;
}
if (d3 > ret) {
ret = d3;
}
if (d4 > ret) {
ret = d4;
}
return ret;
}
// Gets the max of 4 values (avoids returning NaN if any of d1-d4 are NaN)
// Including the index
template <typename T>
__device__ inline T max4_index(const T &d1, const T &d2, const T &d3,
const T &d4, const uint32_t init,
uint32_t &idx) {
float ret = -2;
if (d1 > ret) {
ret = d1;
idx = init;
}
if (d2 > ret) {
ret = d2;
idx = init + 1;
}
if (d3 > ret) {
ret = d3;
idx = init + 2;
}
if (d4 > ret) {
ret = d4;
idx = init + 3;
}
return ret;
}
class SCAMPStrategy {
public:
};
/////////////////////////////////////////////////////////////////////////////////////
// THESE HEADERS DEFINE VARIOUS COMPUTE STRATEGIES USED TO COMPUTE VARIOUS
// PROFILE TYPES
///////////////////////////////////////////////////////////////////////////////////
#include "kernels_compute.h"
#include "kernels_smem.h"
/////////////////////////////////////////////////////////////////////////////////////
//
// SCAMP TACTIC DESCRIBES STRATEGY FOR WHAT OPS TO EXECUTE IN THE KERNEL
//
/////////////////////////////////////////////////////////////////////////////////////
template <typename DATA_TYPE, typename VEC2_DATA_TYPE, typename VEC4_DATA_TYPE,
typename PROFILE_DATA_TYPE, typename ACCUM_TYPE,
typename DISTANCE_TYPE, bool COMPUTE_ROWS, bool COMPUTE_COLS,
int TILE_WIDTH, int TILE_HEIGHT, int BLOCKSZ,
SCAMPProfileType PROFILE_TYPE>
class SCAMPTactic {
public:
__device__ SCAMPTactic() {}
__device__ void InitMem(
SCAMPKernelInputArgs<double> &args,
SCAMPSmem<DATA_TYPE, PROFILE_DATA_TYPE, PROFILE_TYPE> &smem,
PROFILE_DATA_TYPE *__restrict__ profile_a,
PROFILE_DATA_TYPE *__restrict__ profile_b, uint32_t col_start,
uint32_t row_start) {
_init_mem.exec(args, smem, profile_a, profile_b, col_start, row_start);
}
__device__ inline FORCE_INLINE void DoIteration(
SCAMPThreadInfo<ACCUM_TYPE> &info,
SCAMPSmem<DATA_TYPE, PROFILE_DATA_TYPE, PROFILE_TYPE> &smem,
OptionalArgs &args) {
_do_iteration.exec(info, smem, args);
}
__device__ inline void WriteBack(uint32_t tile_start_x, uint32_t tile_start_y,
uint32_t n_x, uint32_t n_y,
PROFILE_DATA_TYPE *__restrict__ local_mp_col,
PROFILE_DATA_TYPE *__restrict__ local_mp_row,
PROFILE_DATA_TYPE *__restrict__ profile_A,
PROFILE_DATA_TYPE *__restrict__ profile_B) {
_do_writeback.exec(tile_start_x, tile_start_y, n_x, n_y, local_mp_col,
local_mp_row, profile_A, profile_B);
}
private:
InitMemStrategy<DATA_TYPE, PROFILE_DATA_TYPE, COMPUTE_ROWS, COMPUTE_COLS,
TILE_WIDTH, TILE_HEIGHT, BLOCKSZ, PROFILE_TYPE>
_init_mem;
DoIterationStrategy<DATA_TYPE, VEC2_DATA_TYPE, VEC4_DATA_TYPE, ACCUM_TYPE,
PROFILE_DATA_TYPE, DISTANCE_TYPE, COMPUTE_ROWS,
COMPUTE_COLS, PROFILE_TYPE>
_do_iteration;
WriteBackStrategy<PROFILE_DATA_TYPE, COMPUTE_COLS, COMPUTE_ROWS, TILE_WIDTH,
TILE_HEIGHT, BLOCKSZ, PROFILE_TYPE>
_do_writeback;
};
// Computes the matrix profile given the sliding dot products for the first
// query and the precomputed data statistics
template <typename DATA_TYPE, typename VEC2_DATA_TYPE, typename VEC4_DATA_TYPE,
typename ACCUM_TYPE, typename PROFILE_DATA_TYPE,
typename DISTANCE_TYPE, bool COMPUTE_ROWS, bool COMPUTE_COLS,
SCAMPProfileType PROFILE_TYPE, int blocks_per_sm, int tile_height,
int BLOCKSZ>
__global__ void __launch_bounds__(BLOCKSZ, blocks_per_sm)
do_tile(SCAMPKernelInputArgs<double> args,
PROFILE_DATA_TYPE *__restrict__ profile_A,
PROFILE_DATA_TYPE *__restrict__ profile_B) {
constexpr int tile_width = tile_height + BLOCKSZ * DIAGS_PER_THREAD;
SCAMPTactic<DATA_TYPE, VEC2_DATA_TYPE, VEC4_DATA_TYPE, PROFILE_DATA_TYPE,
ACCUM_TYPE, DISTANCE_TYPE, COMPUTE_ROWS, COMPUTE_COLS, tile_width,
tile_height, BLOCKSZ, PROFILE_TYPE>
tactic;
SCAMPThreadInfo<ACCUM_TYPE> thread_info;
extern __shared__ char smem_raw[];
// Wrap the shared memory in a struct which handles shared memory
// accesses
SCAMPSmem<DATA_TYPE, PROFILE_DATA_TYPE, PROFILE_TYPE> smem(
smem_raw, COMPUTE_ROWS, COMPUTE_COLS, tile_width, tile_height,
args.opt.num_extra_operands);
// Find the starting diagonal of the distance matrix
const unsigned int start_diag = (threadIdx.x * DIAGS_PER_THREAD) +
blockIdx.x * (blockDim.x * DIAGS_PER_THREAD);
// This is the index of the meta-diagonal that this thread block will work on
const unsigned int meta_diagonal_idx = blockIdx.x;
// The first diagonals constitute a trivial match between the same
// subsequence, we must exclude these from the calculation according to
// args.exclusion_lower
uint32_t tile_start_col =
meta_diagonal_idx * (BLOCKSZ * DIAGS_PER_THREAD) + args.exclusion_lower;
uint32_t tile_start_row = 0;
// Initialize the column and row position of the current thread
thread_info.global_col = tile_start_col + threadIdx.x * DIAGS_PER_THREAD;
thread_info.global_row = 0;
// num_diags is the number of diagonals in the distance matrix, less any
// diagonals at the end we are not computing
const unsigned int num_diags = args.n_x - args.exclusion_upper + 1;
// Load the first dot product values
if (thread_info.global_col < args.n_x) {
thread_info.cov1 = args.cov[thread_info.global_col];
}
if (thread_info.global_col + 1 < args.n_x) {
thread_info.cov2 = args.cov[thread_info.global_col + 1];
}
if (thread_info.global_col + 2 < args.n_x) {
thread_info.cov3 = args.cov[thread_info.global_col + 2];
}
if (thread_info.global_col + 3 < args.n_x) {
thread_info.cov4 = args.cov[thread_info.global_col + 3];
}
/////////////////////////////////////
// Main loop
/////////////////////////////////////
// Each threadblock finds all the distances on a 'metadiagonal'
// We use a tiled approach for each thread block
// The tiles are horizontal slices of the diagonal, think of a parallelogram
// cut from a diagonal slice of the distance matrix. Each thread starts on the
// first row and works its way down-right towards the right side of the distance
// matrix
while (tile_start_col < args.n_x && tile_start_row < args.n_y) {
// Initialize the next tile's shared memory
tactic.InitMem(args, smem, profile_A, profile_B, tile_start_col,
tile_start_row);
thread_info.local_col = threadIdx.x * DIAGS_PER_THREAD;
thread_info.local_row = 0;
// Start of new tile, sync so we don't have data races with shared memory
// initialization
__syncthreads();
// There are 2 pathways here, most of the time we take the fast path (top),
// the last tile in every thread-block will take the slower path (bottom)
if (tile_start_col + tile_width < args.n_x &&
tile_start_row + tile_height < args.n_y &&
start_diag + DIAGS_PER_THREAD - 1 < num_diags) {
// Fast Path
while (thread_info.local_row < tile_height) {
tactic.DoIteration(thread_info, smem, args.opt);
}
} else if (start_diag < num_diags) {
// Slow Path
while (thread_info.global_col < args.n_x &&
thread_info.global_row < args.n_y &&
thread_info.local_row < tile_height) {
do_row_edge<DATA_TYPE, PROFILE_DATA_TYPE, ACCUM_TYPE, DISTANCE_TYPE,
PROFILE_TYPE, COMPUTE_ROWS, COMPUTE_COLS>(
thread_info, smem, args.n_x, start_diag, num_diags, args.opt);
++thread_info.global_col;
++thread_info.global_row;
++thread_info.local_col;
++thread_info.local_row;
}
}
// After this sync, the caches will be updated with the best so far values
// for this tile
__syncthreads();
// Write back our best-so-far computed for this tile to global memory
tactic.WriteBack(tile_start_col, tile_start_row, args.n_x, args.n_y,
smem.local_mp_col, smem.local_mp_row, profile_A,
profile_B);
// Update the tile position
tile_start_col += tile_height;
tile_start_row += tile_height;
// Make sure our updates were committed before we pull in the next tile
__threadfence_block();
}
}
int get_blocksz(SCAMPPrecisionType t, const hipDeviceProp_t &dev_prop) {
if (t == PRECISION_DOUBLE) {
return BLOCKSZ_DP;
} else {
return BLOCKSZ_SP;
}
}
constexpr int FPTypeSize(SCAMPPrecisionType dtype) {
switch (dtype) {
case PRECISION_DOUBLE:
return sizeof(double);
case PRECISION_MIXED:
case PRECISION_SINGLE:
return sizeof(float);
case PRECISION_INVALID:
return -1;
}
return -1;
}
int GetTileHeight(SCAMPPrecisionType dtype) {
switch (dtype) {
case PRECISION_DOUBLE:
return TILE_HEIGHT_DP;
case PRECISION_MIXED:
case PRECISION_SINGLE:
return TILE_HEIGHT_SP;
case PRECISION_INVALID:
return -1;
}
return -1;
}
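// Returns the dynamic shared memory needed per block: three intermediate
// arrays (df, dg and inverse norms) plus any extra operands, for both the tile
// width and tile height, and optionally a local matrix-profile buffer for each
// direction being computed.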
int get_smem(const OpInfo *info, uint64_t blocksz) {
constexpr int num_shared_variables = 3;
int intermediate_data_size = FPTypeSize(info->fp_type);
int tile_height = GetTileHeight(info->fp_type);
int tile_width = blocksz * DIAGS_PER_THREAD + tile_height;
int smem = (tile_width + tile_height) *
(num_shared_variables + info->opt_args.num_extra_operands) *
intermediate_data_size;
int profile_data_size = GetProfileTypeSize(info->profile_type);
if (info->computing_cols) {
smem += tile_width * profile_data_size;
}
if (info->computing_rows) {
smem += tile_height * profile_data_size;
}
return smem;
}
template <typename PROFILE_DATA_TYPE, typename DISTANCE_TYPE,
SCAMPProfileType PROFILE_TYPE, int BLOCKSPERSM>
SCAMPError_t LaunchDoTile(SCAMPKernelInputArgs<double> args,
PROFILE_DATA_TYPE *profile_A,
PROFILE_DATA_TYPE *profile_B,
SCAMPPrecisionType fp_type, bool computing_rows,
bool computing_cols, uint64_t blocksz,
uint64_t num_blocks, uint64_t smem, hipStream_t s) {
dim3 block(blocksz, 1, 1);
dim3 grid(num_blocks, 1, 1);
if (computing_rows && computing_cols) {
constexpr bool COMPUTE_COLS = true;
constexpr bool COMPUTE_ROWS = true;
switch (fp_type) {
case PRECISION_DOUBLE: {
hipLaunchKernelGGL(( do_tile<double, double2, double4, double, PROFILE_DATA_TYPE,
DISTANCE_TYPE, COMPUTE_ROWS, COMPUTE_COLS, PROFILE_TYPE,
BLOCKSPERSM, TILE_HEIGHT_DP, BLOCKSZ_DP>)
, dim3(grid), dim3(block), smem, s, args, profile_A, profile_B);
break;
}
case PRECISION_MIXED: {
hipLaunchKernelGGL(( do_tile<float, float2, float4, double, PROFILE_DATA_TYPE, DISTANCE_TYPE,
COMPUTE_ROWS, COMPUTE_COLS, PROFILE_TYPE, BLOCKSPERSM,
TILE_HEIGHT_SP, BLOCKSZ_SP>)
, dim3(grid), dim3(block), smem, s, args, profile_A, profile_B);
break;
}
case PRECISION_SINGLE: {
hipLaunchKernelGGL(( do_tile<float, float2, float4, float, PROFILE_DATA_TYPE, DISTANCE_TYPE,
COMPUTE_ROWS, COMPUTE_COLS, PROFILE_TYPE, BLOCKSPERSM,
TILE_HEIGHT_SP, BLOCKSZ_SP>)
, dim3(grid), dim3(block), smem, s, args, profile_A, profile_B);
break;
}
default:
return SCAMP_CUDA_ERROR;
}
return SCAMP_NO_ERROR;
} else if (computing_cols) {
constexpr bool COMPUTE_COLS = true;
constexpr bool COMPUTE_ROWS = false;
switch (fp_type) {
case PRECISION_DOUBLE: {
hipLaunchKernelGGL(( do_tile<double, double2, double4, double, PROFILE_DATA_TYPE,
DISTANCE_TYPE, COMPUTE_ROWS, COMPUTE_COLS, PROFILE_TYPE,
BLOCKSPERSM, TILE_HEIGHT_DP, BLOCKSZ_DP>)
, dim3(grid), dim3(block), smem, s, args, profile_A, profile_B);
break;
}
case PRECISION_MIXED: {
hipLaunchKernelGGL(( do_tile<float, float2, float4, double, PROFILE_DATA_TYPE, DISTANCE_TYPE,
COMPUTE_ROWS, COMPUTE_COLS, PROFILE_TYPE, BLOCKSPERSM,
TILE_HEIGHT_SP, BLOCKSZ_SP>)
, dim3(grid), dim3(block), smem, s, args, profile_A, profile_B);
break;
}
case PRECISION_SINGLE: {
hipLaunchKernelGGL(( do_tile<float, float2, float4, float, PROFILE_DATA_TYPE, DISTANCE_TYPE,
COMPUTE_ROWS, COMPUTE_COLS, PROFILE_TYPE, BLOCKSPERSM,
TILE_HEIGHT_SP, BLOCKSZ_SP>)
, dim3(grid), dim3(block), smem, s, args, profile_A, profile_B);
break;
}
default:
return SCAMP_CUDA_ERROR;
}
} else if (computing_rows) {
constexpr bool COMPUTE_COLS = false;
constexpr bool COMPUTE_ROWS = true;
switch (fp_type) {
case PRECISION_DOUBLE: {
hipLaunchKernelGGL(( do_tile<double, double2, double4, double, PROFILE_DATA_TYPE,
DISTANCE_TYPE, COMPUTE_ROWS, COMPUTE_COLS, PROFILE_TYPE,
BLOCKSPERSM, TILE_HEIGHT_DP, BLOCKSZ_DP>)
, dim3(grid), dim3(block), smem, s, args, profile_A, profile_B);
break;
}
case PRECISION_MIXED: {
hipLaunchKernelGGL(( do_tile<float, float2, float4, double, PROFILE_DATA_TYPE, DISTANCE_TYPE,
COMPUTE_ROWS, COMPUTE_COLS, PROFILE_TYPE, BLOCKSPERSM,
TILE_HEIGHT_SP, BLOCKSZ_SP>)
, dim3(grid), dim3(block), smem, s, args, profile_A, profile_B);
break;
}
case PRECISION_SINGLE: {
hipLaunchKernelGGL(( do_tile<float, float2, float4, float, PROFILE_DATA_TYPE, DISTANCE_TYPE,
COMPUTE_ROWS, COMPUTE_COLS, PROFILE_TYPE, BLOCKSPERSM,
TILE_HEIGHT_SP, BLOCKSZ_SP>)
, dim3(grid), dim3(block), smem, s, args, profile_A, profile_B);
break;
}
default:
return SCAMP_CUDA_ERROR;
}
}
return SCAMP_NO_ERROR;
}
SCAMPError_t compute_gpu_resources_and_launch(SCAMPKernelInputArgs<double> args,
Tile *t, void *profile_a,
void *profile_b, bool do_rows,
bool do_cols) {
int exclusion_total = args.exclusion_lower + args.exclusion_upper;
uint64_t blocksz = get_blocksz(t->info()->fp_type, t->get_dev_props());
uint64_t num_workers = ceil((args.n_x - exclusion_total) /
static_cast<double>(DIAGS_PER_THREAD));
uint64_t num_blocks = ceil(num_workers / static_cast<double>(blocksz));
uint64_t smem = get_smem(t->info(), blocksz);
if (exclusion_total < args.n_x) {
switch (t->info()->profile_type) {
case PROFILE_TYPE_SUM_THRESH:
return LaunchDoTile<double, double, PROFILE_TYPE_SUM_THRESH,
BLOCKSPERSM>(
args, reinterpret_cast<double *>(profile_a),
reinterpret_cast<double *>(profile_b), t->info()->fp_type, do_rows,
do_cols, blocksz, num_blocks, smem, t->get_stream());
case PROFILE_TYPE_1NN_INDEX:
return LaunchDoTile<uint64_t, float, PROFILE_TYPE_1NN_INDEX,
BLOCKSPERSM>(
args, reinterpret_cast<uint64_t *>(profile_a),
reinterpret_cast<uint64_t *>(profile_b), t->info()->fp_type,
do_rows, do_cols, blocksz, num_blocks, smem, t->get_stream());
case PROFILE_TYPE_1NN:
return LaunchDoTile<float, float, PROFILE_TYPE_1NN, BLOCKSPERSM>(
args, reinterpret_cast<float *>(profile_a),
reinterpret_cast<float *>(profile_b), t->info()->fp_type, do_rows,
do_cols, blocksz, num_blocks, smem, t->get_stream());
default:
return SCAMP_FUNCTIONALITY_UNIMPLEMENTED;
}
}
return SCAMP_NO_ERROR;
}
SCAMPError_t gpu_kernel_self_join_upper(Tile *t) {
SCAMPKernelInputArgs<double> tile_args(t, false, false);
return compute_gpu_resources_and_launch(
tile_args, t, t->profile_a(), t->profile_b(), t->info()->computing_rows,
t->info()->computing_cols);
}
SCAMPError_t gpu_kernel_self_join_lower(Tile *t) {
SCAMPKernelInputArgs<double> tile_args(t, true, false);
return compute_gpu_resources_and_launch(
tile_args, t, t->profile_b(), t->profile_a(), t->info()->computing_cols,
t->info()->computing_rows);
}
SCAMPError_t gpu_kernel_ab_join_upper(Tile *t) {
SCAMPKernelInputArgs<double> tile_args(t, false, true);
return compute_gpu_resources_and_launch(
tile_args, t, t->profile_a(), t->profile_b(), t->info()->computing_rows,
t->info()->computing_cols);
}
SCAMPError_t gpu_kernel_ab_join_lower(Tile *t) {
SCAMPKernelInputArgs<double> tile_args(t, true, true);
return compute_gpu_resources_and_launch(
tile_args, t, t->profile_b(), t->profile_a(), t->info()->computing_cols,
t->info()->computing_rows);
}
} // namespace SCAMP
| 73e8932972025ee430778eccd42a66c9226aa385.cu | #include <unordered_map>
#include "defines.h"
#include "kernels.h"
namespace SCAMP {
constexpr int DIAGS_PER_THREAD = 4;
constexpr int BLOCKSZ_SP = 512;
constexpr int BLOCKSZ_DP = 256;
constexpr int BLOCKSPERSM = 2;
constexpr int TILE_HEIGHT_SP = 200;
constexpr int TILE_HEIGHT_DP = 200;
template <typename T>
struct SCAMPKernelInputArgs {
SCAMPKernelInputArgs(const T *__restrict__ cov_, const T *__restrict__ dfa_,
const T *__restrict__ dfb_, const T *__restrict__ dga_,
const T *__restrict__ dgb_,
const T *__restrict__ normsa_,
const T *__restrict__ normsb_, uint32_t n_x_,
uint32_t n_y_, int32_t exclusion_lower_,
int32_t exclusion_upper_, OptionalArgs opt_)
: cov(cov_),
dfa(dfa_),
dfb(dfb_),
dga(dga_),
dgb(dgb_),
normsa(normsa_),
normsb(normsb_),
n_x(n_x_),
n_y(n_y_),
exclusion_lower(exclusion_lower_),
exclusion_upper(exclusion_upper_),
opt(opt_) {}
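  // Convenience constructor used by the launch helpers at the bottom of this
  // file: `transpose` swaps the roles of the A and B series (and of n_x/n_y)
  // so the same kernel can work on the transposed (lower-triangular) tile,
  // while `ab_join` selects the exclusion zone for an AB-join rather than a
  // self-join.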
SCAMPKernelInputArgs(Tile *t, bool transpose, bool ab_join) {
cov = t->QT();
dfa = transpose ? t->dfb() : t->dfa();
dfb = transpose ? t->dfa() : t->dfb();
dga = transpose ? t->dgb() : t->dga();
dgb = transpose ? t->dga() : t->dgb();
normsa = transpose ? t->normsb() : t->normsa();
normsb = transpose ? t->normsa() : t->normsb();
n_x = transpose ? t->get_tile_height() : t->get_tile_width();
n_y = transpose ? t->get_tile_width() : t->get_tile_height();
n_x = n_x - t->info()->mp_window + 1;
n_y = n_y - t->info()->mp_window + 1;
std::pair<int, int> exclusion =
ab_join ? t->get_exclusion_for_ab_join(!transpose)
: t->get_exclusion_for_self_join(!transpose);
exclusion_lower = exclusion.first;
exclusion_upper = exclusion.second;
opt = t->info()->opt_args;
}
const T *__restrict__ cov;
const T *__restrict__ dfa;
const T *__restrict__ dfb;
const T *__restrict__ dga;
const T *__restrict__ dgb;
const T *__restrict__ normsa;
const T *__restrict__ normsb;
const T *__restrict__ extras[3];
uint32_t n_x;
uint32_t n_y;
int32_t exclusion_lower;
int32_t exclusion_upper;
OptionalArgs opt;
};
template <typename DATA_TYPE, typename PROFILE_DATA_TYPE, SCAMPProfileType type>
struct SCAMPSmem {
__device__ SCAMPSmem(char *smem, bool compute_rows, bool compute_columns,
int tile_width, int tile_height, int extra_operands) {
constexpr int data_size = sizeof(DATA_TYPE);
constexpr int profile_size = sizeof(PROFILE_DATA_TYPE);
int curr_byte = 0;
df_col = (DATA_TYPE *)(smem);
curr_byte += tile_width * data_size;
dg_col = (DATA_TYPE *)(smem + curr_byte);
curr_byte += tile_width * data_size;
inorm_col = (DATA_TYPE *)(smem + curr_byte);
curr_byte += tile_width * data_size;
df_row = (DATA_TYPE *)(smem + curr_byte);
curr_byte += tile_height * data_size;
dg_row = (DATA_TYPE *)(smem + curr_byte);
curr_byte += tile_height * data_size;
inorm_row = (DATA_TYPE *)(smem + curr_byte);
curr_byte += tile_height * data_size;
if (compute_columns) {
local_mp_col = (PROFILE_DATA_TYPE *)(smem + curr_byte);
curr_byte += tile_width * profile_size;
} else {
local_mp_col = nullptr;
}
if (compute_rows) {
local_mp_row = (PROFILE_DATA_TYPE *)(smem + curr_byte);
curr_byte += tile_height * profile_size;
} else {
local_mp_row = nullptr;
}
}
DATA_TYPE *__restrict__ df_col;
DATA_TYPE *__restrict__ dg_col;
DATA_TYPE *__restrict__ inorm_col;
DATA_TYPE *__restrict__ df_row;
DATA_TYPE *__restrict__ dg_row;
DATA_TYPE *__restrict__ inorm_row;
PROFILE_DATA_TYPE *__restrict__ local_mp_col;
PROFILE_DATA_TYPE *__restrict__ local_mp_row;
};
template <typename ACCUM_TYPE>
struct SCAMPThreadInfo {
ACCUM_TYPE cov1;
ACCUM_TYPE cov2;
ACCUM_TYPE cov3;
ACCUM_TYPE cov4;
uint32_t local_row;
uint32_t local_col;
uint32_t global_row;
uint32_t global_col;
};
enum SCAMPAtomicType { ATOMIC_BLOCK, ATOMIC_GLOBAL, ATOMIC_SYSTEM };
#if __CUDA_ARCH__ < 600
// Double atomicAdd is not implemented in hardware before Pascal, providing a
// software implementation here
__device__ double atomicAdd(double *address, double val) {
unsigned long long int *address_as_ull = (unsigned long long int *)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val + __longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
template <typename T, SCAMPAtomicType type>
__device__ inline T do_atomicCAS(T *address, T v1, T v2) {
#if __CUDA_ARCH__ < 600
return atomicCAS(address, v1, v2);
#else
switch (type) {
case ATOMIC_BLOCK:
return atomicCAS_block(address, v1, v2);
case ATOMIC_GLOBAL:
return atomicCAS(address, v1, v2);
case ATOMIC_SYSTEM:
return atomicCAS_system(address, v1, v2);
}
// Should never happen
return 0;
#endif
}
template <typename T, SCAMPAtomicType type>
__device__ inline uint32_t do_atomicAdd(T *address, T amount) {
#if __CUDA_ARCH__ < 600
return atomicAdd(address, amount);
#else
switch (type) {
case ATOMIC_BLOCK:
return atomicAdd_block(address, amount);
case ATOMIC_GLOBAL:
return atomicAdd(address, amount);
case ATOMIC_SYSTEM:
return atomicAdd_system(address, amount);
}
// Should never happen
return 0;
#endif
}
template <typename T, SCAMPAtomicType type>
__device__ __forceinline__ T do_atomicMax(T *address, T other) {
#if __CUDA_ARCH__ < 600
return atomicMax(address, other);
#else
switch (type) {
case ATOMIC_BLOCK:
return atomicMax_block(address, other);
case ATOMIC_GLOBAL:
return atomicMax(address, other);
case ATOMIC_SYSTEM:
return atomicMax_system(address, other);
}
// Should never happen
return 0;
#endif
}
template <typename T, SCAMPAtomicType type>
__device__ __forceinline__ T do_atomicMin(T *address, T other) {
#if __CUDA_ARCH__ < 600
  return atomicMin(address, other);
#else
switch (type) {
case ATOMIC_BLOCK:
return atomicMin_block(address, other);
case ATOMIC_GLOBAL:
return atomicMin(address, other);
case ATOMIC_SYSTEM:
return atomicMin_system(address, other);
}
// Should never happen
return 0;
#endif
}
template <SCAMPAtomicType type>
__device__ __forceinline__ float fAtomicMax(float *addr, float value) {
float old;
old = (value >= 0) ? __int_as_float(do_atomicMax<int, type>(
(int *)addr, __float_as_int(value)))
: __uint_as_float(do_atomicMin<unsigned int, type>(
(unsigned int *)addr, __float_as_uint(value)));
return old;
}
template <SCAMPAtomicType type>
__device__ __forceinline__ float fAtomicMax_check(float *addr, float value,
float check) {
if (value < check) {
return check;
}
return fAtomicMax<type>(addr, value);
}
// Atomically updates the MP/idxs using a single 64-bit integer. We lose a small
// amount of precision in the output; if we did not do this, we would be unable
// to atomically update both the matrix profile and the indexes without using a
// critical section and dedicated locks.
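// Note: mp_entry (defined in defines.h, not shown here) is assumed to be a
// union overlaying a uint64_t with {float floats[2]; uint32_t ints[2];}, so
// the profile value lives in the low 32 bits and the index in the high 32
// bits; one 64-bit CAS then updates both fields consistently.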
template <SCAMPAtomicType type>
__device__ inline void MPatomicMax(volatile uint64_t *address, float val,
unsigned int idx) {
mp_entry loc, loctest;
loc.floats[0] = val;
loc.ints[1] = idx;
loctest.ulong = *address;
while (loctest.floats[0] < val) {
loctest.ulong = do_atomicCAS<unsigned long long int, type>(
(unsigned long long int *)address, loctest.ulong, loc.ulong);
}
}
// As above, but checks a previously read value before attempting another read
// This allows us to exploit vectorized loads of the matrix profile
__device__ inline void MPatomicMax_check(
volatile uint64_t *__restrict__ address, float val, unsigned int idx,
float curr_val) {
if (val > curr_val) {
mp_entry loc, loctest;
loc.floats[0] = val;
loc.ints[1] = idx;
loctest.ulong = *address;
while (loctest.floats[0] < val) {
loctest.ulong = do_atomicCAS<unsigned long long int, ATOMIC_BLOCK>(
(unsigned long long int *)address, loctest.ulong, loc.ulong);
}
}
}
// Gets the max of 4 values (avoids returning NaN if any of d1-d4 are NaN)
template <typename T>
__device__ inline T max4(const T &d1, const T &d2, const T &d3, const T &d4) {
float ret = -2;
if (d1 > ret) {
ret = d1;
}
if (d2 > ret) {
ret = d2;
}
if (d3 > ret) {
ret = d3;
}
if (d4 > ret) {
ret = d4;
}
return ret;
}
// Gets the max of 4 values (avoids returning NaN if any of d1-d4 are NaN)
// Including the index
template <typename T>
__device__ inline T max4_index(const T &d1, const T &d2, const T &d3,
const T &d4, const uint32_t init,
uint32_t &idx) {
float ret = -2;
if (d1 > ret) {
ret = d1;
idx = init;
}
if (d2 > ret) {
ret = d2;
idx = init + 1;
}
if (d3 > ret) {
ret = d3;
idx = init + 2;
}
if (d4 > ret) {
ret = d4;
idx = init + 3;
}
return ret;
}
class SCAMPStrategy {
public:
};
/////////////////////////////////////////////////////////////////////////////////////
// THESE HEADERS DEFINE VARIOUS COMPUTE STRATEGIES USED TO COMPUTE VARIOUS
// PROFILE TYPES
///////////////////////////////////////////////////////////////////////////////////
#include "kernels_compute.h"
#include "kernels_smem.h"
/////////////////////////////////////////////////////////////////////////////////////
//
// SCAMP TACTIC DESCRIBES STRATEGY FOR WHAT OPS TO EXECUTE IN THE KERNEL
//
/////////////////////////////////////////////////////////////////////////////////////
template <typename DATA_TYPE, typename VEC2_DATA_TYPE, typename VEC4_DATA_TYPE,
typename PROFILE_DATA_TYPE, typename ACCUM_TYPE,
typename DISTANCE_TYPE, bool COMPUTE_ROWS, bool COMPUTE_COLS,
int TILE_WIDTH, int TILE_HEIGHT, int BLOCKSZ,
SCAMPProfileType PROFILE_TYPE>
class SCAMPTactic {
public:
__device__ SCAMPTactic() {}
__device__ void InitMem(
SCAMPKernelInputArgs<double> &args,
SCAMPSmem<DATA_TYPE, PROFILE_DATA_TYPE, PROFILE_TYPE> &smem,
PROFILE_DATA_TYPE *__restrict__ profile_a,
PROFILE_DATA_TYPE *__restrict__ profile_b, uint32_t col_start,
uint32_t row_start) {
_init_mem.exec(args, smem, profile_a, profile_b, col_start, row_start);
}
__device__ inline FORCE_INLINE void DoIteration(
SCAMPThreadInfo<ACCUM_TYPE> &info,
SCAMPSmem<DATA_TYPE, PROFILE_DATA_TYPE, PROFILE_TYPE> &smem,
OptionalArgs &args) {
_do_iteration.exec(info, smem, args);
}
__device__ inline void WriteBack(uint32_t tile_start_x, uint32_t tile_start_y,
uint32_t n_x, uint32_t n_y,
PROFILE_DATA_TYPE *__restrict__ local_mp_col,
PROFILE_DATA_TYPE *__restrict__ local_mp_row,
PROFILE_DATA_TYPE *__restrict__ profile_A,
PROFILE_DATA_TYPE *__restrict__ profile_B) {
_do_writeback.exec(tile_start_x, tile_start_y, n_x, n_y, local_mp_col,
local_mp_row, profile_A, profile_B);
}
private:
InitMemStrategy<DATA_TYPE, PROFILE_DATA_TYPE, COMPUTE_ROWS, COMPUTE_COLS,
TILE_WIDTH, TILE_HEIGHT, BLOCKSZ, PROFILE_TYPE>
_init_mem;
DoIterationStrategy<DATA_TYPE, VEC2_DATA_TYPE, VEC4_DATA_TYPE, ACCUM_TYPE,
PROFILE_DATA_TYPE, DISTANCE_TYPE, COMPUTE_ROWS,
COMPUTE_COLS, PROFILE_TYPE>
_do_iteration;
WriteBackStrategy<PROFILE_DATA_TYPE, COMPUTE_COLS, COMPUTE_ROWS, TILE_WIDTH,
TILE_HEIGHT, BLOCKSZ, PROFILE_TYPE>
_do_writeback;
};
// Computes the matrix profile given the sliding dot products for the first
// query and the precomputed data statistics
template <typename DATA_TYPE, typename VEC2_DATA_TYPE, typename VEC4_DATA_TYPE,
typename ACCUM_TYPE, typename PROFILE_DATA_TYPE,
typename DISTANCE_TYPE, bool COMPUTE_ROWS, bool COMPUTE_COLS,
SCAMPProfileType PROFILE_TYPE, int blocks_per_sm, int tile_height,
int BLOCKSZ>
__global__ void __launch_bounds__(BLOCKSZ, blocks_per_sm)
do_tile(SCAMPKernelInputArgs<double> args,
PROFILE_DATA_TYPE *__restrict__ profile_A,
PROFILE_DATA_TYPE *__restrict__ profile_B) {
constexpr int tile_width = tile_height + BLOCKSZ * DIAGS_PER_THREAD;
SCAMPTactic<DATA_TYPE, VEC2_DATA_TYPE, VEC4_DATA_TYPE, PROFILE_DATA_TYPE,
ACCUM_TYPE, DISTANCE_TYPE, COMPUTE_ROWS, COMPUTE_COLS, tile_width,
tile_height, BLOCKSZ, PROFILE_TYPE>
tactic;
SCAMPThreadInfo<ACCUM_TYPE> thread_info;
extern __shared__ char smem_raw[];
// Wrap the shared memory in a struct which handles shared memory
// accesses
SCAMPSmem<DATA_TYPE, PROFILE_DATA_TYPE, PROFILE_TYPE> smem(
smem_raw, COMPUTE_ROWS, COMPUTE_COLS, tile_width, tile_height,
args.opt.num_extra_operands);
// Find the starting diagonal of the distance matrix
const unsigned int start_diag = (threadIdx.x * DIAGS_PER_THREAD) +
blockIdx.x * (blockDim.x * DIAGS_PER_THREAD);
// This is the index of the meta-diagonal that this thread block will work on
const unsigned int meta_diagonal_idx = blockIdx.x;
// The first diagonals constitute a trivial match between the same
// subsequence, we must exclude these from the calculation according to
// args.exclusion_lower
uint32_t tile_start_col =
meta_diagonal_idx * (BLOCKSZ * DIAGS_PER_THREAD) + args.exclusion_lower;
uint32_t tile_start_row = 0;
// Initialize the column and row position of the current thread
thread_info.global_col = tile_start_col + threadIdx.x * DIAGS_PER_THREAD;
thread_info.global_row = 0;
// num_diags is the number of diagonals in the distance matrix, less any
// diagonals at the end we are not computing
const unsigned int num_diags = args.n_x - args.exclusion_upper + 1;
// Load the first dot product values
if (thread_info.global_col < args.n_x) {
thread_info.cov1 = args.cov[thread_info.global_col];
}
if (thread_info.global_col + 1 < args.n_x) {
thread_info.cov2 = args.cov[thread_info.global_col + 1];
}
if (thread_info.global_col + 2 < args.n_x) {
thread_info.cov3 = args.cov[thread_info.global_col + 2];
}
if (thread_info.global_col + 3 < args.n_x) {
thread_info.cov4 = args.cov[thread_info.global_col + 3];
}
/////////////////////////////////////
// Main loop
/////////////////////////////////////
// Each threadblock finds all the distances on a 'metadiagonal'
// We use a tiled approach for each thread block
// The tiles are horizontal slices of the diagonal, think of a parallelogram
// cut from a diagonal slice of the distance matrix. Each thread starts on the
// first row and works its way down-right towards the right side of the distance
// matrix
while (tile_start_col < args.n_x && tile_start_row < args.n_y) {
// Initialize the next tile's shared memory
tactic.InitMem(args, smem, profile_A, profile_B, tile_start_col,
tile_start_row);
thread_info.local_col = threadIdx.x * DIAGS_PER_THREAD;
thread_info.local_row = 0;
// Start of new tile, sync so we don't have data races with shared memory
// initialization
__syncthreads();
// There are 2 pathways here, most of the time we take the fast path (top),
// the last tile in every thread-block will take the slower path (bottom)
if (tile_start_col + tile_width < args.n_x &&
tile_start_row + tile_height < args.n_y &&
start_diag + DIAGS_PER_THREAD - 1 < num_diags) {
// Fast Path
while (thread_info.local_row < tile_height) {
tactic.DoIteration(thread_info, smem, args.opt);
}
} else if (start_diag < num_diags) {
// Slow Path
while (thread_info.global_col < args.n_x &&
thread_info.global_row < args.n_y &&
thread_info.local_row < tile_height) {
do_row_edge<DATA_TYPE, PROFILE_DATA_TYPE, ACCUM_TYPE, DISTANCE_TYPE,
PROFILE_TYPE, COMPUTE_ROWS, COMPUTE_COLS>(
thread_info, smem, args.n_x, start_diag, num_diags, args.opt);
++thread_info.global_col;
++thread_info.global_row;
++thread_info.local_col;
++thread_info.local_row;
}
}
// After this sync, the caches will be updated with the best so far values
// for this tile
__syncthreads();
// Write back our best-so-far computed for this tile to global memory
tactic.WriteBack(tile_start_col, tile_start_row, args.n_x, args.n_y,
smem.local_mp_col, smem.local_mp_row, profile_A,
profile_B);
// Update the tile position
tile_start_col += tile_height;
tile_start_row += tile_height;
// Make sure our updates were committed before we pull in the next tile
__threadfence_block();
}
}
int get_blocksz(SCAMPPrecisionType t, const cudaDeviceProp &dev_prop) {
if (t == PRECISION_DOUBLE) {
return BLOCKSZ_DP;
} else {
return BLOCKSZ_SP;
}
}
constexpr int FPTypeSize(SCAMPPrecisionType dtype) {
switch (dtype) {
case PRECISION_DOUBLE:
return sizeof(double);
case PRECISION_MIXED:
case PRECISION_SINGLE:
return sizeof(float);
case PRECISION_INVALID:
return -1;
}
return -1;
}
int GetTileHeight(SCAMPPrecisionType dtype) {
switch (dtype) {
case PRECISION_DOUBLE:
return TILE_HEIGHT_DP;
case PRECISION_MIXED:
case PRECISION_SINGLE:
return TILE_HEIGHT_SP;
case PRECISION_INVALID:
return -1;
}
return -1;
}
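// Returns the dynamic shared memory needed per block: three intermediate
// arrays (df, dg and inverse norms) plus any extra operands, for both the tile
// width and tile height, and optionally a local matrix-profile buffer for each
// direction being computed.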
int get_smem(const OpInfo *info, uint64_t blocksz) {
constexpr int num_shared_variables = 3;
int intermediate_data_size = FPTypeSize(info->fp_type);
int tile_height = GetTileHeight(info->fp_type);
int tile_width = blocksz * DIAGS_PER_THREAD + tile_height;
int smem = (tile_width + tile_height) *
(num_shared_variables + info->opt_args.num_extra_operands) *
intermediate_data_size;
int profile_data_size = GetProfileTypeSize(info->profile_type);
if (info->computing_cols) {
smem += tile_width * profile_data_size;
}
if (info->computing_rows) {
smem += tile_height * profile_data_size;
}
return smem;
}
template <typename PROFILE_DATA_TYPE, typename DISTANCE_TYPE,
SCAMPProfileType PROFILE_TYPE, int BLOCKSPERSM>
SCAMPError_t LaunchDoTile(SCAMPKernelInputArgs<double> args,
PROFILE_DATA_TYPE *profile_A,
PROFILE_DATA_TYPE *profile_B,
SCAMPPrecisionType fp_type, bool computing_rows,
bool computing_cols, uint64_t blocksz,
uint64_t num_blocks, uint64_t smem, cudaStream_t s) {
dim3 block(blocksz, 1, 1);
dim3 grid(num_blocks, 1, 1);
if (computing_rows && computing_cols) {
constexpr bool COMPUTE_COLS = true;
constexpr bool COMPUTE_ROWS = true;
switch (fp_type) {
case PRECISION_DOUBLE: {
do_tile<double, double2, double4, double, PROFILE_DATA_TYPE,
DISTANCE_TYPE, COMPUTE_ROWS, COMPUTE_COLS, PROFILE_TYPE,
BLOCKSPERSM, TILE_HEIGHT_DP, BLOCKSZ_DP>
<<<grid, block, smem, s>>>(args, profile_A, profile_B);
break;
}
case PRECISION_MIXED: {
do_tile<float, float2, float4, double, PROFILE_DATA_TYPE, DISTANCE_TYPE,
COMPUTE_ROWS, COMPUTE_COLS, PROFILE_TYPE, BLOCKSPERSM,
TILE_HEIGHT_SP, BLOCKSZ_SP>
<<<grid, block, smem, s>>>(args, profile_A, profile_B);
break;
}
case PRECISION_SINGLE: {
do_tile<float, float2, float4, float, PROFILE_DATA_TYPE, DISTANCE_TYPE,
COMPUTE_ROWS, COMPUTE_COLS, PROFILE_TYPE, BLOCKSPERSM,
TILE_HEIGHT_SP, BLOCKSZ_SP>
<<<grid, block, smem, s>>>(args, profile_A, profile_B);
break;
}
default:
return SCAMP_CUDA_ERROR;
}
return SCAMP_NO_ERROR;
} else if (computing_cols) {
constexpr bool COMPUTE_COLS = true;
constexpr bool COMPUTE_ROWS = false;
switch (fp_type) {
case PRECISION_DOUBLE: {
do_tile<double, double2, double4, double, PROFILE_DATA_TYPE,
DISTANCE_TYPE, COMPUTE_ROWS, COMPUTE_COLS, PROFILE_TYPE,
BLOCKSPERSM, TILE_HEIGHT_DP, BLOCKSZ_DP>
<<<grid, block, smem, s>>>(args, profile_A, profile_B);
break;
}
case PRECISION_MIXED: {
do_tile<float, float2, float4, double, PROFILE_DATA_TYPE, DISTANCE_TYPE,
COMPUTE_ROWS, COMPUTE_COLS, PROFILE_TYPE, BLOCKSPERSM,
TILE_HEIGHT_SP, BLOCKSZ_SP>
<<<grid, block, smem, s>>>(args, profile_A, profile_B);
break;
}
case PRECISION_SINGLE: {
do_tile<float, float2, float4, float, PROFILE_DATA_TYPE, DISTANCE_TYPE,
COMPUTE_ROWS, COMPUTE_COLS, PROFILE_TYPE, BLOCKSPERSM,
TILE_HEIGHT_SP, BLOCKSZ_SP>
<<<grid, block, smem, s>>>(args, profile_A, profile_B);
break;
}
default:
return SCAMP_CUDA_ERROR;
}
} else if (computing_rows) {
constexpr bool COMPUTE_COLS = false;
constexpr bool COMPUTE_ROWS = true;
switch (fp_type) {
case PRECISION_DOUBLE: {
do_tile<double, double2, double4, double, PROFILE_DATA_TYPE,
DISTANCE_TYPE, COMPUTE_ROWS, COMPUTE_COLS, PROFILE_TYPE,
BLOCKSPERSM, TILE_HEIGHT_DP, BLOCKSZ_DP>
<<<grid, block, smem, s>>>(args, profile_A, profile_B);
break;
}
case PRECISION_MIXED: {
do_tile<float, float2, float4, double, PROFILE_DATA_TYPE, DISTANCE_TYPE,
COMPUTE_ROWS, COMPUTE_COLS, PROFILE_TYPE, BLOCKSPERSM,
TILE_HEIGHT_SP, BLOCKSZ_SP>
<<<grid, block, smem, s>>>(args, profile_A, profile_B);
break;
}
case PRECISION_SINGLE: {
do_tile<float, float2, float4, float, PROFILE_DATA_TYPE, DISTANCE_TYPE,
COMPUTE_ROWS, COMPUTE_COLS, PROFILE_TYPE, BLOCKSPERSM,
TILE_HEIGHT_SP, BLOCKSZ_SP>
<<<grid, block, smem, s>>>(args, profile_A, profile_B);
break;
}
default:
return SCAMP_CUDA_ERROR;
}
}
return SCAMP_NO_ERROR;
}
SCAMPError_t compute_gpu_resources_and_launch(SCAMPKernelInputArgs<double> args,
Tile *t, void *profile_a,
void *profile_b, bool do_rows,
bool do_cols) {
int exclusion_total = args.exclusion_lower + args.exclusion_upper;
uint64_t blocksz = get_blocksz(t->info()->fp_type, t->get_dev_props());
uint64_t num_workers = ceil((args.n_x - exclusion_total) /
static_cast<double>(DIAGS_PER_THREAD));
uint64_t num_blocks = ceil(num_workers / static_cast<double>(blocksz));
uint64_t smem = get_smem(t->info(), blocksz);
if (exclusion_total < args.n_x) {
switch (t->info()->profile_type) {
case PROFILE_TYPE_SUM_THRESH:
return LaunchDoTile<double, double, PROFILE_TYPE_SUM_THRESH,
BLOCKSPERSM>(
args, reinterpret_cast<double *>(profile_a),
reinterpret_cast<double *>(profile_b), t->info()->fp_type, do_rows,
do_cols, blocksz, num_blocks, smem, t->get_stream());
case PROFILE_TYPE_1NN_INDEX:
return LaunchDoTile<uint64_t, float, PROFILE_TYPE_1NN_INDEX,
BLOCKSPERSM>(
args, reinterpret_cast<uint64_t *>(profile_a),
reinterpret_cast<uint64_t *>(profile_b), t->info()->fp_type,
do_rows, do_cols, blocksz, num_blocks, smem, t->get_stream());
case PROFILE_TYPE_1NN:
return LaunchDoTile<float, float, PROFILE_TYPE_1NN, BLOCKSPERSM>(
args, reinterpret_cast<float *>(profile_a),
reinterpret_cast<float *>(profile_b), t->info()->fp_type, do_rows,
do_cols, blocksz, num_blocks, smem, t->get_stream());
default:
return SCAMP_FUNCTIONALITY_UNIMPLEMENTED;
}
}
return SCAMP_NO_ERROR;
}
SCAMPError_t gpu_kernel_self_join_upper(Tile *t) {
SCAMPKernelInputArgs<double> tile_args(t, false, false);
return compute_gpu_resources_and_launch(
tile_args, t, t->profile_a(), t->profile_b(), t->info()->computing_rows,
t->info()->computing_cols);
}
SCAMPError_t gpu_kernel_self_join_lower(Tile *t) {
SCAMPKernelInputArgs<double> tile_args(t, true, false);
return compute_gpu_resources_and_launch(
tile_args, t, t->profile_b(), t->profile_a(), t->info()->computing_cols,
t->info()->computing_rows);
}
SCAMPError_t gpu_kernel_ab_join_upper(Tile *t) {
SCAMPKernelInputArgs<double> tile_args(t, false, true);
return compute_gpu_resources_and_launch(
tile_args, t, t->profile_a(), t->profile_b(), t->info()->computing_rows,
t->info()->computing_cols);
}
SCAMPError_t gpu_kernel_ab_join_lower(Tile *t) {
SCAMPKernelInputArgs<double> tile_args(t, true, true);
return compute_gpu_resources_and_launch(
tile_args, t, t->profile_b(), t->profile_a(), t->info()->computing_cols,
t->info()->computing_rows);
}
} // namespace SCAMP
|
98cdf143c06fb7a698dc1764427c44a075e47402.hip | // !!! This is a file automatically generated by hipify!!!
#include <hipfft.h>
#include <hipfftXt.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#define checkCudaErrors(val) __checkCudaErrors__ ( (val), #val, __FILE__, __LINE__ )
template <typename T>
inline void __checkCudaErrors__(T code, const char *func, const char *file, int line)
{
if (code) {
fprintf(stderr, "CUDA error at %s:%d code=%d \"%s\" \n",
file, line, (unsigned int)code, func);
hipDeviceReset();
exit(EXIT_FAILURE);
}
}
/********************************/
/* SCALE USING A CUFFT CALLBACK */
/********************************/
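// Store callback: cuFFT calls this once for every output element as it is
// written, so the transform result is scaled by 1/2 on the fly instead of in a
// separate post-processing kernel.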
__device__ void scale_cufft_callback(
void *dataOut,
size_t offset,
float2 element,
void *callerInfo,
void *sharedPtr)
{
float2 output;
output.x = element.x / 2;
output.y = element.y / 2;
((float2*)dataOut)[offset] = output;
}
__device__
cufftCallbackStoreC d_storeCallbackPtr = scale_cufft_callback;
int main(void){
const int N=2;
// --- Setting up input device vector
thrust::device_vector<float2> d_vec(N,make_cuComplex(1.0f,2.0f));
hipfftHandle plan;
hipfftPlan1d(&plan, N, HIPFFT_C2C, 1);
// --- Preparing the callback
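// A __device__ function pointer cannot be taken directly from host code, so
// the callback pointer is copied from the device symbol into a host variable
// before being passed to cufftXtSetCallback below.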
cufftCallbackStoreC h_storeCallbackPtr;
checkCudaErrors(hipMemcpyFromSymbol(&h_storeCallbackPtr,
d_storeCallbackPtr,
sizeof(h_storeCallbackPtr)));
// --- Associating the callback with the plan.
hipfftResult status = cufftXtSetCallback(plan,
(void **)&h_storeCallbackPtr,
CUFFT_CB_ST_COMPLEX,
0);
if (status == HIPFFT_LICENSE_ERROR) {
printf("License file was not found, out of date, or invalid.\n");
exit(EXIT_FAILURE);
} else {
checkCudaErrors(status);
}
// --- Perform in-place direct Fourier transform
checkCudaErrors(hipfftExecC2C(plan, thrust::raw_pointer_cast(d_vec.data()),thrust::raw_pointer_cast(d_vec.data()), HIPFFT_FORWARD));
//thrust::transform(d_vec.begin(), d_vec.end(), d_vec.begin(), scale_result((float)(2)));
// --- Setting up output host vector
thrust::host_vector<float2> h_vec(d_vec);
for (int i=0; i<N; i++) printf("Element #%i: \t (%f, %f)\n",i,h_vec[i].x,h_vec[i].y);
//Clean up
checkCudaErrors(hipfftDestroy(plan));
}
| 98cdf143c06fb7a698dc1764427c44a075e47402.cu | #include <cufft.h>
#include <cufftXt.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#define checkCudaErrors(val) __checkCudaErrors__ ( (val), #val, __FILE__, __LINE__ )
template <typename T>
inline void __checkCudaErrors__(T code, const char *func, const char *file, int line)
{
if (code) {
fprintf(stderr, "CUDA error at %s:%d code=%d \"%s\" \n",
file, line, (unsigned int)code, func);
cudaDeviceReset();
exit(EXIT_FAILURE);
}
}
/********************************/
/* SCALE USING A CUFFT CALLBACK */
/********************************/
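// Store callback: cuFFT calls this once for every output element as it is
// written, so the transform result is scaled by 1/2 on the fly instead of in a
// separate post-processing kernel.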
__device__ void scale_cufft_callback(
void *dataOut,
size_t offset,
float2 element,
void *callerInfo,
void *sharedPtr)
{
float2 output;
output.x = element.x / 2;
output.y = element.y / 2;
((float2*)dataOut)[offset] = output;
}
__device__
cufftCallbackStoreC d_storeCallbackPtr = scale_cufft_callback;
int main(void){
const int N=2;
// --- Setting up input device vector
thrust::device_vector<float2> d_vec(N,make_cuComplex(1.0f,2.0f));
cufftHandle plan;
cufftPlan1d(&plan, N, CUFFT_C2C, 1);
// --- Preparing the callback
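// A __device__ function pointer cannot be taken directly from host code, so
// the callback pointer is copied from the device symbol into a host variable
// before being passed to cufftXtSetCallback below.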
cufftCallbackStoreC h_storeCallbackPtr;
checkCudaErrors(cudaMemcpyFromSymbol(&h_storeCallbackPtr,
d_storeCallbackPtr,
sizeof(h_storeCallbackPtr)));
// --- Associating the callback with the plan.
cufftResult status = cufftXtSetCallback(plan,
(void **)&h_storeCallbackPtr,
CUFFT_CB_ST_COMPLEX,
0);
if (status == CUFFT_LICENSE_ERROR) {
printf("License file was not found, out of date, or invalid.\n");
exit(EXIT_FAILURE);
} else {
checkCudaErrors(status);
}
// --- Perform in-place direct Fourier transform
checkCudaErrors(cufftExecC2C(plan, thrust::raw_pointer_cast(d_vec.data()),thrust::raw_pointer_cast(d_vec.data()), CUFFT_FORWARD));
//thrust::transform(d_vec.begin(), d_vec.end(), d_vec.begin(), scale_result((float)(2)));
// --- Setting up output host vector
thrust::host_vector<float2> h_vec(d_vec);
for (int i=0; i<N; i++) printf("Element #%i: \t (%f, %f)\n",i,h_vec[i].x,h_vec[i].y);
//Clean up
checkCudaErrors(cufftDestroy(plan));
}
|